//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
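//
// As a conceptual sketch (not actual pass output), with a vectorization
// factor of 4 a scalar loop such as
//   for (i = 0; i < n; ++i) A[i] = B[i] + K;
// becomes a loop whose body uses vector loads, adds and stores to process
// four elements per iteration, with the induction variable stepping by 4 and
// a scalar epilogue loop handling the remaining n % 4 iterations.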
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <map>
#include <tuple>

using namespace llvm;
using namespace llvm::PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// We don't vectorize loops with a known constant trip count below this
/// number.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Don't vectorize loops with a constant "
             "trip count that is smaller than this "
             "value."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));
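// As a usage sketch (values are illustrative), the hidden flags in this file
// can be set when running the pass directly, e.g.
//   opt -loop-vectorize -vectorizer-min-trip-count=4 ...
// or from clang via -mllvm, e.g. -mllvm -small-loop-cost=40.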
/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed, it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis
createMissedAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                     Instruction *I = nullptr) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}
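// A usage sketch (remark name and message are illustrative): the returned
// remark is streamed into and then handed to an emitter, e.g.
//   ORE->emit(createMissedAnalysis(LV_NAME, "CantComputeTripCount", TheLoop)
//             << "could not determine number of loop iterations");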
namespace {

// Forward declarations.
class LoopVectorizeHints;
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;

/// Returns true if the given loop body has a cycle, excluding the loop
/// itself.
static bool hasCyclesInLoopBody(const Loop &L) {
  if (!L.empty())
    return true;

  for (const auto &SCC :
       make_range(scc_iterator<Loop, LoopBodyTraits>::begin(L),
                  scc_iterator<Loop, LoopBodyTraits>::end(L))) {
    if (SCC.size() > 1) {
      DEBUG(dbgs() << "LVL: Detected a cycle in the loop body:\n");
      DEBUG(L.dump());
      return true;
    }
  }
  return false;
}

/// \brief This modifies LoopAccessReport to initialize the message with the
/// loop-vectorizer-specific part.
class VectorizationReport : public LoopAccessReport {
public:
  VectorizationReport(Instruction *I = nullptr)
      : LoopAccessReport("loop not vectorized: ", I) {}

  /// \brief This allows promotion of the loop-access analysis report into the
  /// loop-vectorizer report. It modifies the message to add the
  /// loop-vectorizer-specific part of the message.
  explicit VectorizationReport(const LoopAccessReport &R)
      : LoopAccessReport(Twine("loop not vectorized: ") + R.str(),
                         R.getInstr()) {}
};

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}

/// A helper function that returns a GEP instruction and knows to skip a
/// 'bitcast'. The 'bitcast' may be skipped if the source and the destination
/// pointee types of the 'bitcast' have the same size.
/// For example:
///   bitcast double** %var to i64* - can be skipped
///   bitcast double** %var to i8*  - cannot
static GetElementPtrInst *getGEPInstruction(Value *Ptr) {
  if (isa<GetElementPtrInst>(Ptr))
    return cast<GetElementPtrInst>(Ptr);

  if (isa<BitCastInst>(Ptr) &&
      isa<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0))) {
    Type *BitcastTy = Ptr->getType();
    Type *GEPTy = cast<BitCastInst>(Ptr)->getSrcTy();
    if (!isa<PointerType>(BitcastTy) || !isa<PointerType>(GEPTy))
      return nullptr;
    Type *Pointee1Ty = cast<PointerType>(BitcastTy)->getPointerElementType();
    Type *Pointee2Ty = cast<PointerType>(GEPTy)->getPointerElementType();
    const DataLayout &DL = cast<BitCastInst>(Ptr)->getModule()->getDataLayout();
    if (DL.getTypeSizeInBits(Pointee1Ty) == DL.getTypeSizeInBits(Pointee2Ty))
      return cast<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0));
  }
  return nullptr;
}

// FIXME: The following helper functions have multiple implementations
// in the project. They can be effectively organized in a common Load/Store
// utilities unit.

/// A helper function that returns the pointer operand of a load or store
/// instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// A helper function that returns the type of a loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns the alignment of a load or store
/// instruction.
static unsigned getMemInstAlignment(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlignment();
  return cast<StoreInst>(I)->getAlignment();
}

/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
static unsigned getMemInstAddressSpace(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
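// For example (a sketch, with sizes from a typical 64-bit data layout):
// x86_fp80 has an alloc size of 16 bytes but a store size of 10 bytes, so
// with VF = 4 an array of 4 elements occupies 64 bytes while <4 x x86_fp80>
// stores only 40; the type is therefore irregular, and a widened vector
// access can't be used for it.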
/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Induction(nullptr),
        OldInduction(nullptr), VectorLoopValueMap(UnrollFactor, VecWidth),
        TripCount(nullptr), VectorTripCount(nullptr), Legal(LVL), Cost(CM),
        AddedSafetyChecks(false) {}

  // Perform the actual loop widening (vectorization).
  void vectorize() {
    // Create a new empty loop. Unlink the old loop and connect the new one.
    createEmptyLoop();
    // Widen each instruction in the old loop to a new one in the new loop.
    vectorizeLoop();
  }

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  virtual ~InnerLoopVectorizer() {}

protected:
  /// A small list of PHINodes.
  typedef SmallVector<PHINode *, 4> PhiVector;

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  typedef SmallVector<Value *, 2> VectorParts;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  typedef SmallVector<SmallVector<Value *, 4>, 2> ScalarParts;

  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
      EdgeMaskCache;

  /// Create an empty loop, based on the loop ranges of the old loop.
  void createEmptyLoop();

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Copy and widen the instructions from the old loop.
  virtual void vectorizeLoop();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// \brief The Loop exit block may have single value PHI nodes where the
  /// incoming value is 'Undef'. While vectorizing we only handled real values
  /// that were defined inside the loop. Here we fix the 'undef case'.
  /// See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Predicate conditional instructions that require predication on their
  /// respective conditions.
  void predicateInstructions();

  /// Collect the instructions from the original loop that would be trivially
  /// dead in the vectorized loop if generated.
  void collectTriviallyDeadInstructions();

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);

  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// A helper function to vectorize a single BB within the innermost loop.
  void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF,
                           PhiVector *PV);

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars. If \p IfPredicateInstr is true we need to 'hide' each
  /// scalarized instruction behind an if block predicated on the control
  /// dependence of the instruction.
  virtual void scalarizeInstruction(Instruction *Instr,
                                    bool IfPredicateInstr = false);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);
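  // A sketch of getStepVector's effect (values are illustrative): with
  // VF = 4, Val = <%x, %x, %x, %x>, StartIdx = 0 and Step = 1, the result is
  //   %x + <i32 0, i32 1, i32 2, i32 3>  ==  <%x, %x+1, %x+2, %x+3>.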
  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it
  /// can be a truncate instruction).
  void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal);

  /// Create a vector induction phi node based on an existing scalar one. This
  /// currently only works for integer induction variables with a constant
  /// step. \p EntryVal is the value from the original loop that maps to the
  /// vector phi node. If \p EntryVal is a truncate instruction, instead of
  /// widening the original IV, we widen a version of the IV truncated to \p
  /// EntryVal's type.
  void createVectorIntInductionPHI(const InductionDescriptor &II,
                                   Instruction *EntryVal);

  /// Widen an integer induction variable \p IV. If \p Trunc is provided, the
  /// induction variable will first be truncated to the corresponding type.
  void widenIntInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// Return a constant reference to the VectorParts corresponding to \p V from
  /// the original loop. If the value has already been vectorized, the
  /// corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// new vector values on-demand by inserting the scalar values into vectors
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into vectors.
  const VectorParts &getVectorValue(Value *V);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part and vector index \p Lane. If the value has
  /// been vectorized but not scalarized, the necessary extractelement
  /// instruction will be generated.
  Value *getScalarValue(Value *V, unsigned Part, unsigned Lane);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Emit a bypass check to see if the trip count would overflow, or if we
  /// wouldn't have enough iterations to execute one vector loop.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if the vector trip count is nonzero.
  void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// This is a helper class for maintaining vectorization state. It's used for
  /// mapping values from the original loop to their corresponding values in
  /// the new loop. Two mappings are maintained: one for vectorized values and
  /// one for scalarized values. Vectorized values are represented with UF
  /// vector values in the new loop, and scalarized values are represented with
  /// UF x VF scalar values in the new loop. UF and VF are the unroll and
  /// vectorization factors, respectively.
  ///
  /// Entries can be added to either map with initVector and initScalar, which
  /// initialize and return a constant reference to the new entry. If a
  /// non-constant reference to a vector entry is required, getVector can be
  /// used to retrieve a mutable entry. We currently directly modify the mapped
  /// values during "fix-up" operations that occur once the first phase of
  /// widening is complete. These operations include type truncation and the
  /// second phase of recurrence widening.
  ///
  /// Otherwise, entries from either map should be accessed using the
  /// getVectorValue or getScalarValue functions from InnerLoopVectorizer.
  /// getVectorValue and getScalarValue coordinate to generate a vector or
  /// scalar value on-demand if one is not yet available. When vectorizing a
  /// loop, we visit the definition of an instruction before its uses. When
  /// visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  struct ValueMap {

    /// Construct an empty map with the given unroll and vectorization factors.
    ValueMap(unsigned UnrollFactor, unsigned VecWidth)
        : UF(UnrollFactor), VF(VecWidth) {
      // The unroll and vectorization factors are only used in asserts builds
      // to verify map entries are sized appropriately.
      (void)UF;
      (void)VF;
    }

    /// \return True if the map has a vector entry for \p Key.
    bool hasVector(Value *Key) const { return VectorMapStorage.count(Key); }

    /// \return True if the map has a scalar entry for \p Key.
    bool hasScalar(Value *Key) const { return ScalarMapStorage.count(Key); }

    /// \brief Map \p Key to the given VectorParts \p Entry, and return a
    /// constant reference to the new vector map entry. The given key should
    /// not already be in the map, and the given VectorParts should be
    /// correctly sized for the current unroll factor.
    const VectorParts &initVector(Value *Key, const VectorParts &Entry) {
      assert(!hasVector(Key) && "Vector entry already initialized");
      assert(Entry.size() == UF && "VectorParts has wrong dimensions");
      VectorMapStorage[Key] = Entry;
      return VectorMapStorage[Key];
    }

    /// \brief Map \p Key to the given ScalarParts \p Entry, and return a
    /// constant reference to the new scalar map entry. The given key should
    /// not already be in the map, and the given ScalarParts should be
    /// correctly sized for the current unroll and vectorization factors.
    const ScalarParts &initScalar(Value *Key, const ScalarParts &Entry) {
      assert(!hasScalar(Key) && "Scalar entry already initialized");
      assert(Entry.size() == UF &&
             all_of(make_range(Entry.begin(), Entry.end()),
                    [&](const SmallVectorImpl<Value *> &Values) -> bool {
                      return Values.size() == VF;
                    }) &&
             "ScalarParts has wrong dimensions");
      ScalarMapStorage[Key] = Entry;
      return ScalarMapStorage[Key];
    }
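    // A usage sketch (names are illustrative): with UF = 2, a widened value
    // V is registered with one vector part per unroll iteration:
    //   VectorParts Parts(UF);
    //   Parts[0] = WideVal0; // <VF x Ty> for unroll part 0
    //   Parts[1] = WideVal1; // <VF x Ty> for unroll part 1
    //   VectorLoopValueMap.initVector(V, Parts);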
    /// \return A reference to the vector map entry corresponding to \p Key.
    /// The key should already be in the map. This function should only be used
    /// when it's necessary to update values that have already been vectorized.
    /// This is the case for "fix-up" operations including type truncation and
    /// the second phase of recurrence vectorization. If a non-const reference
    /// isn't required, getVectorValue should be used instead.
    VectorParts &getVector(Value *Key) {
      assert(hasVector(Key) && "Vector entry not initialized");
      return VectorMapStorage.find(Key)->second;
    }

    /// Retrieve an entry from the vector or scalar maps. The preferred way to
    /// access an existing mapped entry is with getVectorValue or
    /// getScalarValue from InnerLoopVectorizer. Until those functions can be
    /// moved inside ValueMap, we have to declare them as friends.
    friend const VectorParts &InnerLoopVectorizer::getVectorValue(Value *V);
    friend Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part,
                                                      unsigned Lane);

  private:
    /// The unroll factor. Each entry in the vector map contains UF vector
    /// values.
    unsigned UF;

    /// The vectorization factor. Each entry in the scalar map contains UF x VF
    /// scalar values.
    unsigned VF;

    /// The vector and scalar map storage. We use std::map and not DenseMap
    /// because insertions to DenseMap invalidate its iterators.
    std::map<Value *, VectorParts> VectorMapStorage;
    std::map<Value *, ScalarParts> ScalarMapStorage;
  };

  /// The original loop.
  Loop *OrigLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;
  /// Loop Info.
  LoopInfo *LI;
  /// Dominator Tree.
  DominatorTree *DT;
  /// Alias Analysis.
  AliasAnalysis *AA;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Assumption Cache.
  AssumptionCache *AC;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

protected:
  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;
  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;
  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;
  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;
  /// The vector loop body.
  BasicBlock *LoopVectorBody;
  /// The scalar loop body.
  BasicBlock *LoopScalarBody;
  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction;
  /// The induction variable of the old basic block.
  PHINode *OldInduction;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  ValueMap VectorLoopValueMap;

  /// Store instructions that should be predicated, as a pair
  /// <StoreInst, Predicate>.
  SmallVector<std::pair<Instruction *, Value *>, 4> PredicatedInstructions;
  EdgeMaskCache MaskCache;
  /// Trip count of the original loop.
  Value *TripCount;
  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks;

  // Holds instructions from the original loop whose counterparts in the
  // vectorized loop would be trivially dead if generated. For example,
  // original induction update instructions can become dead because we
  // separately emit induction "steps" when generating code for the new loop.
  // Similarly, we create a new latch condition when setting up the structure
  // of the new loop, so the old one can become dead.
  SmallPtrSet<Instruction *, 4> DeadInstructions;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  void scalarizeInstruction(Instruction *Instr,
                            bool IfPredicateInstr = false) override;
  void vectorizeMemoryInstruction(Instruction *Instr) override;
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

/// \brief Set the debug location in the builder using the debug location in
/// the instruction.
static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr))
    B.SetCurrentDebugLocation(Inst->getDebugLoc());
  else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To, Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
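///
/// A gap sketch using the load group above: member index 2 (A[i+2]) is
/// missing, so getMember(2) returns nullptr while getMember(0), getMember(1)
/// and getMember(3) return the corresponding loads.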
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is related to the leader and it could
  /// be negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The largest index is always less than the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if it contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey;
  int LargestKey;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32      // Insert Position
  //      %add = add i32 %even  // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32        // Def of %odd
  //      store i32 %odd        // Insert Position
  Instruction *InsertPos;
};

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise it's meaningless to do analysis as the vectorization
/// on interleaved accesses is unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(nullptr),
        RequiresScalarEpilogue(false) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Return the maximum interleave factor of all interleaved groups.
  unsigned getMaxInterleaveFactor() const {
    unsigned MaxFactor = 1;
    for (auto &Entry : InterleaveGroupMap)
      MaxFactor = std::max(MaxFactor, Entry.second->getFactor());
    return MaxFactor;
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if it doesn't have such a group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

  /// \brief Initialize the LoopAccessInfo used for dependence checking.
  void setLAI(const LoopAccessInfo *Info) { LAI = Info; }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;
  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// Holds dependences among the memory accesses in the loop. It maps a source
  /// access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    StrideDescriptor() = default;

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;
    const SCEV *Scev = nullptr; // The scalar expression of this access.
    uint64_t Size = 0;          // The size of the memory object.
    unsigned Align = 0;         // The alignment of this access.
  };

  /// \brief A type for holding instructions and their stride descriptors.
  typedef std::pair<Instruction *, StrideDescriptor> StrideEntry;

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);

  /// \brief Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride) {
    unsigned Factor = std::abs(Stride);
    return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
  }

  /// \brief Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }

  /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return false if reordering is
  /// not necessary or is prevented because \p A and \p B may be dependent.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {
    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B. Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
  }

  /// \brief Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for them.
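///
/// As a sketch of the underlying metadata shape, a loop annotated with
/// "#pragma clang loop vectorize_width(4)" carries a self-referential loop ID
/// along the lines of:
///   !0 = distinct !{!0, !1}
///   !1 = !{!"llvm.loop.vectorize.width", i32 4}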
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;
  /// Vectorization interleave factor.
  Hint Interleave;
  /// Vectorization forced.
  Hint Force;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
                     OptimizationRemarkEmitter &ORE)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        PotentiallyUnsafe(false), TheLoop(L), ORE(ORE) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized() {
    Width.Value = Interleave.Value = 1;
    Hint Hints[] = {Width, Interleave};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (getWidth() == 1 && getInterleave() == 1) {
      // FIXME: Add a separate metadata to indicate when the loop has already
      // been vectorized instead of setting width and count to 1.
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      ORE.emit(OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
                                          "AllDisabled", L->getStartLoc(),
                                          L->getHeader())
               << "loop not vectorized: vectorization and interleaving are "
                  "explicitly disabled, or vectorize width and interleave "
                  "count are both set to 1");
      return false;
    }

    return true;
  }

  /// Dumps all the hint information.
  void emitRemarkWithHints() const {
    using namespace ore;
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      ORE.emit(OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
               << "loop not vectorized: vectorization is explicitly disabled");
    else {
      OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
                                 TheLoop->getStartLoc(), TheLoop->getHeader());
      R << "loop not vectorized";
      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
        R << " (Force=" << NV("Force", true);
        if (Width.Value != 0)
          R << ", Vector Width=" << NV("VectorWidth", Width.Value);
        if (Interleave.Value != 0)
          R << ", Interleave Count=" << NV("InterleaveCount", Interleave.Value);
        R << ")";
      }
      ORE.emit(R);
    }
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }

  /// \brief If hints are provided that force vectorization, use the
  /// AlwaysPrint pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const {
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return OptimizationRemarkAnalysis::AlwaysPrint;
  }

  bool allowReordering() const {
    // When loop hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations that is given by the
    // scalar loop. This is not enabled by default because it can be unsafe
    // or inefficient. For example, reordering floating-point operations will
    // change the way round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");
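    // A well-formed loop ID is self-referential in operand 0, e.g. (a
    // sketch): !0 = distinct !{!0, !1, !2}, where !1 and !2 are hint nodes
    // parsed by the loop below.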
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either a MDString or a MDNode with the first
      // operand a MDString.
      if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        if (!MD || MD->getNumOperands() == 0)
          continue;
        S = dyn_cast<MDString>(MD->getOperand(0));
        for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
          Args.push_back(MD->getOperand(i));
      } else {
        S = dyn_cast<MDString>(LoopID->getOperand(i));
        assert(Args.size() == 0 && "too many arguments for MDString");
      }

      if (!S)
        continue;

      // Check if the hint starts with the loop metadata prefix.
      StringRef Name = S->getString();
      if (Args.size() == 1)
        setHint(Name, Args[0]);
    }
  }

  /// Checks a string hint with one operand and sets the value if valid.
  void setHint(StringRef Name, Metadata *Arg) {
    if (!Name.startswith(Prefix()))
      return;
    Name = Name.substr(Prefix().size(), StringRef::npos);

    const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
    if (!C)
      return;
    unsigned Val = C->getZExtValue();

    Hint *Hints[] = {&Width, &Interleave, &Force};
    for (auto H : Hints) {
      if (Name == H->Name) {
        if (H->validate(Val))
          H->Value = Val;
        else
          DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
        break;
      }
    }
  }

  /// Create a new hint from a name / value pair.
  MDNode *createHintMetadata(StringRef Name, unsigned V) const {
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    Metadata *MDs[] = {MDString::get(Context, Name),
                       ConstantAsMetadata::get(
                           ConstantInt::get(Type::getInt32Ty(Context), V))};
    return MDNode::get(Context, MDs);
  }

  /// Matches metadata with the hint name.
  bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
    MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
    if (!Name)
      return false;

    for (auto H : HintTypes)
      if (Name->getString().endswith(H.Name))
        return true;
    return false;
  }

  /// Sets the current hints into the loop metadata, keeping other values
  /// intact.
  void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
    if (HintTypes.size() == 0)
      return;

    // Reserve the first element for LoopID (see below).
    SmallVector<Metadata *, 4> MDs(1);
    // If the loop already has metadata, then ignore the existing operands.
    MDNode *LoopID = TheLoop->getLoopID();
    if (LoopID) {
      for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
        MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
        // If the node is in the update list, ignore the old value.
        if (!matchesHintMetadataName(Node, HintTypes))
          MDs.push_back(Node);
      }
    }

    // Now, add the missing hints.
    for (auto H : HintTypes)
      MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));

    // Replace the current metadata node with the new one.
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);

    TheLoop->setLoopID(NewLoopID);
  }

  /// The loop these hints belong to.
  const Loop *TheLoop;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter &ORE;
};

static void emitAnalysisDiag(const Loop *TheLoop,
                             const LoopVectorizeHints &Hints,
                             OptimizationRemarkEmitter &ORE,
                             const LoopAccessReport &Message) {
  const char *Name = Hints.vectorizeAnalysisPassName();
  LoopAccessReport::emitAnalysis(Message, TheLoop, Name, ORE);
}

static void emitMissedWarning(Function *F, Loop *L,
                              const LoopVectorizeHints &LH,
                              OptimizationRemarkEmitter *ORE) {
  LH.emitRemarkWithHints();

  if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
    if (LH.getWidth() != 1)
      ORE->emit(DiagnosticInfoOptimizationFailure(
                    DEBUG_TYPE, "FailedRequestedVectorization",
                    L->getStartLoc(), L->getHeader())
                << "loop not vectorized: "
                << "failed explicitly specified loop vectorization");
    else if (LH.getInterleave() != 1)
      ORE->emit(DiagnosticInfoOptimizationFailure(
                    DEBUG_TYPE, "FailedRequestedInterleaving", L->getStartLoc(),
                    L->getHeader())
                << "loop not interleaved: "
                << "failed explicitly specified loop interleaving");
  }
}

/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
/// to what vectorization factor.
/// This class does not look at the profitability of vectorization, only the
/// legality. This class has two main kinds of checks:
/// * Memory checks - The code in canVectorizeMemory checks if vectorization
///   will change the order of memory accesses in a way that will change the
///   correctness of the program.
/// * Scalar checks - The code in canVectorizeInstrs and canVectorizeMemory
///   checks for a number of different conditions, such as the availability of
///   a single induction variable, that all types are supported and
///   vectorizable, etc. This code reflects the capabilities of
///   InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying the
/// induction variables and the different reduction variables.
class LoopVectorizationLegality {
public:
  LoopVectorizationLegality(
      Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
      TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F,
      const TargetTransformInfo *TTI,
      std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI,
      OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R,
      LoopVectorizeHints *H)
      : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT),
        GetLAA(GetLAA), LAI(nullptr), ORE(ORE), InterleaveInfo(PSE, L, DT, LI),
        Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
        Requirements(R), Hints(H) {}

  /// ReductionList contains the reduction descriptors for all
  /// of the reductions that were found in the loop.
  typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;

  /// InductionList saves induction variables and maps them to the
  /// induction descriptor.
  typedef MapVector<PHINode *, InductionDescriptor> InductionList;

  /// RecurrenceSet contains the phi nodes that are recurrences other than
  /// inductions and reductions.
  typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet;

  /// Returns true if it is legal to vectorize this loop.
  /// This does not mean that it is profitable to vectorize this
  /// loop, only that it is legal to do so.
  bool canVectorize();

  /// Returns the induction variable.
  PHINode *getInduction() { return Induction; }

  /// Returns the reduction variables found in the loop.
  ReductionList *getReductionVars() { return &Reductions; }

  /// Returns the induction variables found in the loop.
  InductionList *getInductionVars() { return &Inductions; }

  /// Return the first-order recurrences found in the loop.
  RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }

  /// Returns the widest induction type.
  Type *getWidestInductionType() { return WidestIndTy; }

  /// Returns True if V is an induction variable in this loop.
  bool isInductionVariable(const Value *V);

  /// Returns True if PN is a reduction variable in this loop.
  bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }

  /// Returns True if Phi is a first-order recurrence in this loop.
  bool isFirstOrderRecurrence(const PHINode *Phi);

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  bool blockNeedsPredication(BasicBlock *BB);

  /// Check if this pointer is consecutive when vectorizing. This happens
  /// when the last index of the GEP is the induction variable, or when the
  /// pointer itself is an induction variable.
  /// This check allows us to vectorize A[idx] into a wide load/store.
  /// Returns:
  /// 0 - Stride is unknown or non-consecutive.
  /// 1 - Address is consecutive.
  /// -1 - Address is consecutive, and decreasing.
  int isConsecutivePtr(Value *Ptr);

  /// Returns true if the value V is uniform within the loop.
  bool isUniform(Value *V);

  /// Returns the information that we collected about the runtime memory check.
  const RuntimePointerChecking *getRuntimePointerChecking() const {
    return LAI->getRuntimePointerChecking();
  }

  const LoopAccessInfo *getLAI() const { return LAI; }

  /// \brief Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// \brief Return the maximum interleave factor of all interleaved groups.
  unsigned getMaxInterleaveFactor() const {
    return InterleaveInfo.getMaxInterleaveFactor();
  }

  /// \brief Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// \brief Returns true if an interleaved group requires a scalar iteration
  /// to handle accesses with gaps.
  bool requiresScalarEpilogue() const {
    return InterleaveInfo.requiresScalarEpilogue();
  }

  unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }

  bool hasStride(Value *V) { return LAI->hasStride(V); }

  /// Returns true if the target machine supports a masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
  }
  /// Returns true if the target machine supports a masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
  }
  /// Returns true if the target machine supports a masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType) {
    return TTI->isLegalMaskedScatter(DataType);
  }
  /// Returns true if the target machine supports a masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType) {
    return TTI->isLegalMaskedGather(DataType);
  }
  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    auto *LI = dyn_cast<LoadInst>(V);
    auto *SI = dyn_cast<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ptr = getPointerOperand(V);
    auto *Ty = cast<PointerType>(Ptr->getType())->getElementType();
    return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
  }

  /// Returns true if the vector representation of the instruction \p I
  /// requires a mask.
  bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
  unsigned getNumStores() const { return LAI->getNumStores(); }
  unsigned getNumLoads() const { return LAI->getNumLoads(); }
  unsigned getNumPredStores() const { return NumPredStores; }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  bool isScalarWithPredication(Instruction *I);

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);

private:
  /// Check if a single basic block loop is vectorizable.
  /// At this point we know that this is a loop with a constant trip count
  /// and we only need to check individual instructions.
  bool canVectorizeInstrs();

  /// When we vectorize loops we may change the order in which
  /// we read and write from memory. This method checks if it is
  /// legal to vectorize the code, considering only memory constraints.
  /// Returns true if the loop is vectorizable.
  bool canVectorizeMemory();

  /// Return true if we can vectorize this loop using the IF-conversion
  /// transformation.
  bool canVectorizeWithIfConvert();

  /// Return true if all of the instructions in the block can be speculatively
  /// executed. \p SafePtrs is a list of addresses that are known to be legal
  /// and we know that we can read from them without segfault.
  bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);

  /// Updates the vectorization state by adding \p Phi to the inductions list.
  /// This can set \p Phi as the main induction of the loop if \p Phi is a
  /// better choice for the main induction than the existing one.
  void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
                       SmallPtrSetImpl<Value *> &AllowedExit);

  /// Report an analysis message to assist the user in diagnosing loops that are
  /// not vectorized. These are handled as LoopAccessReport rather than
  /// VectorizationReport because the << operator of VectorizationReport returns
  /// LoopAccessReport.
  void emitAnalysis(const LoopAccessReport &Message) const {
    emitAnalysisDiag(TheLoop, *Hints, *ORE, Message);
  }

  /// Create an analysis remark that explains why vectorization failed.
  ///
  /// \p RemarkName is the identifier for the remark. If \p I is passed it is
  /// an instruction that prevents vectorization. Otherwise the loop is used
  /// for the location of the remark. \return the remark object that can be
  /// streamed to.
  OptimizationRemarkAnalysis
  createMissedAnalysis(StringRef RemarkName, Instruction *I = nullptr) const {
    return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
                                  RemarkName, TheLoop, I);
  }

  /// \brief If an access has a symbolic stride, this maps the pointer value to
  /// the stride symbol.
  const ValueToValueMap *getSymbolicStrides() {
    // FIXME: Currently, the set of symbolic strides is sometimes queried before
    // it's collected. This happens from canVectorizeWithIfConvert, when the
    // pointer is checked to reference consecutive elements suitable for a
    // masked access.
    return LAI ? &LAI->getSymbolicStrides() : nullptr;
  }

  unsigned NumPredStores;

  /// The loop that we evaluate.
  Loop *TheLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
  /// Applies dynamic knowledge to simplify SCEV expressions in the context
  /// of existing SCEV assumptions. The analysis will also add a minimal set
  /// of new predicates if this is required to enable vectorization and
  /// unrolling.
  PredicatedScalarEvolution &PSE;
  /// Target Library Info.
  TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Dominator Tree.
  DominatorTree *DT;
  // LoopAccess analysis.
  std::function<const LoopAccessInfo &(Loop &)> *GetLAA;
  // And the loop-accesses info corresponding to this loop. This pointer is
  // null until canVectorizeMemory sets it up.
  const LoopAccessInfo *LAI;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride and close to each other.
  InterleavedAccessInfo InterleaveInfo;

  // --- vectorization state --- //

  /// Holds the integer induction variable. This is the counter of the
  /// loop.
  PHINode *Induction;
  /// Holds the reduction variables.
  ReductionList Reductions;
  /// Holds all of the induction variables that we found in the loop.
  /// Notice that inductions don't need to start at zero and that induction
  /// variables can be pointers.
  InductionList Inductions;
  /// Holds the phi nodes that are first-order recurrences.
  RecurrenceSet FirstOrderRecurrences;
  /// Holds the widest induction type encountered.
  Type *WidestIndTy;

  /// Allowed outside users. This holds the induction and reduction
  /// vars which can be accessed from outside the loop.
  SmallPtrSet<Value *, 4> AllowedExit;

  /// Can we assume the absence of NaNs.
  bool HasFunNoNaNAttr;

  /// Vectorization requirements that will go through late-evaluation.
  LoopVectorizationRequirements *Requirements;

  /// Used to emit an analysis of any legality issues.
  LoopVectorizeHints *Hints;

  /// While vectorizing these instructions we have to generate a
  /// call to the appropriate masked intrinsic.
  SmallPtrSet<const Instruction *, 8> MaskedOp;
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
                             LoopInfo *LI, LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints)
      : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
        AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {}

  /// Information about vectorization costs.
  struct VectorizationFactor {
    unsigned Width; // Vector width with best cost.
    unsigned Cost;  // Cost of the loop with that width.
  };
  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to VF. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(bool OptForSize);

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If the interleave count has been specified by metadata it will be
  /// returned. Otherwise, the interleave count is computed and returned. VF
  /// and LoopCost are the selected vectorization factor and the cost of the
  /// selected VF.
  unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
                                 unsigned LoopCost);

  /// Memory access instructions may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. These decisions are used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(unsigned VF);

  /// \brief A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    unsigned LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    unsigned MaxLocalUsers;
    /// Holds the number of instructions in the loop.
    unsigned NumInstructions;
  };

  /// \return Returns information about the register usage of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.count(I);
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;
    assert(Uniforms.count(VF) && "VF not yet analyzed for uniformity");
    auto UniformsPerVF = Uniforms.find(VF);
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;
    assert(Scalars.count(VF) && "Scalar values are not calculated for VF");
    auto ScalarsPerVF = Scalars.find(VF);
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
    return VF > 1 && MinBWs.count(I) && !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for a memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
                           unsigned Cost) {
    assert(VF >= 2 && "Expected VF >= 2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup *Grp, unsigned VF,
                           InstWidening W, unsigned Cost) {
    assert(VF >= 2 && "Expected VF >= 2");
    // Broadcast this decision to all instructions inside the group.
    // But the cost will be assigned to one instruction only.
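    // For example, given a group {load A[i], load A[i+1]} whose insert
    // position is its first member, the resulting entries are, as a sketch:
    //   (load A[i],   VF) -> (CM_Interleave, Cost)
    //   (load A[i+1], VF) -> (CM_Interleave, 0)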
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >= 2");
    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  unsigned getWideningCost(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >= 2");
    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.count(InstOnVF) && "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

private:
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  typedef std::pair<unsigned, bool> VectorizationCostTy;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);

  /// Calculate the vectorization cost of memory instruction \p I.
  unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);

  /// The cost computation for a scalarized memory instruction.
  unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);

  /// The cost computation for an interleaving group of memory instructions.
  unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);

  /// The cost computation for a Gather/Scatter instruction.
  unsigned getGatherScatterCost(Instruction *I, unsigned VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);

  /// The cost calculation for a Load instruction \p I with a uniform pointer -
  /// scalar load + broadcast.
  unsigned getUniformMemOpCost(Instruction *I, unsigned VF);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Create an analysis remark that explains why vectorization failed.
  ///
  /// \p RemarkName is the identifier for the remark. \return the remark object
  /// that can be streamed to.
  OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
    return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
                                  RemarkName, TheLoop);
  }

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  typedef DenseMap<Instruction *, unsigned> ScalarCostsTy;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                              unsigned VF);

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(unsigned VF);

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopUniforms(unsigned VF);

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization. Non-uniform scalarized instructions will be
  /// represented by VF values in the vectorized loop, each corresponding to an
  /// iteration of the original scalar loop.
  void collectLoopScalars(unsigned VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on the CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(unsigned VF) {
    // Do the analysis once.
    if (VF == 1 || Uniforms.count(VF))
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Keeps the cost model vectorization decision and cost for instructions.
  /// Right now it is used for memory instructions only.
  typedef DenseMap<std::pair<Instruction *, unsigned>,
                   std::pair<InstWidening, unsigned>>
      DecisionList;

  DecisionList WideningDecisions;

public:
  /// The loop that we evaluate.
  Loop *TheLoop;
  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;
  /// Loop Info analysis.
  LoopInfo *LI;
  /// Vectorization legality.
  LoopVectorizationLegality *Legal;
  /// Vector target information.
  const TargetTransformInfo &TTI;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Demanded bits analysis.
  DemandedBits *DB;
  /// Assumption cache.
  AssumptionCache *AC;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  const Function *TheFunction;
  /// Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;
  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;
  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;
};

/// \brief This holds vectorization requirements that must be verified late in
/// the process. The requirements are set by the legality analysis and the cost
/// model. Once vectorization has been determined to be possible and profitable
/// the requirements can be verified by looking for metadata or compiler
/// options. For example, some loops require FP commutativity which is only
/// allowed if vectorization is explicitly specified or if the fast-math
/// compiler option has been provided.
/// Late evaluation of these requirements allows helpful diagnostics to be
/// composed that tell the user what needs to be done to vectorize the loop.
/// For example, by specifying #pragma clang loop vectorize or -ffast-math.
/// Late evaluation should be used only when diagnostics can be generated that
/// can be followed by a non-expert user.
class LoopVectorizationRequirements {
public:
  LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE)
      : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr), ORE(ORE) {}

  void addUnsafeAlgebraInst(Instruction *I) {
    // First unsafe algebra instruction.
    if (!UnsafeAlgebraInst)
      UnsafeAlgebraInst = I;
  }

  void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }

  bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
    const char *PassName = Hints.vectorizeAnalysisPassName();
    bool Failed = false;
    if (UnsafeAlgebraInst && !Hints.allowReordering()) {
      ORE.emit(
          OptimizationRemarkAnalysisFPCommute(PassName, "CantReorderFPOps",
                                              UnsafeAlgebraInst->getDebugLoc(),
                                              UnsafeAlgebraInst->getParent())
          << "loop not vectorized: cannot prove it is safe to reorder "
             "floating-point operations");
      Failed = true;
    }

    // Test if runtime memcheck thresholds are exceeded.
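    // Two limits apply here: the pragma threshold is a hard limit even when
    // vectorization was explicitly requested, while the default threshold can
    // be lifted by hints that allow reordering (see the condition below).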
    bool PragmaThresholdReached =
        NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
    bool ThresholdReached =
        NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
    if ((ThresholdReached && !Hints.allowReordering()) ||
        PragmaThresholdReached) {
      ORE.emit(OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps",
                                                  L->getStartLoc(),
                                                  L->getHeader())
               << "loop not vectorized: cannot prove it is safe to reorder "
                  "memory operations");
      DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
      Failed = true;
    }

    return Failed;
  }

private:
  unsigned NumRuntimePointerChecks;
  Instruction *UnsafeAlgebraInst;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter &ORE;
};

static void addAcyclicInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) {
  if (L.empty()) {
    if (!hasCyclesInLoopBody(L))
      V.push_back(&L);
    return;
  }
  for (Loop *InnerL : L)
    addAcyclicInnerLoop(*InnerL, V);
}

/// The LoopVectorize Pass.
struct LoopVectorize : public FunctionPass {
  /// Pass identification, replacement for typeid.
  static char ID;

  explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true)
      : FunctionPass(ID) {
    Impl.DisableUnrolling = NoUnrolling;
    Impl.AlwaysVectorize = AlwaysVectorize;
    initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
  }

  LoopVectorizePass Impl;

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
                        GetLAA, *ORE);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
  }
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
// LoopVectorizationCostModel.
//===----------------------------------------------------------------------===//

Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop.
  Instruction *Instr = dyn_cast<Instruction>(V);
  bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody);
  bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;

  // Place the code for broadcasting invariant variables in the new preheader.
  IRBuilder<>::InsertPointGuard Guard(Builder);
  if (Invariant)
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // Broadcast the scalar into all locations in the vector.
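  // For VF = 4 this is roughly (a sketch of what CreateVectorSplat emits;
  // value names are illustrative):
  //   %b         = insertelement <4 x i32> undef, i32 %v, i32 0
  //   %broadcast = shufflevector <4 x i32> %b, <4 x i32> undef,
  //                              <4 x i32> zeroinitializer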
  Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

  return Shuf;
}

void InnerLoopVectorizer::createVectorIntInductionPHI(
    const InductionDescriptor &II, Instruction *EntryVal) {
  Value *Start = II.getStartValue();
  ConstantInt *Step = II.getConstIntStepValue();
  assert(Step && "Can not widen an IV with a non-constant step");

  // Construct the initial value of the vector IV in the vector loop preheader.
  auto CurrIP = Builder.saveIP();
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
  if (isa<TruncInst>(EntryVal)) {
    auto *TruncType = cast<IntegerType>(EntryVal->getType());
    Step = ConstantInt::getSigned(TruncType, Step->getSExtValue());
    Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
  }
  Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
  Value *SteppedStart = getStepVector(SplatStart, 0, Step);
  Builder.restoreIP(CurrIP);

  Value *SplatVF = ConstantVector::getSplat(
      VF, ConstantInt::getSigned(Start->getType(), VF * Step->getSExtValue()));
  // We may need to add the step a number of times, depending on the unroll
  // factor. The last of those goes into the PHI.
  PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
                                    &*LoopVectorBody->getFirstInsertionPt());
  Instruction *LastInduction = VecInd;
  VectorParts Entry(UF);
  for (unsigned Part = 0; Part < UF; ++Part) {
    Entry[Part] = LastInduction;
    LastInduction = cast<Instruction>(
        Builder.CreateAdd(LastInduction, SplatVF, "step.add"));
  }
  VectorLoopValueMap.initVector(EntryVal, Entry);
  if (isa<TruncInst>(EntryVal))
    addMetadata(Entry, EntryVal);

  // Move the last step to the end of the latch block. This ensures consistent
  // placement of all induction updates.
  auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
  auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
  auto *ICmp = cast<Instruction>(Br->getCondition());
  LastInduction->moveBefore(ICmp);
  LastInduction->setName("vec.ind.next");

  VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
  VecInd->addIncoming(LastInduction, LoopVectorLatch);
}

bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
  return Cost->isScalarAfterVectorization(I, VF) ||
         Cost->isProfitableToScalarize(I, VF);
}

bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
  if (shouldScalarizeInstruction(IV))
    return true;
  auto isScalarInst = [&](User *U) -> bool {
    auto *I = cast<Instruction>(U);
    return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
  };
  return any_of(IV->users(), isScalarInst);
}

void InnerLoopVectorizer::widenIntInduction(PHINode *IV, TruncInst *Trunc) {

  auto II = Legal->getInductionVars()->find(IV);
  assert(II != Legal->getInductionVars()->end() && "IV is not an induction");

  auto ID = II->second;
  assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");

  // The scalar value to broadcast. This will be derived from the canonical
  // induction variable.
  Value *ScalarIV = nullptr;

  // The step of the induction.
  Value *Step = nullptr;

  // The value from the original loop to which we are mapping the new induction
  // variable.
  Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
  // True if we have vectorized the induction variable.
  auto VectorizedIV = false;

  // Determine if we want a scalar version of the induction variable. This is
  // true if the induction variable itself is not widened, or if it has at
  // least one user in the loop that is not widened.
  auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal);

  // If the induction variable has a constant integer step value, go ahead and
  // get it now.
  if (ID.getConstIntStepValue())
    Step = ID.getConstIntStepValue();

  // Try to create a new independent vector induction variable. If we can't
  // create the phi node, we will splat the scalar induction variable in each
  // loop iteration.
  if (VF > 1 && IV->getType() == Induction->getType() && Step &&
      !shouldScalarizeInstruction(EntryVal)) {
    createVectorIntInductionPHI(ID, EntryVal);
    VectorizedIV = true;
  }

  // If we haven't yet vectorized the induction variable, or if we will create
  // a scalar one, we need to define the scalar induction variable and step
  // values. If we were given a truncation type, truncate the canonical
  // induction variable and constant step. Otherwise, derive these values from
  // the induction descriptor.
  if (!VectorizedIV || NeedsScalarIV) {
    if (Trunc) {
      auto *TruncType = cast<IntegerType>(Trunc->getType());
      assert(Step && "Truncation requires constant integer step");
      auto StepInt = cast<ConstantInt>(Step)->getSExtValue();
      ScalarIV = Builder.CreateCast(Instruction::Trunc, Induction, TruncType);
      Step = ConstantInt::getSigned(TruncType, StepInt);
    } else {
      ScalarIV = Induction;
      auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
      if (IV != OldInduction) {
        ScalarIV = Builder.CreateSExtOrTrunc(ScalarIV, IV->getType());
        ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL);
        ScalarIV->setName("offset.idx");
      }
      if (!Step) {
        SCEVExpander Exp(*PSE.getSE(), DL, "induction");
        Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(),
                                 &*Builder.GetInsertPoint());
      }
    }
  }

  // If we haven't yet vectorized the induction variable, splat the scalar
  // induction variable, and build the necessary step vectors.
  if (!VectorizedIV) {
    Value *Broadcasted = getBroadcastInstrs(ScalarIV);
    VectorParts Entry(UF);
    for (unsigned Part = 0; Part < UF; ++Part)
      Entry[Part] = getStepVector(Broadcasted, VF * Part, Step);
    VectorLoopValueMap.initVector(EntryVal, Entry);
    if (Trunc)
      addMetadata(Entry, Trunc);
  }

  // If an induction variable is only used for counting loop iterations or
  // calculating addresses, it doesn't need to be widened. Create scalar steps
  // that can be used by instructions we will later scalarize. Note that the
  // addition of the scalar steps will not increase the number of instructions
  // in the loop in the common case prior to InstCombine. We will be trading
  // one vector extract for each scalar step.
  if (NeedsScalarIV)
    buildScalarSteps(ScalarIV, Step, EntryVal);
}

Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
                                          Instruction::BinaryOps BinOp) {
  // Create and check the types.
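  // E.g., for a splatted start <n, n, n, n>, StartIdx 0, and a step of 2, the
  // integer path below produces <n, n+2, n+4, n+6> (an illustrative sketch).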
  assert(Val->getType()->isVectorTy() && "Must be a vector");
  int VLen = Val->getType()->getVectorNumElements();

  Type *STy = Val->getType()->getScalarType();
  assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
         "Induction Step must be an integer or FP");
  assert(Step->getType() == STy && "Step has wrong type");

  SmallVector<Constant *, 8> Indices;

  if (STy->isIntegerTy()) {
    // Create a vector of consecutive numbers from zero to VF.
    for (int i = 0; i < VLen; ++i)
      Indices.push_back(ConstantInt::get(STy, StartIdx + i));

    // Add the consecutive indices to the vector value.
    Constant *Cv = ConstantVector::get(Indices);
    assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
    Step = Builder.CreateVectorSplat(VLen, Step);
    assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    // flags, which can be found from the original scalar operations.
    Step = Builder.CreateMul(Cv, Step);
    return Builder.CreateAdd(Val, Step, "induction");
  }

  // Floating point induction.
  assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
         "Binary Opcode should be specified for FP induction");
  // Create a vector of consecutive numbers from zero to VF.
  for (int i = 0; i < VLen; ++i)
    Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));

  // Add the consecutive indices to the vector value.
  Constant *Cv = ConstantVector::get(Indices);

  Step = Builder.CreateVectorSplat(VLen, Step);

  // Floating point operations had to be 'fast' to enable the induction.
  FastMathFlags Flags;
  Flags.setUnsafeAlgebra();

  Value *MulOp = Builder.CreateFMul(Cv, Step);
  if (isa<Instruction>(MulOp))
    // We have to check: MulOp may be a constant.
    cast<Instruction>(MulOp)->setFastMathFlags(Flags);

  Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
  if (isa<Instruction>(BOp))
    cast<Instruction>(BOp)->setFastMathFlags(Flags);
  return BOp;
}

void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
                                           Value *EntryVal) {

  // We shouldn't have to build scalar steps if we aren't vectorizing.
  assert(VF > 1 && "VF should be greater than one");

  // Get the value type and ensure it and the step have the same integer type.
  Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
  assert(ScalarIVTy->isIntegerTy() && ScalarIVTy == Step->getType() &&
         "Val and Step should have the same integer type");

  // Determine the number of scalars we need to generate for each unroll
  // iteration. If EntryVal is uniform, we only need to generate the first
  // lane. Otherwise, we generate all VF values.
  unsigned Lanes =
      Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1
                                                                         : VF;

  // Compute the scalar steps and save the results in VectorLoopValueMap.
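  // E.g., with UF = 2, VF = 4, and a step of 1, part 0 holds
  // ScalarIV + {0, 1, 2, 3} and part 1 holds ScalarIV + {4, 5, 6, 7}
  // (a sketch, assuming all lanes are needed).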
  ScalarParts Entry(UF);
  for (unsigned Part = 0; Part < UF; ++Part) {
    Entry[Part].resize(VF);
    for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
      auto *StartIdx = ConstantInt::get(ScalarIVTy, VF * Part + Lane);
      auto *Mul = Builder.CreateMul(StartIdx, Step);
      auto *Add = Builder.CreateAdd(ScalarIV, Mul);
      Entry[Part][Lane] = Add;
    }
  }
  VectorLoopValueMap.initScalar(EntryVal, Entry);
}

int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {

  const ValueToValueMap &Strides =
      getSymbolicStrides() ? *getSymbolicStrides() : ValueToValueMap();

  int Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, true, false);
  if (Stride == 1 || Stride == -1)
    return Stride;
  return 0;
}

bool LoopVectorizationLegality::isUniform(Value *V) {
  return LAI->isUniform(V);
}

const InnerLoopVectorizer::VectorParts &
InnerLoopVectorizer::getVectorValue(Value *V) {
  assert(V != Induction && "The new induction variable should not be used.");
  assert(!V->getType()->isVectorTy() && "Can't widen a vector");
  assert(!V->getType()->isVoidTy() && "Type does not produce a value");

  // If we have a stride that is replaced by one, do it here.
  if (Legal->hasStride(V))
    V = ConstantInt::get(V->getType(), 1);

  // If we have this scalar in the map, return it.
  if (VectorLoopValueMap.hasVector(V))
    return VectorLoopValueMap.VectorMapStorage[V];

  // If the value has not been vectorized, check if it has been scalarized
  // instead. If it has been scalarized, and we actually need the value in
  // vector form, we will construct the vector values on demand.
  if (VectorLoopValueMap.hasScalar(V)) {

    // Initialize a new vector map entry.
    VectorParts Entry(UF);

    // If we've scalarized a value, that value should be an instruction.
    auto *I = cast<Instruction>(V);

    // If we aren't vectorizing, we can just copy the scalar map values over to
    // the vector map.
    if (VF == 1) {
      for (unsigned Part = 0; Part < UF; ++Part)
        Entry[Part] = getScalarValue(V, Part, 0);
      return VectorLoopValueMap.initVector(V, Entry);
    }

    // Get the last scalar instruction we generated for V. If the value is
    // known to be uniform after vectorization, this corresponds to lane zero
    // of the last unroll iteration. Otherwise, the last instruction is the one
    // we created for the last vector lane of the last unroll iteration.
    unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
    auto *LastInst = cast<Instruction>(getScalarValue(V, UF - 1, LastLane));

    // Set the insert point after the last scalarized instruction. This ensures
    // the insertelement sequence will directly follow the scalar definitions.
    auto OldIP = Builder.saveIP();
    auto NewIP = std::next(BasicBlock::iterator(LastInst));
    Builder.SetInsertPoint(&*NewIP);

    // However, if we are vectorizing, we need to construct the vector values.
    // If the value is known to be uniform after vectorization, we can just
    // broadcast the scalar value corresponding to lane zero for each unroll
    // iteration. Otherwise, we construct the vector values using insertelement
    // instructions. Since the resulting vectors are stored in
    // VectorLoopValueMap, we will only generate the insertelements once.
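    // E.g., for UF = 2 and VF = 4, a non-uniform scalarized value becomes two
    // chains of four insertelements, one chain per unroll part (a sketch).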
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *VectorValue = nullptr;
      if (Cost->isUniformAfterVectorization(I, VF)) {
        VectorValue = getBroadcastInstrs(getScalarValue(V, Part, 0));
      } else {
        VectorValue = UndefValue::get(VectorType::get(V->getType(), VF));
        for (unsigned Lane = 0; Lane < VF; ++Lane)
          VectorValue = Builder.CreateInsertElement(
              VectorValue, getScalarValue(V, Part, Lane),
              Builder.getInt32(Lane));
      }
      Entry[Part] = VectorValue;
    }
    Builder.restoreIP(OldIP);
    return VectorLoopValueMap.initVector(V, Entry);
  }

  // If this scalar is unknown, assume that it is a constant or that it is
  // loop invariant. Broadcast V and save the value for future uses.
  Value *B = getBroadcastInstrs(V);
  return VectorLoopValueMap.initVector(V, VectorParts(UF, B));
}

Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part,
                                           unsigned Lane) {

  // If the value is not an instruction contained in the loop, it should
  // already be scalar.
  if (OrigLoop->isLoopInvariant(V))
    return V;

  assert(Lane > 0
             ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)
             : true && "Uniform values only have lane zero");

  // If the value from the original loop has not been vectorized, it is
  // represented by UF x VF scalar values in the new loop. Return the requested
  // scalar value.
  if (VectorLoopValueMap.hasScalar(V))
    return VectorLoopValueMap.ScalarMapStorage[V][Part][Lane];

  // If the value has not been scalarized, get its entry in VectorLoopValueMap
  // for the given unroll part. If this entry is not a vector type (i.e., the
  // vectorization factor is one), there is no need to generate an
  // extractelement instruction.
  auto *U = getVectorValue(V)[Part];
  if (!U->getType()->isVectorTy()) {
    assert(VF == 1 && "Value not scalarized has non-vector type");
    return U;
  }

  // Otherwise, the value from the original loop has been vectorized and is
  // represented by UF vector values. Extract and return the requested scalar
  // value from the appropriate vector lane.
  return Builder.CreateExtractElement(U, Builder.getInt32(Lane));
}

Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
  assert(Vec->getType()->isVectorTy() && "Invalid type");
  SmallVector<Constant *, 8> ShuffleMask;
  for (unsigned i = 0; i < VF; ++i)
    ShuffleMask.push_back(Builder.getInt32(VF - i - 1));

  return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
                                     ConstantVector::get(ShuffleMask),
                                     "reverse");
}

// Try to vectorize the interleave group that \p Instr belongs to.
//
// E.g. Translate the following interleaved load group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     R = Pic[i];   // Member of index 0
//     G = Pic[i+1]; // Member of index 1
//     B = Pic[i+2]; // Member of index 2
//     ...           // do something to R, G, B
//   }
// To:
//   %wide.vec = load <12 x i32>                          ; Read 4 tuples of R,G,B
//   %R.vec    = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
//   %G.vec    = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
//   %B.vec    = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
//
// Or translate the following interleaved store group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     ...           // do something to R, G, B
//     Pic[i]   = R; // Member of index 0
//     Pic[i+1] = G; // Member of index 1
//     Pic[i+2] = B; // Member of index 2
//   }
// To:
//   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
//   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
//   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
//            <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
//   store <12 x i32> %interleaved.vec               ; Write 4 tuples of R,G,B
void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
  const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr);
  assert(Group && "Fail to get an interleaved access group.");

  // Skip if the current instruction is not the insert position.
  if (Instr != Group->getInsertPos())
    return;

  Value *Ptr = getPointerOperand(Instr);

  // Prepare for the vector type of the interleaved load/store.
  Type *ScalarTy = getMemInstValueType(Instr);
  unsigned InterleaveFactor = Group->getFactor();
  Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
  Type *PtrTy = VecTy->getPointerTo(getMemInstAddressSpace(Instr));

  // Prepare for the new pointers.
  setDebugLocFromInst(Builder, Ptr);
  SmallVector<Value *, 2> NewPtrs;
  unsigned Index = Group->getIndex(Instr);

  // If the group is reverse, adjust the index to refer to the last vector lane
  // instead of the first. We adjust the index from the first vector lane,
  // rather than directly getting the pointer for lane VF - 1, because the
  // pointer operand of the interleaved access is supposed to be uniform. For
  // uniform instructions, we're only required to generate a value for the
  // first vector lane in each unroll iteration.
  if (Group->isReverse())
    Index += (VF - 1) * Group->getFactor();

  for (unsigned Part = 0; Part < UF; Part++) {
    Value *NewPtr = getScalarValue(Ptr, Part, 0);

    // Notice that the current instruction could be at any index in the group.
    // We need to adjust the address to that of the member at index 0.
    //
    // E.g. a = A[i+1]; // Member of index 1 (Current instruction)
    //      b = A[i];   // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g. A[i+1] = a; // Member of index 1
    //      A[i]   = b; // Member of index 0
    //      A[i+2] = c; // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
    NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));

    // Cast to the vector pointer type.
    NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
  }

  setDebugLocFromInst(Builder, Instr);
  Value *UndefVec = UndefValue::get(VecTy);

  // Vectorize the interleaved load group.
  if (isa<LoadInst>(Instr)) {

    // For each unroll part, create a wide load for the group.
    SmallVector<Value *, 2> NewLoads;
    for (unsigned Part = 0; Part < UF; Part++) {
      auto *NewLoad = Builder.CreateAlignedLoad(
          NewPtrs[Part], Group->getAlignment(), "wide.vec");
      addMetadata(NewLoad, Instr);
      NewLoads.push_back(NewLoad);
    }

    // For each member in the group, shuffle out the appropriate data from the
    // wide loads.
    for (unsigned I = 0; I < InterleaveFactor; ++I) {
      Instruction *Member = Group->getMember(I);

      // Skip the gaps in the group.
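      // E.g., a factor-3 group covering only A[i] and A[i+2] has no member at
      // index 1; no shuffle is emitted for that index.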
2801 if (!Member)
2802 continue;
2803
2804 VectorParts Entry(UF);
2805 Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF);
2806 for (unsigned Part = 0; Part < UF; Part++) {
2807 Value *StridedVec = Builder.CreateShuffleVector(
2808 NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2809
2810 // If this member has a different type, cast the result to that type.
2811 if (Member->getType() != ScalarTy) {
2812 VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2813 StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy);
2814 }
2815
2816 Entry[Part] =
2817 Group->isReverse() ? reverseVector(StridedVec) : StridedVec;
2818 }
2819 VectorLoopValueMap.initVector(Member, Entry);
2820 }
2821 return;
2822 }
2823
2824 // The subvector type for the current instruction.
2825 VectorType *SubVT = VectorType::get(ScalarTy, VF);
2826
2827 // Vectorize the interleaved store group.
2828 for (unsigned Part = 0; Part < UF; Part++) {
2829 // Collect the stored vector from each member.
2830 SmallVector<Value *, 4> StoredVecs;
2831 for (unsigned i = 0; i < InterleaveFactor; i++) {
2832 // An interleaved store group doesn't allow gaps, so each index has a member.
2833 Instruction *Member = Group->getMember(i);
2834 assert(Member && "Failed to get a member from an interleaved store group");
2835
2836 Value *StoredVec =
2837 getVectorValue(cast<StoreInst>(Member)->getValueOperand())[Part];
2838 if (Group->isReverse())
2839 StoredVec = reverseVector(StoredVec);
2840
2841 // If this member has a different type, cast it to the unified type.
2842 if (StoredVec->getType() != SubVT)
2843 StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT);
2844
2845 StoredVecs.push_back(StoredVec);
2846 }
2847
2848 // Concatenate all vectors into a wide vector.
2849 Value *WideVec = concatenateVectors(Builder, StoredVecs);
2850
2851 // Interleave the elements in the wide vector.
2852 Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor);
2853 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
2854 "interleaved.vec");
2855
2856 Instruction *NewStoreInstr =
2857 Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
2858 addMetadata(NewStoreInstr, Instr);
2859 }
2860 }
2861
2862 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
2863 // Attempt to issue a wide load.
2864 LoadInst *LI = dyn_cast<LoadInst>(Instr);
2865 StoreInst *SI = dyn_cast<StoreInst>(Instr);
2866
2867 assert((LI || SI) && "Invalid Load/Store instruction");
2868
2869 LoopVectorizationCostModel::InstWidening Decision =
2870 Cost->getWideningDecision(Instr, VF);
2871 assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
2872 "CM decision should be taken at this point");
2873 if (Decision == LoopVectorizationCostModel::CM_Interleave)
2874 return vectorizeInterleaveGroup(Instr);
2875
2876 Type *ScalarDataTy = getMemInstValueType(Instr);
2877 Type *DataTy = VectorType::get(ScalarDataTy, VF);
2878 Value *Ptr = getPointerOperand(Instr);
2879 unsigned Alignment = getMemInstAlignment(Instr);
2880 // An alignment of 0 means target ABI alignment. We need to use the scalar's
2881 // target ABI alignment in such a case.
2882 const DataLayout &DL = Instr->getModule()->getDataLayout();
2883 if (!Alignment)
2884 Alignment = DL.getABITypeAlignment(ScalarDataTy);
2885 unsigned AddressSpace = getMemInstAddressSpace(Instr);
2886
2887 // Scalarize the memory instruction if necessary.
2888 if (Decision == LoopVectorizationCostModel::CM_Scalarize)
2889 return scalarizeInstruction(Instr, Legal->isScalarWithPredication(Instr));
2890
2891 // Determine if the pointer operand of the access is either consecutive or
2892 // reverse consecutive.
2893 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
2894 bool Reverse = ConsecutiveStride < 0;
2895 bool CreateGatherScatter =
2896 (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2897
2898 VectorParts VectorGep;
2899
2900 // Handle consecutive loads/stores.
2901 GetElementPtrInst *Gep = getGEPInstruction(Ptr);
2902 if (ConsecutiveStride) {
2903 if (Gep) {
2904 unsigned NumOperands = Gep->getNumOperands();
2905 #ifndef NDEBUG
2906 // The original GEP that was identified as a consecutive memory access
2907 // should have only one loop-variant operand.
2908 unsigned NumOfLoopVariantOps = 0;
2909 for (unsigned i = 0; i < NumOperands; ++i)
2910 if (!PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)),
2911 OrigLoop))
2912 NumOfLoopVariantOps++;
2913 assert(NumOfLoopVariantOps == 1 &&
2914 "Consecutive GEP should have only one loop-variant operand");
2915 #endif
2916 GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
2917 Gep2->setName("gep.indvar");
2918
2919 // A new GEP is created for a 0-lane value of the first unroll iteration.
2920 // The GEPs for the rest of the unroll iterations are computed below as an
2921 // offset from this GEP.
2922 for (unsigned i = 0; i < NumOperands; ++i)
2923 // We can apply getScalarValue() to all GEP indices. It returns the
2924 // original value for a loop-invariant operand and the 0-lane value for
2925 // a consecutive operand.
2926 Gep2->setOperand(i, getScalarValue(Gep->getOperand(i),
2927 0, /* First unroll iteration */
2928 0 /* 0-lane of the vector */ ));
2929 setDebugLocFromInst(Builder, Gep);
2930 Ptr = Builder.Insert(Gep2);
2931
2932 } else { // No GEP
2933 setDebugLocFromInst(Builder, Ptr);
2934 Ptr = getScalarValue(Ptr, 0, 0);
2935 }
2936 } else {
2937 // At this point we should have a vector version of the GEP for gather or scatter.
2938 assert(CreateGatherScatter && "The instruction should be scalarized");
2939 if (Gep) {
2940 // Vectorize the GEP across UF parts. We want to get a vector value for the
2941 // base and each index that's defined inside the loop, even if it is
2942 // loop-invariant but wasn't hoisted out. Otherwise we want to keep them
2943 // scalar.
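// For illustration (a sketch, not from the original comments): a scalar GEP
// such as
//   %gep = getelementptr double, double* %base, i64 %iv
// with a loop-varying %iv becomes, per unroll part, a vector GEP producing
// VF pointers at once (assuming VF = 4):
//   %VectorGep = getelementptr double, double* %base, <4 x i64> %iv.vec
// These pointer vectors then feed the masked gather/scatter intrinsics below.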
2944 SmallVector<VectorParts, 4> OpsV; 2945 for (Value *Op : Gep->operands()) { 2946 Instruction *SrcInst = dyn_cast<Instruction>(Op); 2947 if (SrcInst && OrigLoop->contains(SrcInst)) 2948 OpsV.push_back(getVectorValue(Op)); 2949 else 2950 OpsV.push_back(VectorParts(UF, Op)); 2951 } 2952 for (unsigned Part = 0; Part < UF; ++Part) { 2953 SmallVector<Value *, 4> Ops; 2954 Value *GEPBasePtr = OpsV[0][Part]; 2955 for (unsigned i = 1; i < Gep->getNumOperands(); i++) 2956 Ops.push_back(OpsV[i][Part]); 2957 Value *NewGep = Builder.CreateGEP(GEPBasePtr, Ops, "VectorGep"); 2958 cast<GetElementPtrInst>(NewGep)->setIsInBounds(Gep->isInBounds()); 2959 assert(NewGep->getType()->isVectorTy() && "Expected vector GEP"); 2960 2961 NewGep = 2962 Builder.CreateBitCast(NewGep, VectorType::get(Ptr->getType(), VF)); 2963 VectorGep.push_back(NewGep); 2964 } 2965 } else 2966 VectorGep = getVectorValue(Ptr); 2967 } 2968 2969 VectorParts Mask = createBlockInMask(Instr->getParent()); 2970 // Handle Stores: 2971 if (SI) { 2972 assert(!Legal->isUniform(SI->getPointerOperand()) && 2973 "We do not allow storing to uniform addresses"); 2974 setDebugLocFromInst(Builder, SI); 2975 // We don't want to update the value in the map as it might be used in 2976 // another expression. So don't use a reference type for "StoredVal". 2977 VectorParts StoredVal = getVectorValue(SI->getValueOperand()); 2978 2979 for (unsigned Part = 0; Part < UF; ++Part) { 2980 Instruction *NewSI = nullptr; 2981 if (CreateGatherScatter) { 2982 Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr; 2983 NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part], 2984 Alignment, MaskPart); 2985 } else { 2986 // Calculate the pointer for the specific unroll-part. 2987 Value *PartPtr = 2988 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 2989 2990 if (Reverse) { 2991 // If we store to reverse consecutive memory locations, then we need 2992 // to reverse the order of elements in the stored value. 2993 StoredVal[Part] = reverseVector(StoredVal[Part]); 2994 // If the address is consecutive but reversed, then the 2995 // wide store needs to start at the last vector element. 2996 PartPtr = 2997 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 2998 PartPtr = 2999 Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 3000 Mask[Part] = reverseVector(Mask[Part]); 3001 } 3002 3003 Value *VecPtr = 3004 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 3005 3006 if (Legal->isMaskRequired(SI)) 3007 NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment, 3008 Mask[Part]); 3009 else 3010 NewSI = 3011 Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment); 3012 } 3013 addMetadata(NewSI, SI); 3014 } 3015 return; 3016 } 3017 3018 // Handle loads. 3019 assert(LI && "Must have a load instruction"); 3020 setDebugLocFromInst(Builder, LI); 3021 VectorParts Entry(UF); 3022 for (unsigned Part = 0; Part < UF; ++Part) { 3023 Instruction *NewLI; 3024 if (CreateGatherScatter) { 3025 Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr; 3026 NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, MaskPart, 3027 0, "wide.masked.gather"); 3028 Entry[Part] = NewLI; 3029 } else { 3030 // Calculate the pointer for the specific unroll-part. 3031 Value *PartPtr = 3032 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 3033 3034 if (Reverse) { 3035 // If the address is consecutive but reversed, then the 3036 // wide load needs to start at the last vector element. 
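// As a worked example (illustrative values, not from the original comments):
// with VF = 4 and Part = 1, the two GEPs below compute
// Ptr + (-1 * 4) + (1 - 4) = Ptr - 7, so the wide load reads elements
// Ptr[-7] .. Ptr[-4]; the reverseVector() call afterwards restores the order
// in which the original scalar loop would have visited them.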
3037 PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
3038 PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
3039 Mask[Part] = reverseVector(Mask[Part]);
3040 }
3041
3042 Value *VecPtr =
3043 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
3044 if (Legal->isMaskRequired(LI))
3045 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
3046 UndefValue::get(DataTy),
3047 "wide.masked.load");
3048 else
3049 NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
3050 Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI;
3051 }
3052 addMetadata(NewLI, LI);
3053 }
3054 VectorLoopValueMap.initVector(Instr, Entry);
3055 }
3056
3057 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
3058 bool IfPredicateInstr) {
3059 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
3060 DEBUG(dbgs() << "LV: Scalarizing"
3061 << (IfPredicateInstr ? " and predicating:" : ":") << *Instr
3062 << '\n');
3063 // Holds vector parameters or scalars, in the case of uniform values.
3064 SmallVector<VectorParts, 4> Params;
3065
3066 setDebugLocFromInst(Builder, Instr);
3067
3068 // Does this instruction return a value?
3069 bool IsVoidRetTy = Instr->getType()->isVoidTy();
3070
3071 // Initialize a new scalar map entry.
3072 ScalarParts Entry(UF);
3073
3074 VectorParts Cond;
3075 if (IfPredicateInstr)
3076 Cond = createBlockInMask(Instr->getParent());
3077
3078 // Determine the number of scalars we need to generate for each unroll
3079 // iteration. If the instruction is uniform, we only need to generate the
3080 // first lane. Otherwise, we generate all VF values.
3081 unsigned Lanes = Cost->isUniformAfterVectorization(Instr, VF) ? 1 : VF;
3082
3083 // For each vector unroll 'part':
3084 for (unsigned Part = 0; Part < UF; ++Part) {
3085 Entry[Part].resize(VF);
3086 // For each scalar that we create:
3087 for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
3088
3089 // Start if-block.
3090 Value *Cmp = nullptr;
3091 if (IfPredicateInstr) {
3092 Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Lane));
3093 Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp,
3094 ConstantInt::get(Cmp->getType(), 1));
3095 }
3096
3097 Instruction *Cloned = Instr->clone();
3098 if (!IsVoidRetTy)
3099 Cloned->setName(Instr->getName() + ".cloned");
3100
3101 // Replace the operands of the cloned instruction with their scalar
3102 // equivalents in the new loop.
3103 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
3104 auto *NewOp = getScalarValue(Instr->getOperand(op), Part, Lane);
3105 Cloned->setOperand(op, NewOp);
3106 }
3107 addNewMetadata(Cloned, Instr);
3108
3109 // Place the cloned scalar in the new loop.
3110 Builder.Insert(Cloned);
3111
3112 // Add the cloned scalar to the scalar map entry.
3113 Entry[Part][Lane] = Cloned;
3114
3115 // If we just cloned a new assumption, add it to the assumption cache.
3116 if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
3117 if (II->getIntrinsicID() == Intrinsic::assume)
3118 AC->registerAssumption(II);
3119
3120 // End if-block.
3121 if (IfPredicateInstr)
3122 PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp));
3123 }
3124 }
3125 VectorLoopValueMap.initScalar(Instr, Entry);
3126 }
3127
3128 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
3129 Value *End, Value *Step,
3130 Instruction *DL) {
3131 BasicBlock *Header = L->getHeader();
3132 BasicBlock *Latch = L->getLoopLatch();
3133 // As we're just creating this loop, it's possible no latch exists
3134 // yet. If so, use the header as this will be a single block loop.
3135 if (!Latch)
3136 Latch = Header;
3137
3138 IRBuilder<> Builder(&*Header->getFirstInsertionPt());
3139 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
3140 setDebugLocFromInst(Builder, OldInst);
3141 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
3142
3143 Builder.SetInsertPoint(Latch->getTerminator());
3144 setDebugLocFromInst(Builder, OldInst);
3145
3146 // Create i+1 and fill the PHINode.
3147 Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
3148 Induction->addIncoming(Start, L->getLoopPreheader());
3149 Induction->addIncoming(Next, Latch);
3150 // Create the compare.
3151 Value *ICmp = Builder.CreateICmpEQ(Next, End);
3152 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
3153
3154 // Now we have two terminators. Remove the old one from the block.
3155 Latch->getTerminator()->eraseFromParent();
3156
3157 return Induction;
3158 }
3159
3160 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3161 if (TripCount)
3162 return TripCount;
3163
3164 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3165 // Find the loop boundaries.
3166 ScalarEvolution *SE = PSE.getSE();
3167 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3168 assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
3169 "Invalid loop count");
3170
3171 Type *IdxTy = Legal->getWidestInductionType();
3172
3173 // The exit count might have the type i64 while the phi is i32. This can
3174 // happen if we have an induction variable that is sign-extended before the
3175 // compare. The only way we can get a backedge-taken count in that case is if
3176 // the induction variable was signed, and as such it will not overflow, so
3177 // the truncation is legal.
3178 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
3179 IdxTy->getPrimitiveSizeInBits())
3180 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3181 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3182
3183 // Get the total trip count from the count by adding 1.
3184 const SCEV *ExitCount = SE->getAddExpr(
3185 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3186
3187 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3188
3189 // Expand the trip count and place the new instructions in the preheader.
3190 // Notice that the pre-header does not change, only the loop body.
3191 SCEVExpander Exp(*SE, DL, "induction");
3192
3193 // Count holds the overall loop count (N).
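// For example: for 'for (i = 0; i < n; ++i)' the backedge-taken count is
// n - 1, so the trip count expanded below is n.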
3194 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3195 L->getLoopPreheader()->getTerminator());
3196
3197 if (TripCount->getType()->isPointerTy())
3198 TripCount =
3199 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3200 L->getLoopPreheader()->getTerminator());
3201
3202 return TripCount;
3203 }
3204
3205 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3206 if (VectorTripCount)
3207 return VectorTripCount;
3208
3209 Value *TC = getOrCreateTripCount(L);
3210 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3211
3212 // Now we need to generate the expression for the part of the loop that the
3213 // vectorized body will execute. This is equal to N - (N % Step) if scalar
3214 // iterations are not required for correctness, or N - Step otherwise. Step
3215 // is equal to the vectorization factor (number of SIMD elements) times the
3216 // unroll factor (number of SIMD instructions).
3217 Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
3218 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3219
3220 // If there is a non-reversed interleaved group that may speculatively access
3221 // memory out-of-bounds, we need to ensure that there will be at least one
3222 // iteration of the scalar epilogue loop. Thus, if the step evenly divides
3223 // the trip count, we set the remainder to be equal to the step. If the step
3224 // does not evenly divide the trip count, no adjustment is necessary since
3225 // there will already be scalar iterations. Note that the minimum iterations
3226 // check ensures that N >= Step.
3227 if (VF > 1 && Legal->requiresScalarEpilogue()) {
3228 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3229 R = Builder.CreateSelect(IsZero, Step, R);
3230 }
3231
3232 VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3233
3234 return VectorTripCount;
3235 }
3236
3237 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3238 BasicBlock *Bypass) {
3239 Value *Count = getOrCreateTripCount(L);
3240 BasicBlock *BB = L->getLoopPreheader();
3241 IRBuilder<> Builder(BB->getTerminator());
3242
3243 // Generate code to check that the loop's trip count, which we computed by
3244 // adding one to the backedge-taken count, is at least the step (VF * UF); if the addition overflowed, the count is zero and this check catches that case too.
3245 Value *CheckMinIters = Builder.CreateICmpULT(
3246 Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");
3247
3248 BasicBlock *NewBB =
3249 BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked");
3250 // Update dominator tree immediately if the generated block is a
3251 // LoopBypassBlock because SCEV expansions to generate loop bypass
3252 // checks may query it before the current function is finished.
3253 DT->addNewBlock(NewBB, BB);
3254 if (L->getParentLoop())
3255 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3256 ReplaceInstWithInst(BB->getTerminator(),
3257 BranchInst::Create(Bypass, NewBB, CheckMinIters));
3258 LoopBypassBlocks.push_back(BB);
3259 }
3260
3261 void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L,
3262 BasicBlock *Bypass) {
3263 Value *TC = getOrCreateVectorTripCount(L);
3264 BasicBlock *BB = L->getLoopPreheader();
3265 IRBuilder<> Builder(BB->getTerminator());
3266
3267 // Now, compare the new count to zero. If it is zero, skip the vector loop
3268 // and jump to the scalar loop.
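// E.g. (illustrative numbers): with N = 10, VF = 4 and UF = 1, the vector
// trip count computed above is 10 - 10 % 4 = 8, which is non-zero, so the
// vector loop runs; a vector trip count of zero branches straight to the
// scalar loop instead.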
3269 Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
3270 "cmp.zero");
3271
3272 // Split the preheader so that the zero-trip-count check above can branch
3273 // around the vector loop.
3274 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3275 // Update dominator tree immediately if the generated block is a
3276 // LoopBypassBlock because SCEV expansions to generate loop bypass
3277 // checks may query it before the current function is finished.
3278 DT->addNewBlock(NewBB, BB);
3279 if (L->getParentLoop())
3280 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3281 ReplaceInstWithInst(BB->getTerminator(),
3282 BranchInst::Create(Bypass, NewBB, Cmp));
3283 LoopBypassBlocks.push_back(BB);
3284 }
3285
3286 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3287 BasicBlock *BB = L->getLoopPreheader();
3288
3289 // Generate the code to check the SCEV assumptions that we made.
3290 // We want the new basic block to start at the first instruction in a
3291 // sequence of instructions that form a check.
3292 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
3293 "scev.check");
3294 Value *SCEVCheck =
3295 Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
3296
3297 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
3298 if (C->isZero())
3299 return;
3300
3301 // Create a new block containing the SCEV check.
3302 BB->setName("vector.scevcheck");
3303 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3304 // Update dominator tree immediately if the generated block is a
3305 // LoopBypassBlock because SCEV expansions to generate loop bypass
3306 // checks may query it before the current function is finished.
3307 DT->addNewBlock(NewBB, BB);
3308 if (L->getParentLoop())
3309 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3310 ReplaceInstWithInst(BB->getTerminator(),
3311 BranchInst::Create(Bypass, NewBB, SCEVCheck));
3312 LoopBypassBlocks.push_back(BB);
3313 AddedSafetyChecks = true;
3314 }
3315
3316 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
3317 BasicBlock *BB = L->getLoopPreheader();
3318
3319 // Generate the code that checks at runtime whether arrays overlap. We put
3320 // the checks into a separate block to make the common case of few elements
3321 // faster.
3322 Instruction *FirstCheckInst;
3323 Instruction *MemRuntimeCheck;
3324 std::tie(FirstCheckInst, MemRuntimeCheck) =
3325 Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
3326 if (!MemRuntimeCheck)
3327 return;
3328
3329 // Create a new block containing the memory check.
3330 BB->setName("vector.memcheck");
3331 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3332 // Update dominator tree immediately if the generated block is a
3333 // LoopBypassBlock because SCEV expansions to generate loop bypass
3334 // checks may query it before the current function is finished.
3335 DT->addNewBlock(NewBB, BB);
3336 if (L->getParentLoop())
3337 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3338 ReplaceInstWithInst(BB->getTerminator(),
3339 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
3340 LoopBypassBlocks.push_back(BB);
3341 AddedSafetyChecks = true;
3342
3343 // We currently don't use LoopVersioning for the actual loop cloning but we
3344 // still use it to add the noalias metadata.
3345 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
3346 PSE.getSE());
3347 LVer->prepareNoAliasMetadata();
3348 }
3349
3350 void InnerLoopVectorizer::createEmptyLoop() {
3351 /*
3352 In this function we generate a new loop. The new loop will contain
3353 the vectorized instructions while the old loop will continue to run the
3354 scalar remainder.
3355
3356 [ ] <-- loop iteration number check.
3357 / |
3358 / v
3359 | [ ] <-- vector loop bypass (may consist of multiple blocks).
3360 | / |
3361 | / v
3362 || [ ] <-- vector pre header.
3363 |/ |
3364 | v
3365 | [ ] \
3366 | [ ]_| <-- vector loop.
3367 | |
3368 | v
3369 | -[ ] <--- middle-block.
3370 | / |
3371 | / v
3372 -|- >[ ] <--- new preheader.
3373 | |
3374 | v
3375 | [ ] \
3376 | [ ]_| <-- old scalar loop to handle remainder.
3377 \ |
3378 \ v
3379 >[ ] <-- exit block.
3380 ...
3381 */
3382
3383 BasicBlock *OldBasicBlock = OrigLoop->getHeader();
3384 BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
3385 BasicBlock *ExitBlock = OrigLoop->getExitBlock();
3386 assert(VectorPH && "Invalid loop structure");
3387 assert(ExitBlock && "Must have an exit block");
3388
3389 // Some loops have a single integer induction variable, while other loops
3390 // don't. One example is C++ iterators that often have multiple pointer
3391 // induction variables. In the code below we also support a case where we
3392 // don't have a single induction variable.
3393 //
3394 // We try as hard as possible to obtain an induction variable from the
3395 // original loop. However, if we don't find one that:
3396 // - is an integer
3397 // - counts from zero, stepping by one
3398 // - is the size of the widest induction variable type
3399 // then we create a new one.
3400 OldInduction = Legal->getInduction();
3401 Type *IdxTy = Legal->getWidestInductionType();
3402
3403 // Split the single block loop into the two-loop structure described above.
3404 BasicBlock *VecBody =
3405 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
3406 BasicBlock *MiddleBlock =
3407 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
3408 BasicBlock *ScalarPH =
3409 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
3410
3411 // Create and register the new vector loop.
3412 Loop *Lp = new Loop();
3413 Loop *ParentLoop = OrigLoop->getParentLoop();
3414
3415 // Insert the new loop into the loop nest and register the new basic blocks
3416 // before calling any utilities such as SCEV that require valid LoopInfo.
3417 if (ParentLoop) {
3418 ParentLoop->addChildLoop(Lp);
3419 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
3420 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
3421 } else {
3422 LI->addTopLevelLoop(Lp);
3423 }
3424 Lp->addBasicBlockToLoop(VecBody, *LI);
3425
3426 // Find the loop boundaries.
3427 Value *Count = getOrCreateTripCount(Lp);
3428
3429 Value *StartIdx = ConstantInt::get(IdxTy, 0);
3430
3431 // We need to test whether the backedge-taken count is uint##_max. Adding one
3432 // to it will cause overflow and an incorrect loop trip count in the vector
3433 // body. In case of overflow we want to jump directly to the scalar remainder
3434 // loop.
3435 emitMinimumIterationCountCheck(Lp, ScalarPH);
3436 // Now, compare the new count to zero. If it is zero, skip the vector loop
3437 // and jump to the scalar loop.
3438 emitVectorLoopEnteredCheck(Lp, ScalarPH);
3439 // Generate the code to check any assumptions that we've made for SCEV
3440 // expressions.
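// (E.g., a SCEV predicate may assume that a symbolic stride is one or that
// an induction variable does not wrap; if the expanded runtime check fails,
// we branch to the scalar loop.)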
3441 emitSCEVChecks(Lp, ScalarPH);
3442
3443 // Generate the code that checks at runtime whether arrays overlap. We put
3444 // the checks into a separate block to make the common case of few elements
3445 // faster.
3446 emitMemRuntimeChecks(Lp, ScalarPH);
3447
3448 // Generate the induction variable.
3449 // The loop step is equal to the vectorization factor (num of SIMD elements)
3450 // times the unroll factor (num of SIMD instructions).
3451 Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3452 Constant *Step = ConstantInt::get(IdxTy, VF * UF);
3453 Induction =
3454 createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3455 getDebugLocFromInstOrOperands(OldInduction));
3456
3457 // We are going to resume the execution of the scalar loop.
3458 // Go over all of the induction variables that we found and fix the
3459 // PHIs that are left in the scalar version of the loop.
3460 // The starting values of the PHI nodes depend on the counter of the last
3461 // iteration in the vectorized loop.
3462 // If we come from a bypass edge then we need to start from the original
3463 // start value.
3464
3465 // The resume values created below save the new starting indices for the
3466 // scalar loop. They are used to test whether there are any tail iterations
3467 // left once the vector loop has completed.
3468 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
3469 for (auto &InductionEntry : *List) {
3470 PHINode *OrigPhi = InductionEntry.first;
3471 InductionDescriptor II = InductionEntry.second;
3472
3473 // Create phi nodes to merge from the backedge-taken check block.
3474 PHINode *BCResumeVal = PHINode::Create(
3475 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
3476 Value *&EndValue = IVEndValues[OrigPhi];
3477 if (OrigPhi == OldInduction) {
3478 // We know what the end value is.
3479 EndValue = CountRoundDown;
3480 } else {
3481 IRBuilder<> B(LoopBypassBlocks.back()->getTerminator());
3482 Type *StepType = II.getStep()->getType();
3483 Instruction::CastOps CastOp =
3484 CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
3485 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
3486 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
3487 EndValue = II.transform(B, CRD, PSE.getSE(), DL);
3488 EndValue->setName("ind.end");
3489 }
3490
3491 // The new PHI merges the original incoming value, in case of a bypass,
3492 // or the value at the end of the vectorized loop.
3493 BCResumeVal->addIncoming(EndValue, MiddleBlock);
3494
3495 // Fix the scalar body counter (PHI node).
3496 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
3497
3498 // The old induction's phi node in the scalar body needs the truncated
3499 // value.
3500 for (BasicBlock *BB : LoopBypassBlocks)
3501 BCResumeVal->addIncoming(II.getStartValue(), BB);
3502 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
3503 }
3504
3505 // Add a check in the middle block to see if we have completed
3506 // all of the iterations in the first vector loop.
3507 // If (N - N%VF) == N, then we *don't* need to run the remainder.
3508 Value *CmpN =
3509 CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
3510 CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
3511 ReplaceInstWithInst(MiddleBlock->getTerminator(),
3512 BranchInst::Create(ExitBlock, ScalarPH, CmpN));
3513
3514 // Get ready to start creating new instructions into the vectorized body.
3515 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());
3516
3517 // Save the state.
3518 LoopVectorPreHeader = Lp->getLoopPreheader();
3519 LoopScalarPreHeader = ScalarPH;
3520 LoopMiddleBlock = MiddleBlock;
3521 LoopExitBlock = ExitBlock;
3522 LoopVectorBody = VecBody;
3523 LoopScalarBody = OldBasicBlock;
3524
3525 // Keep all loop hints from the original loop on the vector loop (we'll
3526 // replace the vectorizer-specific hints below).
3527 if (MDNode *LID = OrigLoop->getLoopID())
3528 Lp->setLoopID(LID);
3529
3530 LoopVectorizeHints Hints(Lp, true, *ORE);
3531 Hints.setAlreadyVectorized();
3532 }
3533
3534 // Fix up external users of the induction variable. At this point, we are
3535 // in LCSSA form, with all external PHIs that use the IV having one input value,
3536 // coming from the remainder loop. We need those PHIs to also have a correct
3537 // value for the IV when arriving directly from the middle block.
3538 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3539 const InductionDescriptor &II,
3540 Value *CountRoundDown, Value *EndValue,
3541 BasicBlock *MiddleBlock) {
3542 // There are two kinds of external IV usages - those that use the value
3543 // computed in the last iteration (the PHI) and those that use the penultimate
3544 // value (the value that feeds into the phi from the loop latch).
3545 // We allow both, but they obviously have different values.
3546
3547 assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3548
3549 DenseMap<Value *, Value *> MissingVals;
3550
3551 // An external user of the last iteration's value should see the value that
3552 // the remainder loop uses to initialize its own IV.
3553 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3554 for (User *U : PostInc->users()) {
3555 Instruction *UI = cast<Instruction>(U);
3556 if (!OrigLoop->contains(UI)) {
3557 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3558 MissingVals[UI] = EndValue;
3559 }
3560 }
3561
3562 // An external user of the penultimate value needs to see EndValue - Step.
3563 // The simplest way to get this is to recompute it from the constituent SCEVs,
3564 // that is, Start + (Step * (CRD - 1)).
3565 for (User *U : OrigPhi->users()) {
3566 auto *UI = cast<Instruction>(U);
3567 if (!OrigLoop->contains(UI)) {
3568 const DataLayout &DL =
3569 OrigLoop->getHeader()->getModule()->getDataLayout();
3570 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3571
3572 IRBuilder<> B(MiddleBlock->getTerminator());
3573 Value *CountMinusOne = B.CreateSub(
3574 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3575 Value *CMO = B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType(),
3576 "cast.cmo");
3577 Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
3578 Escape->setName("ind.escape");
3579 MissingVals[UI] = Escape;
3580 }
3581 }
3582
3583 for (auto &I : MissingVals) {
3584 PHINode *PHI = cast<PHINode>(I.first);
3585 // One corner case we have to handle is two IVs "chasing" each other,
3586 // that is, %IV2 = phi [...], [ %IV1, %latch ]
3587 // In this case, if IV1 has an external use, we need to avoid adding both
3588 // "last value of IV1" and "penultimate value of IV2". So, verify that we
3589 // don't already have an incoming value for the middle block.
3590 if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3591 PHI->addIncoming(I.second, MiddleBlock);
3592 }
3593 }
3594
3595 namespace {
3596 struct CSEDenseMapInfo {
3597 static bool canHandle(Instruction *I) {
3598 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3599 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3600 }
3601 static inline Instruction *getEmptyKey() {
3602 return DenseMapInfo<Instruction *>::getEmptyKey();
3603 }
3604 static inline Instruction *getTombstoneKey() {
3605 return DenseMapInfo<Instruction *>::getTombstoneKey();
3606 }
3607 static unsigned getHashValue(Instruction *I) {
3608 assert(canHandle(I) && "Unknown instruction!");
3609 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3610 I->value_op_end()));
3611 }
3612 static bool isEqual(Instruction *LHS, Instruction *RHS) {
3613 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3614 LHS == getTombstoneKey() || RHS == getTombstoneKey())
3615 return LHS == RHS;
3616 return LHS->isIdenticalTo(RHS);
3617 }
3618 };
3619 }
3620
3621 /// \brief Perform CSE of induction variable instructions.
3622 static void cse(BasicBlock *BB) {
3623 // Perform simple CSE.
3624 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3625 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3626 Instruction *In = &*I++;
3627
3628 if (!CSEDenseMapInfo::canHandle(In))
3629 continue;
3630
3631 // Check if we can replace this instruction with any of the
3632 // visited instructions.
3633 if (Instruction *V = CSEMap.lookup(In)) {
3634 In->replaceAllUsesWith(V);
3635 In->eraseFromParent();
3636 continue;
3637 }
3638
3639 CSEMap[In] = In;
3640 }
3641 }
3642
3643 /// \brief Adds a 'fast' flag to floating point operations.
3644 static Value *addFastMathFlag(Value *V) {
3645 if (isa<FPMathOperator>(V)) {
3646 FastMathFlags Flags;
3647 Flags.setUnsafeAlgebra();
3648 cast<Instruction>(V)->setFastMathFlags(Flags);
3649 }
3650 return V;
3651 }
3652
3653 /// \brief Estimate the overhead of scalarizing an instruction. This is a
3654 /// convenience wrapper for the type-based getScalarizationOverhead API.
3655 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF,
3656 const TargetTransformInfo &TTI) {
3657 if (VF == 1)
3658 return 0;
3659
3660 unsigned Cost = 0;
3661 Type *RetTy = ToVectorTy(I->getType(), VF);
3662 if (!RetTy->isVoidTy())
3663 Cost += TTI.getScalarizationOverhead(RetTy, true, false);
3664
3665 if (CallInst *CI = dyn_cast<CallInst>(I)) {
3666 SmallVector<const Value *, 4> Operands(CI->arg_operands());
3667 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
3668 } else {
3669 SmallVector<const Value *, 4> Operands(I->operand_values());
3670 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
3671 }
3672
3673 return Cost;
3674 }
3675
3676 // Estimate the cost of a call instruction CI if it were vectorized with
3677 // factor VF. Return the cost of the instruction, including scalarization
3678 // overhead if it's needed. The flag NeedToScalarize shows whether the call
3679 // needs to be scalarized - i.e., either a vector version isn't available, or it is too expensive.
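// For example (hypothetical costs): with VF = 4, a scalar call cost of 10,
// and a scalarization overhead of 6, the scalarized cost is 4 * 10 + 6 = 46.
// If TLI also provides a vector variant whose call cost is, say, 20, the
// cheaper vector cost is returned and NeedToScalarize is set to false.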
3680 static unsigned getVectorCallCost(CallInst *CI, unsigned VF, 3681 const TargetTransformInfo &TTI, 3682 const TargetLibraryInfo *TLI, 3683 bool &NeedToScalarize) { 3684 Function *F = CI->getCalledFunction(); 3685 StringRef FnName = CI->getCalledFunction()->getName(); 3686 Type *ScalarRetTy = CI->getType(); 3687 SmallVector<Type *, 4> Tys, ScalarTys; 3688 for (auto &ArgOp : CI->arg_operands()) 3689 ScalarTys.push_back(ArgOp->getType()); 3690 3691 // Estimate cost of scalarized vector call. The source operands are assumed 3692 // to be vectors, so we need to extract individual elements from there, 3693 // execute VF scalar calls, and then gather the result into the vector return 3694 // value. 3695 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3696 if (VF == 1) 3697 return ScalarCallCost; 3698 3699 // Compute corresponding vector type for return value and arguments. 3700 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3701 for (Type *ScalarTy : ScalarTys) 3702 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3703 3704 // Compute costs of unpacking argument values for the scalar calls and 3705 // packing the return values to a vector. 3706 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI); 3707 3708 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3709 3710 // If we can't emit a vector call for this function, then the currently found 3711 // cost is the cost we need to return. 3712 NeedToScalarize = true; 3713 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3714 return Cost; 3715 3716 // If the corresponding vector cost is cheaper, return its cost. 3717 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3718 if (VectorCallCost < Cost) { 3719 NeedToScalarize = false; 3720 return VectorCallCost; 3721 } 3722 return Cost; 3723 } 3724 3725 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3726 // factor VF. Return the cost of the instruction, including scalarization 3727 // overhead if it's needed. 3728 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3729 const TargetTransformInfo &TTI, 3730 const TargetLibraryInfo *TLI) { 3731 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3732 assert(ID && "Expected intrinsic call!"); 3733 3734 Type *RetTy = ToVectorTy(CI->getType(), VF); 3735 SmallVector<Type *, 4> Tys; 3736 for (Value *ArgOperand : CI->arg_operands()) 3737 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 3738 3739 FastMathFlags FMF; 3740 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3741 FMF = FPMO->getFastMathFlags(); 3742 3743 return TTI.getIntrinsicInstrCost(ID, RetTy, Tys, FMF); 3744 } 3745 3746 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3747 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3748 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3749 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3750 } 3751 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3752 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3753 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3754 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3755 } 3756 3757 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3758 // For every instruction `I` in MinBWs, truncate the operands, create a 3759 // truncated version of `I` and reextend its result. InstCombine runs 3760 // later and will remove any ext/trunc pairs. 
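// For example (an illustrative sketch): if MinBWs records that an i32 add
// only needs 8 bits, then for VF = 4
//   %a = add <4 x i32> %x, %y
// is rewritten as
//   %x.tr = trunc <4 x i32> %x to <4 x i8>
//   %y.tr = trunc <4 x i32> %y to <4 x i8>
//   %a.tr = add <4 x i8> %x.tr, %y.tr
//   %a    = zext <4 x i8> %a.tr to <4 x i32>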
3761 // 3762 SmallPtrSet<Value *, 4> Erased; 3763 for (const auto &KV : Cost->getMinimalBitwidths()) { 3764 // If the value wasn't vectorized, we must maintain the original scalar 3765 // type. The absence of the value from VectorLoopValueMap indicates that it 3766 // wasn't vectorized. 3767 if (!VectorLoopValueMap.hasVector(KV.first)) 3768 continue; 3769 VectorParts &Parts = VectorLoopValueMap.getVector(KV.first); 3770 for (Value *&I : Parts) { 3771 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3772 continue; 3773 Type *OriginalTy = I->getType(); 3774 Type *ScalarTruncatedTy = 3775 IntegerType::get(OriginalTy->getContext(), KV.second); 3776 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3777 OriginalTy->getVectorNumElements()); 3778 if (TruncatedTy == OriginalTy) 3779 continue; 3780 3781 IRBuilder<> B(cast<Instruction>(I)); 3782 auto ShrinkOperand = [&](Value *V) -> Value * { 3783 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3784 if (ZI->getSrcTy() == TruncatedTy) 3785 return ZI->getOperand(0); 3786 return B.CreateZExtOrTrunc(V, TruncatedTy); 3787 }; 3788 3789 // The actual instruction modification depends on the instruction type, 3790 // unfortunately. 3791 Value *NewI = nullptr; 3792 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3793 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3794 ShrinkOperand(BO->getOperand(1))); 3795 cast<BinaryOperator>(NewI)->copyIRFlags(I); 3796 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3797 NewI = 3798 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3799 ShrinkOperand(CI->getOperand(1))); 3800 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3801 NewI = B.CreateSelect(SI->getCondition(), 3802 ShrinkOperand(SI->getTrueValue()), 3803 ShrinkOperand(SI->getFalseValue())); 3804 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3805 switch (CI->getOpcode()) { 3806 default: 3807 llvm_unreachable("Unhandled cast!"); 3808 case Instruction::Trunc: 3809 NewI = ShrinkOperand(CI->getOperand(0)); 3810 break; 3811 case Instruction::SExt: 3812 NewI = B.CreateSExtOrTrunc( 3813 CI->getOperand(0), 3814 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3815 break; 3816 case Instruction::ZExt: 3817 NewI = B.CreateZExtOrTrunc( 3818 CI->getOperand(0), 3819 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3820 break; 3821 } 3822 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3823 auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements(); 3824 auto *O0 = B.CreateZExtOrTrunc( 3825 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3826 auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements(); 3827 auto *O1 = B.CreateZExtOrTrunc( 3828 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3829 3830 NewI = B.CreateShuffleVector(O0, O1, SI->getMask()); 3831 } else if (isa<LoadInst>(I)) { 3832 // Don't do anything with the operands, just extend the result. 
3833 continue;
3834 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3835 auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
3836 auto *O0 = B.CreateZExtOrTrunc(
3837 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3838 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3839 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3840 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3841 auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
3842 auto *O0 = B.CreateZExtOrTrunc(
3843 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3844 NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3845 } else {
3846 llvm_unreachable("Unhandled instruction type!");
3847 }
3848
3849 // Lastly, extend the result.
3850 NewI->takeName(cast<Instruction>(I));
3851 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3852 I->replaceAllUsesWith(Res);
3853 cast<Instruction>(I)->eraseFromParent();
3854 Erased.insert(I);
3855 I = Res;
3856 }
3857 }
3858
3859 // We'll have created a bunch of ZExts that are now dead. Clean them up.
3860 for (const auto &KV : Cost->getMinimalBitwidths()) {
3861 // If the value wasn't vectorized, we must maintain the original scalar
3862 // type. The absence of the value from VectorLoopValueMap indicates that it
3863 // wasn't vectorized.
3864 if (!VectorLoopValueMap.hasVector(KV.first))
3865 continue;
3866 VectorParts &Parts = VectorLoopValueMap.getVector(KV.first);
3867 for (Value *&I : Parts) {
3868 ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3869 if (Inst && Inst->use_empty()) {
3870 Value *NewI = Inst->getOperand(0);
3871 Inst->eraseFromParent();
3872 I = NewI;
3873 }
3874 }
3875 }
3876 }
3877
3878 void InnerLoopVectorizer::vectorizeLoop() {
3879 //===------------------------------------------------===//
3880 //
3881 // Notice: any optimization or new instruction that goes
3882 // into the code below should also be implemented in
3883 // the cost-model.
3884 //
3885 //===------------------------------------------------===//
3886 Constant *Zero = Builder.getInt32(0);
3887
3888 // In order to support recurrences we need to be able to vectorize Phi nodes.
3889 // Phi nodes have cycles, so we need to vectorize them in two stages. First,
3890 // we create a new vector PHI node with no incoming edges. We use this value
3891 // when we vectorize all of the instructions that use the PHI. Next, after
3892 // all of the instructions in the block are complete we add the new incoming
3893 // edges to the PHI. At this point all of the instructions in the basic block
3894 // are vectorized, so we can use them to construct the PHI.
3895 PhiVector PHIsToFix;
3896
3897 // Collect instructions from the original loop that will become trivially
3898 // dead in the vectorized loop. We don't need to vectorize these
3899 // instructions.
3900 collectTriviallyDeadInstructions();
3901
3902 // Scan the loop in a topological order to ensure that defs are vectorized
3903 // before users.
3904 LoopBlocksDFS DFS(OrigLoop);
3905 DFS.perform(LI);
3906
3907 // Vectorize all of the blocks in the original loop.
3908 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
3909 vectorizeBlockInLoop(BB, &PHIsToFix);
3910
3911 // Insert truncates and extends for any truncated instructions as hints to
3912 // InstCombine.
3913 if (VF > 1)
3914 truncateToMinimalBitwidths();
3915
3916 // At this point every instruction in the original loop is widened to a
3917 // vector form. Now we need to fix the recurrences in PHIsToFix. These PHI
3918 // nodes are currently empty because we did not want to introduce cycles.
3919 // This is the second stage of vectorizing recurrences.
3920 for (PHINode *Phi : PHIsToFix) {
3921 assert(Phi && "Unable to recover vectorized PHI");
3922
3923 // Handle first-order recurrences that need to be fixed.
3924 if (Legal->isFirstOrderRecurrence(Phi)) {
3925 fixFirstOrderRecurrence(Phi);
3926 continue;
3927 }
3928
3929 // If the phi node is not a first-order recurrence, it must be a reduction.
3930 // Get its reduction variable descriptor.
3931 assert(Legal->isReductionVariable(Phi) &&
3932 "Unable to find the reduction variable");
3933 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
3934
3935 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3936 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3937 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3938 RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3939 RdxDesc.getMinMaxRecurrenceKind();
3940 setDebugLocFromInst(Builder, ReductionStartValue);
3941
3942 // We need to generate a reduction vector from the incoming scalar.
3943 // To do so, we need to generate the 'identity' vector and override
3944 // one of the elements with the incoming scalar reduction. We need
3945 // to do it in the vector-loop preheader.
3946 Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());
3947
3948 // This is the vector-clone of the value that leaves the loop.
3949 const VectorParts &VectorExit = getVectorValue(LoopExitInst);
3950 Type *VecTy = VectorExit[0]->getType();
3951
3952 // Find the reduction identity variable. Zero for addition, or and xor;
3953 // one for multiplication; -1 for and.
3954 Value *Identity;
3955 Value *VectorStart;
3956 if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3957 RK == RecurrenceDescriptor::RK_FloatMinMax) {
3958 // MinMax reductions have the start value as their identity.
3959 if (VF == 1) {
3960 VectorStart = Identity = ReductionStartValue;
3961 } else {
3962 VectorStart = Identity =
3963 Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3964 }
3965 } else {
3966 // Handle other reduction kinds:
3967 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3968 RK, VecTy->getScalarType());
3969 if (VF == 1) {
3970 Identity = Iden;
3971 // This vector is the Identity vector where the first element is the
3972 // incoming scalar reduction.
3973 VectorStart = ReductionStartValue;
3974 } else {
3975 Identity = ConstantVector::getSplat(VF, Iden);
3976
3977 // This vector is the Identity vector where the first element is the
3978 // incoming scalar reduction.
3979 VectorStart =
3980 Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3981 }
3982 }
3983
3984 // Fix the vector-loop phi.
3985
3986 // Reductions do not have to start at zero. They can start with
3987 // any loop-invariant value.
3988 const VectorParts &VecRdxPhi = getVectorValue(Phi);
3989 BasicBlock *Latch = OrigLoop->getLoopLatch();
3990 Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3991 const VectorParts &Val = getVectorValue(LoopVal);
3992 for (unsigned part = 0; part < UF; ++part) {
3993 // Make sure to add the reduction start value only to the
3994 // first unroll part.
3995 Value *StartVal = (part == 0) ?
VectorStart : Identity; 3996 cast<PHINode>(VecRdxPhi[part]) 3997 ->addIncoming(StartVal, LoopVectorPreHeader); 3998 cast<PHINode>(VecRdxPhi[part]) 3999 ->addIncoming(Val[part], LoopVectorBody); 4000 } 4001 4002 // Before each round, move the insertion point right between 4003 // the PHIs and the values we are going to write. 4004 // This allows us to write both PHINodes and the extractelement 4005 // instructions. 4006 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4007 4008 VectorParts &RdxParts = VectorLoopValueMap.getVector(LoopExitInst); 4009 setDebugLocFromInst(Builder, LoopExitInst); 4010 4011 // If the vector reduction can be performed in a smaller type, we truncate 4012 // then extend the loop exit value to enable InstCombine to evaluate the 4013 // entire expression in the smaller type. 4014 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) { 4015 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4016 Builder.SetInsertPoint(LoopVectorBody->getTerminator()); 4017 for (unsigned part = 0; part < UF; ++part) { 4018 Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy); 4019 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 4020 : Builder.CreateZExt(Trunc, VecTy); 4021 for (Value::user_iterator UI = RdxParts[part]->user_begin(); 4022 UI != RdxParts[part]->user_end();) 4023 if (*UI != Trunc) { 4024 (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd); 4025 RdxParts[part] = Extnd; 4026 } else { 4027 ++UI; 4028 } 4029 } 4030 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4031 for (unsigned part = 0; part < UF; ++part) 4032 RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy); 4033 } 4034 4035 // Reduce all of the unrolled parts into a single vector. 4036 Value *ReducedPartRdx = RdxParts[0]; 4037 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK); 4038 setDebugLocFromInst(Builder, ReducedPartRdx); 4039 for (unsigned part = 1; part < UF; ++part) { 4040 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 4041 // Floating point operations had to be 'fast' to enable the reduction. 4042 ReducedPartRdx = addFastMathFlag( 4043 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part], 4044 ReducedPartRdx, "bin.rdx")); 4045 else 4046 ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp( 4047 Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]); 4048 } 4049 4050 if (VF > 1) { 4051 // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles 4052 // and vector ops, reducing the set of values being computed by half each 4053 // round. 4054 assert(isPowerOf2_32(VF) && 4055 "Reduction emission only supported for pow2 vectors!"); 4056 Value *TmpVec = ReducedPartRdx; 4057 SmallVector<Constant *, 32> ShuffleMask(VF, nullptr); 4058 for (unsigned i = VF; i != 1; i >>= 1) { 4059 // Move the upper half of the vector to the lower half. 4060 for (unsigned j = 0; j != i / 2; ++j) 4061 ShuffleMask[j] = Builder.getInt32(i / 2 + j); 4062 4063 // Fill the rest of the mask with undef. 4064 std::fill(&ShuffleMask[i / 2], ShuffleMask.end(), 4065 UndefValue::get(Builder.getInt32Ty())); 4066 4067 Value *Shuf = Builder.CreateShuffleVector( 4068 TmpVec, UndefValue::get(TmpVec->getType()), 4069 ConstantVector::get(ShuffleMask), "rdx.shuf"); 4070 4071 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 4072 // Floating point operations had to be 'fast' to enable the reduction. 
4073 TmpVec = addFastMathFlag(Builder.CreateBinOp(
4074 (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx"));
4075 else
4076 TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind,
4077 TmpVec, Shuf);
4078 }
4079
4080 // The result is in the first element of the vector.
4081 ReducedPartRdx =
4082 Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
4083
4084 // If the reduction can be performed in a smaller type, we need to extend
4085 // the reduction to the wider type before we branch to the original loop.
4086 if (Phi->getType() != RdxDesc.getRecurrenceType())
4087 ReducedPartRdx =
4088 RdxDesc.isSigned()
4089 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
4090 : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
4091 }
4092
4093 // Create a phi node that merges control-flow from the backedge-taken check
4094 // block and the middle block.
4095 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
4096 LoopScalarPreHeader->getTerminator());
4097 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4098 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4099 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4100
4101 // Now, we need to fix the users of the reduction variable
4102 // inside and outside of the scalar remainder loop.
4103 // We know that the loop is in LCSSA form. We need to update the
4104 // PHI nodes in the exit blocks.
4105 for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
4106 LEE = LoopExitBlock->end();
4107 LEI != LEE; ++LEI) {
4108 PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
4109 if (!LCSSAPhi)
4110 break;
4111
4112 // All PHI nodes need to have a single entry edge, or two if
4113 // we already fixed them.
4114 assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
4115
4116 // We found a reduction value exit-PHI. Update it with the
4117 // incoming bypass edge.
4118 if (LCSSAPhi->getIncomingValue(0) == LoopExitInst)
4119 LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4120 } // end of the LCSSA phi scan.
4121
4122 // Fix the scalar loop reduction variable with the incoming reduction sum
4123 // from the vector body and from the backedge value.
4124 int IncomingEdgeBlockIdx =
4125 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4126 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4127 // Pick the other block.
4128 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4129 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4130 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4131 } // end of for each Phi in PHIsToFix.
4132
4133 // Update the dominator tree.
4134 //
4135 // FIXME: After creating the structure of the new loop, the dominator tree is
4136 // no longer up-to-date, and it remains that way until we update it
4137 // here. An out-of-date dominator tree is problematic for SCEV,
4138 // because SCEVExpander uses it to guide code generation. The
4139 // vectorizer uses SCEVExpander in several places. Instead, we should
4140 // keep the dominator tree up-to-date as we go.
4141 updateAnalysis();
4142
4143 // Fix up external users of the induction variables.
4144 for (auto &Entry : *Legal->getInductionVars())
4145 fixupIVUsers(Entry.first, Entry.second,
4146 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4147 IVEndValues[Entry.first], LoopMiddleBlock);
4148
4149 fixLCSSAPHIs();
4150 predicateInstructions();
4151
4152 // Remove redundant induction instructions.
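// (The simple CSE below only considers insertelement, extractelement,
// shufflevector, and getelementptr instructions; see CSEDenseMapInfo above.)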
4153 cse(LoopVectorBody);
4154 }
4155
4156 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
4157
4158 // This is the second phase of vectorizing first-order recurrences. An
4159 // overview of the transformation is described below. Suppose we have the
4160 // following loop.
4161 //
4162 // for (int i = 0; i < n; ++i)
4163 // b[i] = a[i] - a[i - 1];
4164 //
4165 // There is a first-order recurrence on "a". For this loop, the shorthand
4166 // scalar IR looks like:
4167 //
4168 // scalar.ph:
4169 // s_init = a[-1]
4170 // br scalar.body
4171 //
4172 // scalar.body:
4173 // i = phi [0, scalar.ph], [i+1, scalar.body]
4174 // s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4175 // s2 = a[i]
4176 // b[i] = s2 - s1
4177 // br cond, scalar.body, ...
4178 //
4179 // In this example, s1 is a recurrence because its value depends on the
4180 // previous iteration. In the first phase of vectorization, we created a
4181 // temporary value for s1. We now complete the vectorization and produce the
4182 // shorthand vector IR shown below (for VF = 4, UF = 1).
4183 //
4184 // vector.ph:
4185 // v_init = vector(..., ..., ..., a[-1])
4186 // br vector.body
4187 //
4188 // vector.body
4189 // i = phi [0, vector.ph], [i+4, vector.body]
4190 // v1 = phi [v_init, vector.ph], [v2, vector.body]
4191 // v2 = a[i, i+1, i+2, i+3];
4192 // v3 = vector(v1(3), v2(0, 1, 2))
4193 // b[i, i+1, i+2, i+3] = v2 - v3
4194 // br cond, vector.body, middle.block
4195 //
4196 // middle.block:
4197 // x = v2(3)
4198 // br scalar.ph
4199 //
4200 // scalar.ph:
4201 // s_init = phi [x, middle.block], [a[-1], otherwise]
4202 // br scalar.body
4203 //
4204 // After the vector loop finishes executing, we extract the next value of
4205 // the recurrence (x) to use as the initial value in the scalar loop.
4206
4207 // Get the original loop preheader and single loop latch.
4208 auto *Preheader = OrigLoop->getLoopPreheader();
4209 auto *Latch = OrigLoop->getLoopLatch();
4210
4211 // Get the initial and previous values of the scalar recurrence.
4212 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4213 auto *Previous = Phi->getIncomingValueForBlock(Latch);
4214
4215 // Create a vector from the initial value.
4216 auto *VectorInit = ScalarInit;
4217 if (VF > 1) {
4218 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4219 VectorInit = Builder.CreateInsertElement(
4220 UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
4221 Builder.getInt32(VF - 1), "vector.recur.init");
4222 }
4223
4224 // We constructed a temporary phi node in the first phase of vectorization.
4225 // This phi node will eventually be deleted.
4226 VectorParts &PhiParts = VectorLoopValueMap.getVector(Phi);
4227 Builder.SetInsertPoint(cast<Instruction>(PhiParts[0]));
4228
4229 // Create a phi node for the new recurrence. The current value will either be
4230 // the initial value inserted into a vector or a loop-varying vector value.
4231 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4232 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4233
4234 // Get the vectorized previous value. We ensured the previous value was an
4235 // instruction when detecting the recurrence.
4236 auto &PreviousParts = getVectorValue(Previous);
4237
4238 // Set the insertion point to be after this instruction. We ensured the
4239 // previous value dominated all uses of the phi when detecting the
4240 // recurrence.
4241 Builder.SetInsertPoint( 4242 &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1]))); 4243 4244 // We will construct a vector for the recurrence by combining the values for 4245 // the current and previous iterations. This is the required shuffle mask. 4246 SmallVector<Constant *, 8> ShuffleMask(VF); 4247 ShuffleMask[0] = Builder.getInt32(VF - 1); 4248 for (unsigned I = 1; I < VF; ++I) 4249 ShuffleMask[I] = Builder.getInt32(I + VF - 1); 4250 4251 // The vector from which to take the initial value for the current iteration 4252 // (actual or unrolled). Initially, this is the vector phi node. 4253 Value *Incoming = VecPhi; 4254 4255 // Shuffle the current and previous vector and update the vector parts. 4256 for (unsigned Part = 0; Part < UF; ++Part) { 4257 auto *Shuffle = 4258 VF > 1 4259 ? Builder.CreateShuffleVector(Incoming, PreviousParts[Part], 4260 ConstantVector::get(ShuffleMask)) 4261 : Incoming; 4262 PhiParts[Part]->replaceAllUsesWith(Shuffle); 4263 cast<Instruction>(PhiParts[Part])->eraseFromParent(); 4264 PhiParts[Part] = Shuffle; 4265 Incoming = PreviousParts[Part]; 4266 } 4267 4268 // Fix the latch value of the new recurrence in the vector loop. 4269 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4270 4271 // Extract the last vector element in the middle block. This will be the 4272 // initial value for the recurrence when jumping to the scalar loop. 4273 auto *Extract = Incoming; 4274 if (VF > 1) { 4275 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4276 Extract = Builder.CreateExtractElement(Extract, Builder.getInt32(VF - 1), 4277 "vector.recur.extract"); 4278 } 4279 4280 // Fix the initial value of the original recurrence in the scalar loop. 4281 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 4282 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 4283 for (auto *BB : predecessors(LoopScalarPreHeader)) { 4284 auto *Incoming = BB == LoopMiddleBlock ? Extract : ScalarInit; 4285 Start->addIncoming(Incoming, BB); 4286 } 4287 4288 Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start); 4289 Phi->setName("scalar.recur"); 4290 4291 // Finally, fix users of the recurrence outside the loop. The users will need 4292 // either the last value of the scalar recurrence or the last value of the 4293 // vector recurrence we extracted in the middle block. Since the loop is in 4294 // LCSSA form, we just need to find the phi node for the original scalar 4295 // recurrence in the exit block, and then add an edge for the middle block. 4296 for (auto &I : *LoopExitBlock) { 4297 auto *LCSSAPhi = dyn_cast<PHINode>(&I); 4298 if (!LCSSAPhi) 4299 break; 4300 if (LCSSAPhi->getIncomingValue(0) == Phi) { 4301 LCSSAPhi->addIncoming(Extract, LoopMiddleBlock); 4302 break; 4303 } 4304 } 4305 } 4306 4307 void InnerLoopVectorizer::fixLCSSAPHIs() { 4308 for (Instruction &LEI : *LoopExitBlock) { 4309 auto *LCSSAPhi = dyn_cast<PHINode>(&LEI); 4310 if (!LCSSAPhi) 4311 break; 4312 if (LCSSAPhi->getNumIncomingValues() == 1) 4313 LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()), 4314 LoopMiddleBlock); 4315 } 4316 } 4317 4318 void InnerLoopVectorizer::collectTriviallyDeadInstructions() { 4319 BasicBlock *Latch = OrigLoop->getLoopLatch(); 4320 4321 // We create new control-flow for the vectorized loop, so the original 4322 // condition will be dead after vectorization if it's only used by the 4323 // branch. 
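// For example (shorthand, illustrative), in a latch terminated by
//
//   %exitcond = icmp eq i64 %indvars.iv.next, %n
//   br i1 %exitcond, label %exit, label %loop
//
// the icmp's only user is the branch, so the icmp becomes dead once the new
// vector control flow replaces the original latch branch.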
4324 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4325 if (Cmp && Cmp->hasOneUse())
4326 DeadInstructions.insert(Cmp);
4327
4328 // We create new "steps" for induction variable updates to which the original
4329 // induction variables map. An original update instruction will be dead if
4330 // all its users except the induction variable are dead.
4331 for (auto &Induction : *Legal->getInductionVars()) {
4332 PHINode *Ind = Induction.first;
4333 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4334 if (all_of(IndUpdate->users(), [&](User *U) -> bool {
4335 return U == Ind || DeadInstructions.count(cast<Instruction>(U));
4336 }))
4337 DeadInstructions.insert(IndUpdate);
4338 }
4339 }
4340
4341 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4342
4343 // The basic block and loop containing the predicated instruction.
4344 auto *PredBB = PredInst->getParent();
4345 auto *VectorLoop = LI->getLoopFor(PredBB);
4346
4347 // Initialize a worklist with the operands of the predicated instruction.
4348 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4349
4350 // Holds instructions that we need to analyze again. An instruction may be
4351 // reanalyzed if we don't yet know if we can sink it or not.
4352 SmallVector<Instruction *, 8> InstsToReanalyze;
4353
4354 // Returns true if a given use occurs in the predicated block. Phi nodes use
4355 // their operands in their corresponding predecessor blocks.
4356 auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4357 auto *I = cast<Instruction>(U.getUser());
4358 BasicBlock *BB = I->getParent();
4359 if (auto *Phi = dyn_cast<PHINode>(I))
4360 BB = Phi->getIncomingBlock(
4361 PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4362 return BB == PredBB;
4363 };
4364
4365 // Iteratively sink the scalarized operands of the predicated instruction
4366 // into the block we created for it. When an instruction is sunk, its
4367 // operands are then added to the worklist. The algorithm ends when one pass
4368 // through the worklist doesn't sink a single instruction.
4369 bool Changed;
4370 do {
4371
4372 // Add the instructions that need to be reanalyzed to the worklist, and
4373 // reset the changed indicator.
4374 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4375 InstsToReanalyze.clear();
4376 Changed = false;
4377
4378 while (!Worklist.empty()) {
4379 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4380
4381 // We can't sink an instruction if it is a phi node, is already in the
4382 // predicated block, is not in the loop, or may have side effects.
4383 if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4384 !VectorLoop->contains(I) || I->mayHaveSideEffects())
4385 continue;
4386
4387 // It's legal to sink the instruction if all its uses occur in the
4388 // predicated block. Otherwise, there's nothing to do yet, and we may
4389 // need to reanalyze the instruction.
4390 if (!all_of(I->uses(), isBlockOfUsePredicated)) {
4391 InstsToReanalyze.push_back(I);
4392 continue;
4393 }
4394
4395 // Move the instruction to the beginning of the predicated block, and add
4396 // its operands to the worklist.
4397 I->moveBefore(&*PredBB->getFirstInsertionPt());
4398 Worklist.insert(I->op_begin(), I->op_end());
4399
4400 // The sinking may have enabled other instructions to be sunk, so we will
4401 // need to iterate.
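// (This is a standard worklist fixed-point iteration: instructions whose
// sinkability is still unknown are parked in InstsToReanalyze and retried
// on the next pass; the loop terminates only after a complete pass sinks
// nothing.)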
4402 Changed = true; 4403 } 4404 } while (Changed); 4405 } 4406 4407 void InnerLoopVectorizer::predicateInstructions() { 4408 4409 // For each instruction I marked for predication on value C, split I into its 4410 // own basic block to form an if-then construct over C. Since I may be fed by 4411 // an extractelement instruction or other scalar operand, we try to 4412 // iteratively sink its scalar operands into the predicated block. If I feeds 4413 // an insertelement instruction, we try to move this instruction into the 4414 // predicated block as well. For non-void types, a phi node will be created 4415 // for the resulting value (either vector or scalar). 4416 // 4417 // So for some predicated instruction, e.g. the conditional sdiv in: 4418 // 4419 // for.body: 4420 // ... 4421 // %add = add nsw i32 %mul, %0 4422 // %cmp5 = icmp sgt i32 %2, 7 4423 // br i1 %cmp5, label %if.then, label %if.end 4424 // 4425 // if.then: 4426 // %div = sdiv i32 %0, %1 4427 // br label %if.end 4428 // 4429 // if.end: 4430 // %x.0 = phi i32 [ %div, %if.then ], [ %add, %for.body ] 4431 // 4432 // the sdiv at this point is scalarized and if-converted using a select. 4433 // The inactive elements in the vector are not used, but the predicated 4434 // instruction is still executed for all vector elements, essentially: 4435 // 4436 // vector.body: 4437 // ... 4438 // %17 = add nsw <2 x i32> %16, %wide.load 4439 // %29 = extractelement <2 x i32> %wide.load, i32 0 4440 // %30 = extractelement <2 x i32> %wide.load51, i32 0 4441 // %31 = sdiv i32 %29, %30 4442 // %32 = insertelement <2 x i32> undef, i32 %31, i32 0 4443 // %35 = extractelement <2 x i32> %wide.load, i32 1 4444 // %36 = extractelement <2 x i32> %wide.load51, i32 1 4445 // %37 = sdiv i32 %35, %36 4446 // %38 = insertelement <2 x i32> %32, i32 %37, i32 1 4447 // %predphi = select <2 x i1> %26, <2 x i32> %38, <2 x i32> %17 4448 // 4449 // Predication will now re-introduce the original control flow to avoid false 4450 // side-effects by the sdiv instructions on the inactive elements, yielding 4451 // (after cleanup): 4452 // 4453 // vector.body: 4454 // ... 
4455 // %5 = add nsw <2 x i32> %4, %wide.load
4456 // %8 = icmp sgt <2 x i32> %wide.load52, <i32 7, i32 7>
4457 // %9 = extractelement <2 x i1> %8, i32 0
4458 // br i1 %9, label %pred.sdiv.if, label %pred.sdiv.continue
4459 //
4460 // pred.sdiv.if:
4461 // %10 = extractelement <2 x i32> %wide.load, i32 0
4462 // %11 = extractelement <2 x i32> %wide.load51, i32 0
4463 // %12 = sdiv i32 %10, %11
4464 // %13 = insertelement <2 x i32> undef, i32 %12, i32 0
4465 // br label %pred.sdiv.continue
4466 //
4467 // pred.sdiv.continue:
4468 // %14 = phi <2 x i32> [ undef, %vector.body ], [ %13, %pred.sdiv.if ]
4469 // %15 = extractelement <2 x i1> %8, i32 1
4470 // br i1 %15, label %pred.sdiv.if54, label %pred.sdiv.continue55
4471 //
4472 // pred.sdiv.if54:
4473 // %16 = extractelement <2 x i32> %wide.load, i32 1
4474 // %17 = extractelement <2 x i32> %wide.load51, i32 1
4475 // %18 = sdiv i32 %16, %17
4476 // %19 = insertelement <2 x i32> %14, i32 %18, i32 1
4477 // br label %pred.sdiv.continue55
4478 //
4479 // pred.sdiv.continue55:
4480 // %20 = phi <2 x i32> [ %14, %pred.sdiv.continue ], [ %19, %pred.sdiv.if54 ]
4481 // %predphi = select <2 x i1> %8, <2 x i32> %20, <2 x i32> %5
4482
4483 for (auto KV : PredicatedInstructions) {
4484 BasicBlock::iterator I(KV.first);
4485 BasicBlock *Head = I->getParent();
4486 auto *BB = SplitBlock(Head, &*std::next(I), DT, LI);
4487 auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false,
4488 /*BranchWeights=*/nullptr, DT, LI);
4489 I->moveBefore(T);
4490 sinkScalarOperands(&*I);
4491
4492 I->getParent()->setName(Twine("pred.") + I->getOpcodeName() + ".if");
4493 BB->setName(Twine("pred.") + I->getOpcodeName() + ".continue");
4494
4495 // If the instruction is non-void, create a Phi node at the reconvergence point.
4496 if (!I->getType()->isVoidTy()) {
4497 Value *IncomingTrue = nullptr;
4498 Value *IncomingFalse = nullptr;
4499
4500 if (I->hasOneUse() && isa<InsertElementInst>(*I->user_begin())) {
4501 // If the predicated instruction is feeding an insert-element, move it
4502 // into the Then block; a Phi node will be created for the vector.
4503 InsertElementInst *IEI = cast<InsertElementInst>(*I->user_begin());
4504 IEI->moveBefore(T);
4505 IncomingTrue = IEI; // the new vector with the inserted element.
4506 IncomingFalse = IEI->getOperand(0); // the unmodified vector.
4507 } else {
4508 // A Phi node will be created for the scalar predicated instruction.
4509 IncomingTrue = &*I;
4510 IncomingFalse = UndefValue::get(I->getType());
4511 }
4512
4513 BasicBlock *PostDom = I->getParent()->getSingleSuccessor();
4514 assert(PostDom && "Then block has multiple successors");
4515 PHINode *Phi =
4516 PHINode::Create(IncomingTrue->getType(), 2, "", &PostDom->front());
4517 IncomingTrue->replaceAllUsesWith(Phi);
4518 Phi->addIncoming(IncomingFalse, Head);
4519 Phi->addIncoming(IncomingTrue, I->getParent());
4520 }
4521 }
4522
4523 DEBUG(DT->verifyDomTree());
4524 }
4525
4526 InnerLoopVectorizer::VectorParts
4527 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
4528 assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
4529
4530 // Look for cached value.
4531 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
4532 EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge);
4533 if (ECEntryIt != MaskCache.end())
4534 return ECEntryIt->second;
4535
4536 VectorParts SrcMask = createBlockInMask(Src);
4537
4538 // The terminator has to be a branch inst!
4539 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
4540 assert(BI && "Unexpected terminator found");
4541
4542 if (BI->isConditional()) {
4543 VectorParts EdgeMask = getVectorValue(BI->getCondition());
4544
4545 if (BI->getSuccessor(0) != Dst)
4546 for (unsigned part = 0; part < UF; ++part)
4547 EdgeMask[part] = Builder.CreateNot(EdgeMask[part]);
4548
4549 for (unsigned part = 0; part < UF; ++part)
4550 EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]);
4551
4552 MaskCache[Edge] = EdgeMask;
4553 return EdgeMask;
4554 }
4555
4556 MaskCache[Edge] = SrcMask;
4557 return SrcMask;
4558 }
4559
4560 InnerLoopVectorizer::VectorParts
4561 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
4562 assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
4563
4564 // Loop incoming mask is all-one.
4565 if (OrigLoop->getHeader() == BB) {
4566 Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1);
4567 return getVectorValue(C);
4568 }
4569
4570 // This is the block mask. We OR together the masks of all incoming edges, starting from an all-zero mask.
4571 Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0);
4572 VectorParts BlockMask = getVectorValue(Zero);
4573
4574 // For each pred:
4575 for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) {
4576 VectorParts EM = createEdgeMask(*it, BB);
4577 for (unsigned part = 0; part < UF; ++part)
4578 BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]);
4579 }
4580
4581 return BlockMask;
4582 }
4583
4584 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
4585 unsigned VF, PhiVector *PV) {
4586 PHINode *P = cast<PHINode>(PN);
4587 // Handle recurrences.
4588 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4589 VectorParts Entry(UF);
4590 for (unsigned part = 0; part < UF; ++part) {
4591 // This is phase one of vectorizing PHIs.
4592 Type *VecTy =
4593 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4594 Entry[part] = PHINode::Create(
4595 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4596 }
4597 VectorLoopValueMap.initVector(P, Entry);
4598 PV->push_back(P);
4599 return;
4600 }
4601
4602 setDebugLocFromInst(Builder, P);
4603 // Check for PHI nodes that are lowered to vector selects.
4604 if (P->getParent() != OrigLoop->getHeader()) {
4605 // We know that all PHIs in non-header blocks are converted into
4606 // selects, so we don't have to worry about the insertion order and we
4607 // can just use the builder.
4608 // At this point we generate the predication tree. There may be
4609 // duplications since this is a simple recursive scan, but future
4610 // optimizations will clean it up.
4611
4612 unsigned NumIncoming = P->getNumIncomingValues();
4613
4614 // Generate a sequence of selects of the form:
4615 // SELECT(Mask3, In3,
4616 // SELECT(Mask2, In2,
4617 // ( ...)))
4618 VectorParts Entry(UF);
4619 for (unsigned In = 0; In < NumIncoming; In++) {
4620 VectorParts Cond =
4621 createEdgeMask(P->getIncomingBlock(In), P->getParent());
4622 const VectorParts &In0 = getVectorValue(P->getIncomingValue(In));
4623
4624 for (unsigned part = 0; part < UF; ++part) {
4625 // We might have single edge PHIs (blocks) - use an identity
4626 // 'select' for the first PHI operand.
4627 if (In == 0)
4628 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In0[part]);
4629 else
4630 // Select between the current value and the previous incoming edge
4631 // based on the incoming mask.
4632 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], Entry[part], 4633 "predphi"); 4634 } 4635 } 4636 VectorLoopValueMap.initVector(P, Entry); 4637 return; 4638 } 4639 4640 // This PHINode must be an induction variable. 4641 // Make sure that we know about it. 4642 assert(Legal->getInductionVars()->count(P) && "Not an induction variable"); 4643 4644 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 4645 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4646 4647 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4648 // which can be found from the original scalar operations. 4649 switch (II.getKind()) { 4650 case InductionDescriptor::IK_NoInduction: 4651 llvm_unreachable("Unknown induction"); 4652 case InductionDescriptor::IK_IntInduction: 4653 return widenIntInduction(P); 4654 case InductionDescriptor::IK_PtrInduction: { 4655 // Handle the pointer induction variable case. 4656 assert(P->getType()->isPointerTy() && "Unexpected type."); 4657 // This is the normalized GEP that starts counting at zero. 4658 Value *PtrInd = Induction; 4659 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType()); 4660 // Determine the number of scalars we need to generate for each unroll 4661 // iteration. If the instruction is uniform, we only need to generate the 4662 // first lane. Otherwise, we generate all VF values. 4663 unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF; 4664 // These are the scalar results. Notice that we don't generate vector GEPs 4665 // because scalar GEPs result in better code. 4666 ScalarParts Entry(UF); 4667 for (unsigned Part = 0; Part < UF; ++Part) { 4668 Entry[Part].resize(VF); 4669 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4670 Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF); 4671 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4672 Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL); 4673 SclrGep->setName("next.gep"); 4674 Entry[Part][Lane] = SclrGep; 4675 } 4676 } 4677 VectorLoopValueMap.initScalar(P, Entry); 4678 return; 4679 } 4680 case InductionDescriptor::IK_FpInduction: { 4681 assert(P->getType() == II.getStartValue()->getType() && 4682 "Types must match"); 4683 // Handle other induction variables that are now based on the 4684 // canonical one. 4685 assert(P != OldInduction && "Primary induction can be integer only"); 4686 4687 Value *V = Builder.CreateCast(Instruction::SIToFP, Induction, P->getType()); 4688 V = II.transform(Builder, V, PSE.getSE(), DL); 4689 V->setName("fp.offset.idx"); 4690 4691 // Now we have scalar op: %fp.offset.idx = StartVal +/- Induction*StepVal 4692 4693 Value *Broadcasted = getBroadcastInstrs(V); 4694 // After broadcasting the induction variable we need to make the vector 4695 // consecutive by adding StepVal*0, StepVal*1, StepVal*2, etc. 4696 Value *StepVal = cast<SCEVUnknown>(II.getStep())->getValue(); 4697 VectorParts Entry(UF); 4698 for (unsigned part = 0; part < UF; ++part) 4699 Entry[part] = getStepVector(Broadcasted, VF * part, StepVal, 4700 II.getInductionOpcode()); 4701 VectorLoopValueMap.initVector(P, Entry); 4702 return; 4703 } 4704 } 4705 } 4706 4707 /// A helper function for checking whether an integer division-related 4708 /// instruction may divide by zero (in which case it must be predicated if 4709 /// executed conditionally in the scalar code). 4710 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 
4711 /// Non-zero divisors that are not compile-time constants will not be
4712 /// converted into multiplication, so we will still end up scalarizing
4713 /// the division, but can do so w/o predication.
4714 static bool mayDivideByZero(Instruction &I) {
4715 assert((I.getOpcode() == Instruction::UDiv ||
4716 I.getOpcode() == Instruction::SDiv ||
4717 I.getOpcode() == Instruction::URem ||
4718 I.getOpcode() == Instruction::SRem) &&
4719 "Unexpected instruction");
4720 Value *Divisor = I.getOperand(1);
4721 auto *CInt = dyn_cast<ConstantInt>(Divisor);
4722 return !CInt || CInt->isZero();
4723 }
4724
4725 void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
4726 // For each instruction in the old loop.
4727 for (Instruction &I : *BB) {
4728
4729 // If the instruction will become trivially dead when vectorized, we don't
4730 // need to generate it.
4731 if (DeadInstructions.count(&I))
4732 continue;
4733
4734 // Scalarize instructions that should remain scalar after vectorization.
4735 if (VF > 1 &&
4736 !(isa<BranchInst>(&I) || isa<PHINode>(&I) ||
4737 isa<DbgInfoIntrinsic>(&I)) &&
4738 shouldScalarizeInstruction(&I)) {
4739 scalarizeInstruction(&I, Legal->isScalarWithPredication(&I));
4740 continue;
4741 }
4742
4743 switch (I.getOpcode()) {
4744 case Instruction::Br:
4745 // Nothing to do for PHIs and BR, since we already took care of the
4746 // loop control flow instructions.
4747 continue;
4748 case Instruction::PHI: {
4749 // Vectorize PHINodes.
4750 widenPHIInstruction(&I, UF, VF, PV);
4751 continue;
4752 } // End of PHI.
4753
4754 case Instruction::UDiv:
4755 case Instruction::SDiv:
4756 case Instruction::SRem:
4757 case Instruction::URem:
4758 // Scalarize with predication if this instruction may divide by zero and
4759 // block execution is conditional, otherwise fall through.
4760 if (Legal->isScalarWithPredication(&I)) {
4761 scalarizeInstruction(&I, true);
4762 continue;
4763 }
4764 case Instruction::Add:
4765 case Instruction::FAdd:
4766 case Instruction::Sub:
4767 case Instruction::FSub:
4768 case Instruction::Mul:
4769 case Instruction::FMul:
4770 case Instruction::FDiv:
4771 case Instruction::FRem:
4772 case Instruction::Shl:
4773 case Instruction::LShr:
4774 case Instruction::AShr:
4775 case Instruction::And:
4776 case Instruction::Or:
4777 case Instruction::Xor: {
4778 // Just widen binops.
4779 auto *BinOp = cast<BinaryOperator>(&I);
4780 setDebugLocFromInst(Builder, BinOp);
4781 const VectorParts &A = getVectorValue(BinOp->getOperand(0));
4782 const VectorParts &B = getVectorValue(BinOp->getOperand(1));
4783
4784 // Use this vector value for all users of the original instruction.
4785 VectorParts Entry(UF);
4786 for (unsigned Part = 0; Part < UF; ++Part) {
4787 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]);
4788
4789 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
4790 VecOp->copyIRFlags(BinOp);
4791
4792 Entry[Part] = V;
4793 }
4794
4795 VectorLoopValueMap.initVector(&I, Entry);
4796 addMetadata(Entry, BinOp);
4797 break;
4798 }
4799 case Instruction::Select: {
4800 // Widen selects.
4801 // If the selector is loop invariant we can create a select
4802 // instruction with a scalar condition. Otherwise, use vector-select.
4803 auto *SE = PSE.getSE();
4804 bool InvariantCond =
4805 SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
4806 setDebugLocFromInst(Builder, &I);
4807
4808 // The condition can be loop invariant but still defined inside the
// loop.
This means that we can't just use the original 'cond' value. 4810 // We have to take the 'vectorized' value and pick the first lane. 4811 // Instcombine will make this a no-op. 4812 const VectorParts &Cond = getVectorValue(I.getOperand(0)); 4813 const VectorParts &Op0 = getVectorValue(I.getOperand(1)); 4814 const VectorParts &Op1 = getVectorValue(I.getOperand(2)); 4815 4816 auto *ScalarCond = getScalarValue(I.getOperand(0), 0, 0); 4817 4818 VectorParts Entry(UF); 4819 for (unsigned Part = 0; Part < UF; ++Part) { 4820 Entry[Part] = Builder.CreateSelect( 4821 InvariantCond ? ScalarCond : Cond[Part], Op0[Part], Op1[Part]); 4822 } 4823 4824 VectorLoopValueMap.initVector(&I, Entry); 4825 addMetadata(Entry, &I); 4826 break; 4827 } 4828 4829 case Instruction::ICmp: 4830 case Instruction::FCmp: { 4831 // Widen compares. Generate vector compares. 4832 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4833 auto *Cmp = dyn_cast<CmpInst>(&I); 4834 setDebugLocFromInst(Builder, Cmp); 4835 const VectorParts &A = getVectorValue(Cmp->getOperand(0)); 4836 const VectorParts &B = getVectorValue(Cmp->getOperand(1)); 4837 VectorParts Entry(UF); 4838 for (unsigned Part = 0; Part < UF; ++Part) { 4839 Value *C = nullptr; 4840 if (FCmp) { 4841 C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]); 4842 cast<FCmpInst>(C)->copyFastMathFlags(Cmp); 4843 } else { 4844 C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]); 4845 } 4846 Entry[Part] = C; 4847 } 4848 4849 VectorLoopValueMap.initVector(&I, Entry); 4850 addMetadata(Entry, &I); 4851 break; 4852 } 4853 4854 case Instruction::Store: 4855 case Instruction::Load: 4856 vectorizeMemoryInstruction(&I); 4857 break; 4858 case Instruction::ZExt: 4859 case Instruction::SExt: 4860 case Instruction::FPToUI: 4861 case Instruction::FPToSI: 4862 case Instruction::FPExt: 4863 case Instruction::PtrToInt: 4864 case Instruction::IntToPtr: 4865 case Instruction::SIToFP: 4866 case Instruction::UIToFP: 4867 case Instruction::Trunc: 4868 case Instruction::FPTrunc: 4869 case Instruction::BitCast: { 4870 auto *CI = dyn_cast<CastInst>(&I); 4871 setDebugLocFromInst(Builder, CI); 4872 4873 // Optimize the special case where the source is a constant integer 4874 // induction variable. Notice that we can only optimize the 'trunc' case 4875 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 4876 // (c) other casts depend on pointer size. 4877 auto ID = Legal->getInductionVars()->lookup(OldInduction); 4878 if (isa<TruncInst>(CI) && CI->getOperand(0) == OldInduction && 4879 ID.getConstIntStepValue()) { 4880 widenIntInduction(OldInduction, cast<TruncInst>(CI)); 4881 break; 4882 } 4883 4884 /// Vectorize casts. 4885 Type *DestTy = 4886 (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF); 4887 4888 const VectorParts &A = getVectorValue(CI->getOperand(0)); 4889 VectorParts Entry(UF); 4890 for (unsigned Part = 0; Part < UF; ++Part) 4891 Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy); 4892 VectorLoopValueMap.initVector(&I, Entry); 4893 addMetadata(Entry, &I); 4894 break; 4895 } 4896 4897 case Instruction::Call: { 4898 // Ignore dbg intrinsics. 
4899 if (isa<DbgInfoIntrinsic>(I))
4900 break;
4901 setDebugLocFromInst(Builder, &I);
4902
4903 Module *M = BB->getParent()->getParent();
4904 auto *CI = cast<CallInst>(&I);
4905
4906 StringRef FnName = CI->getCalledFunction()->getName();
4907 Function *F = CI->getCalledFunction();
4908 Type *RetTy = ToVectorTy(CI->getType(), VF);
4909 SmallVector<Type *, 4> Tys;
4910 for (Value *ArgOperand : CI->arg_operands())
4911 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
4912
4913 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4914 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
4915 ID == Intrinsic::lifetime_start)) {
4916 scalarizeInstruction(&I);
4917 break;
4918 }
4919 // This flag shows whether we should use an intrinsic or a plain call for
4920 // the vectorized version of the instruction, depending on whether an
4921 // intrinsic call is cheaper to perform than a lib call.
4922 bool NeedToScalarize;
4923 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
4924 bool UseVectorIntrinsic =
4925 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
4926 if (!UseVectorIntrinsic && NeedToScalarize) {
4927 scalarizeInstruction(&I);
4928 break;
4929 }
4930
4931 VectorParts Entry(UF);
4932 for (unsigned Part = 0; Part < UF; ++Part) {
4933 SmallVector<Value *, 4> Args;
4934 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
4935 Value *Arg = CI->getArgOperand(i);
4936 // Some intrinsics have a scalar argument - don't replace it with a
4937 // vector.
4938 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) {
4939 const VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i));
4940 Arg = VectorArg[Part];
4941 }
4942 Args.push_back(Arg);
4943 }
4944
4945 Function *VectorF;
4946 if (UseVectorIntrinsic) {
4947 // Use vector version of the intrinsic.
4948 Type *TysForDecl[] = {CI->getType()};
4949 if (VF > 1)
4950 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4951 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4952 } else {
4953 // Use vector version of the library call.
4954 StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
4955 assert(!VFnName.empty() && "Vector function name is empty.");
4956 VectorF = M->getFunction(VFnName);
4957 if (!VectorF) {
4958 // Generate a declaration.
4959 FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
4960 VectorF =
4961 Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
4962 VectorF->copyAttributesFrom(F);
4963 }
4964 }
4965 assert(VectorF && "Can't create vector function.");
4966
4967 SmallVector<OperandBundleDef, 1> OpBundles;
4968 CI->getOperandBundlesAsDefs(OpBundles);
4969 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4970
4971 if (isa<FPMathOperator>(V))
4972 V->copyFastMathFlags(CI);
4973
4974 Entry[Part] = V;
4975 }
4976
4977 VectorLoopValueMap.initVector(&I, Entry);
4978 addMetadata(Entry, &I);
4979 break;
4980 }
4981
4982 default:
4983 // All other instructions are unsupported. Scalarize them.
4984 scalarizeInstruction(&I);
4985 break;
4986 } // end of switch.
4987 } // end of for_each instr.
4988 }
4989
4990 void InnerLoopVectorizer::updateAnalysis() {
4991 // Forget the original basic block.
4992 PSE.getSE()->forgetLoop(OrigLoop);
4993
4994 // Update the dominator tree information.
4995 assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
4996 "Entry does not dominate exit.");
4997
4998 // We don't predicate stores by this point, so the vector body should be a
4999 // single loop.
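// (The updates below record the immediate dominators of the new blocks: the
// vector body is dominated by its preheader, the middle block by the vector
// body, and, because the bypass checks may branch around the vector loop,
// the scalar preheader and the exit block by the first bypass block.)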
5000 DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader);
5001
5002 DT->addNewBlock(LoopMiddleBlock, LoopVectorBody);
5003 DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
5004 DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
5005 DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
5006
5007 DEBUG(DT->verifyDomTree());
5008 }
5009
5010 /// \brief Check whether it is safe to if-convert this phi node.
5011 ///
5012 /// Phi nodes with constant expressions that can trap are not safe to
5013 /// if-convert.
5014 static bool canIfConvertPHINodes(BasicBlock *BB) {
5015 for (Instruction &I : *BB) {
5016 auto *Phi = dyn_cast<PHINode>(&I);
5017 if (!Phi)
5018 return true;
5019 for (Value *V : Phi->incoming_values())
5020 if (auto *C = dyn_cast<Constant>(V))
5021 if (C->canTrap())
5022 return false;
5023 }
5024 return true;
5025 }
5026
5027 bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
5028 if (!EnableIfConversion) {
5029 ORE->emit(createMissedAnalysis("IfConversionDisabled")
5030 << "if-conversion is disabled");
5031 return false;
5032 }
5033
5034 assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");
5035
5036 // A list of pointers that we can safely read and write to.
5037 SmallPtrSet<Value *, 8> SafePointers;
5038
5039 // Collect safe addresses.
5040 for (BasicBlock *BB : TheLoop->blocks()) {
5041 if (blockNeedsPredication(BB))
5042 continue;
5043
5044 for (Instruction &I : *BB)
5045 if (auto *Ptr = getPointerOperand(&I))
5046 SafePointers.insert(Ptr);
5047 }
5048
5049 // Collect the blocks that need predication.
5050 BasicBlock *Header = TheLoop->getHeader();
5051 for (BasicBlock *BB : TheLoop->blocks()) {
5052 // We don't support switch statements inside loops.
5053 if (!isa<BranchInst>(BB->getTerminator())) {
5054 ORE->emit(createMissedAnalysis("LoopContainsSwitch", BB->getTerminator())
5055 << "loop contains a switch statement");
5056 return false;
5057 }
5058
5059 // We must be able to predicate all blocks that need to be predicated.
5060 if (blockNeedsPredication(BB)) {
5061 if (!blockCanBePredicated(BB, SafePointers)) {
5062 ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
5063 << "control flow cannot be substituted for a select");
5064 return false;
5065 }
5066 } else if (BB != Header && !canIfConvertPHINodes(BB)) {
5067 ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
5068 << "control flow cannot be substituted for a select");
5069 return false;
5070 }
5071 }
5072
5073 // We can if-convert this loop.
5074 return true;
5075 }
5076
5077 bool LoopVectorizationLegality::canVectorize() {
5078 // We must have a loop in canonical form. Loops with indirectbr in them cannot
5079 // be canonicalized.
5080 if (!TheLoop->getLoopPreheader()) {
5081 ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5082 << "loop control flow is not understood by vectorizer");
5083 return false;
5084 }
5085
5086 // FIXME: The code is currently dead, since any loop that gets sent to
5087 // LoopVectorizationLegality is already an innermost loop.
5088 //
5089 // We can only vectorize innermost loops.
5090 if (!TheLoop->empty()) {
5091 ORE->emit(createMissedAnalysis("NotInnermostLoop")
5092 << "loop is not the innermost loop");
5093 return false;
5094 }
5095
5096 // We must have a single backedge.
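// (LoopSimplify normally merges multiple backedges into a single latch; a
// loop where that was not possible is too irregular for the vectorizer.)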
5097 if (TheLoop->getNumBackEdges() != 1) {
5098 ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5099 << "loop control flow is not understood by vectorizer");
5100 return false;
5101 }
5102
5103 // We must have a single exiting block.
5104 if (!TheLoop->getExitingBlock()) {
5105 ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5106 << "loop control flow is not understood by vectorizer");
5107 return false;
5108 }
5109
5110 // We only handle bottom-tested loops, i.e., loops in which the condition is
5111 // checked at the end of each iteration. With that we can assume that all
5112 // instructions in the loop are executed the same number of times.
5113 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5114 ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5115 << "loop control flow is not understood by vectorizer");
5116 return false;
5117 }
5118
5119 // We need to have a loop header.
5120 DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
5121 << '\n');
5122
5123 // Check if we can if-convert non-single-bb loops.
5124 unsigned NumBlocks = TheLoop->getNumBlocks();
5125 if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
5126 DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
5127 return false;
5128 }
5129
5130 // ScalarEvolution needs to be able to find the exit count.
5131 const SCEV *ExitCount = PSE.getBackedgeTakenCount();
5132 if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
5133 ORE->emit(createMissedAnalysis("CantComputeNumberOfIterations")
5134 << "could not determine number of loop iterations");
5135 DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
5136 return false;
5137 }
5138
5139 // Check if we can vectorize the instructions and CFG in this loop.
5140 if (!canVectorizeInstrs()) {
5141 DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
5142 return false;
5143 }
5144
5145 // Go over each instruction and look at memory deps.
5146 if (!canVectorizeMemory()) {
5147 DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
5148 return false;
5149 }
5150
5151 DEBUG(dbgs() << "LV: We can vectorize this loop"
5152 << (LAI->getRuntimePointerChecking()->Need
5153 ? " (with a runtime bound check)"
5154 : "")
5155 << "!\n");
5156
5157 bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
5158
5159 // If an override option has been passed in for interleaved accesses, use it.
5160 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
5161 UseInterleaved = EnableInterleavedMemAccesses;
5162
5163 // Analyze interleaved memory accesses.
5164 if (UseInterleaved)
5165 InterleaveInfo.analyzeInterleaving(*getSymbolicStrides());
5166
5167 unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
5168 if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
5169 SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;
5170
5171 if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
5172 ORE->emit(createMissedAnalysis("TooManySCEVRunTimeChecks")
5173 << "Too many SCEV assumptions need to be made and checked "
5174 << "at runtime");
5175 DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
5176 return false;
5177 }
5178
5179 // Okay! We can vectorize. At this point we don't have any other mem analysis
5180 // which may limit our maximum vectorization factor, so just return true with
5181 // no restrictions.
5182 return true;
5183 }
5184
5185 static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
5186 if (Ty->isPointerTy())
5187 return DL.getIntPtrType(Ty);
5188
5189 // It is possible that chars or shorts overflow when we ask for the loop's
5190 // trip count; work around this by changing the type size.
5191 if (Ty->getScalarSizeInBits() < 32)
5192 return Type::getInt32Ty(Ty->getContext());
5193
5194 return Ty;
5195 }
5196
5197 static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
5198 Ty0 = convertPointerToIntegerType(DL, Ty0);
5199 Ty1 = convertPointerToIntegerType(DL, Ty1);
5200 if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
5201 return Ty0;
5202 return Ty1;
5203 }
5204
5205 /// \brief Check that the instruction has outside loop users and is not an
5206 /// identified reduction variable.
5207 static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
5208 SmallPtrSetImpl<Value *> &AllowedExit) {
5209 // Reduction and Induction instructions are allowed to have exit users. All
5210 // other instructions must not have external users.
5211 if (!AllowedExit.count(Inst))
5212 // Check that all of the users of the instruction are inside the loop.
5213 for (User *U : Inst->users()) {
5214 Instruction *UI = cast<Instruction>(U);
5215 // This user may be a reduction exit value.
5216 if (!TheLoop->contains(UI)) {
5217 DEBUG(dbgs() << "LV: Found an outside user for: " << *UI << '\n');
5218 return true;
5219 }
5220 }
5221 return false;
5222 }
5223
5224 void LoopVectorizationLegality::addInductionPhi(
5225 PHINode *Phi, const InductionDescriptor &ID,
5226 SmallPtrSetImpl<Value *> &AllowedExit) {
5227 Inductions[Phi] = ID;
5228 Type *PhiTy = Phi->getType();
5229 const DataLayout &DL = Phi->getModule()->getDataLayout();
5230
5231 // Get the widest type.
5232 if (!PhiTy->isFloatingPointTy()) {
5233 if (!WidestIndTy)
5234 WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
5235 else
5236 WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
5237 }
5238
5239 // Int inductions are special because we only allow one IV.
5240 if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
5241 ID.getConstIntStepValue() &&
5242 ID.getConstIntStepValue()->isOne() &&
5243 isa<Constant>(ID.getStartValue()) &&
5244 cast<Constant>(ID.getStartValue())->isNullValue()) {
5245
5246 // Use the phi node with the widest type as induction. Use the last
5247 // one if there are multiple (no good reason for doing this other
5248 // than it is expedient). We've checked that it begins at zero and
5249 // steps by one, so this is a canonical induction variable.
5250 if (!Induction || PhiTy == WidestIndTy)
5251 Induction = Phi;
5252 }
5253
5254 // Both the PHI node itself, and the "post-increment" value feeding
5255 // back into the PHI node may have external users.
5256 AllowedExit.insert(Phi);
5257 AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
5258
5259 DEBUG(dbgs() << "LV: Found an induction variable.\n");
5260 return;
5261 }
5262
5263 bool LoopVectorizationLegality::canVectorizeInstrs() {
5264 BasicBlock *Header = TheLoop->getHeader();
5265
5266 // Look for the attribute signaling the absence of NaNs.
5267 Function &F = *Header->getParent();
5268 HasFunNoNaNAttr =
5269 F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
5270
5271 // For each block in the loop.
5272 for (BasicBlock *BB : TheLoop->blocks()) {
5273 // Scan the instructions in the block and look for hazards.
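// The hazards checked below include: PHI nodes of types we cannot vectorize,
// calls with no intrinsic or vector-library mapping, intrinsics whose scalar
// operand is not loop invariant, instruction and store types that are not
// valid vector element types, and values used outside the loop.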
5274 for (Instruction &I : *BB) {
5275 if (auto *Phi = dyn_cast<PHINode>(&I)) {
5276 Type *PhiTy = Phi->getType();
5277 // Check that this PHI type is allowed.
5278 if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
5279 !PhiTy->isPointerTy()) {
5280 ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
5281 << "loop control flow is not understood by vectorizer");
5282 DEBUG(dbgs() << "LV: Found a non-int non-pointer PHI.\n");
5283 return false;
5284 }
5285
5286 // If this PHINode is not in the header block, then we know that we
5287 // can convert it to select during if-conversion. No need to check if
5288 // the PHIs in this block are induction or reduction variables.
5289 if (BB != Header) {
5290 // Check that this instruction has no outside users or is an
5291 // identified reduction value with an outside user.
5292 if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
5293 continue;
5294 ORE->emit(createMissedAnalysis("NeitherInductionNorReduction", Phi)
5295 << "value could not be identified as "
5296 "an induction or reduction variable");
5297 return false;
5298 }
5299
5300 // We only allow if-converted PHIs with exactly two incoming values.
5301 if (Phi->getNumIncomingValues() != 2) {
5302 ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
5303 << "loop control flow is not understood by vectorizer");
5304 DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
5305 return false;
5306 }
5307
5308 RecurrenceDescriptor RedDes;
5309 if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
5310 if (RedDes.hasUnsafeAlgebra())
5311 Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
5312 AllowedExit.insert(RedDes.getLoopExitInstr());
5313 Reductions[Phi] = RedDes;
5314 continue;
5315 }
5316
5317 InductionDescriptor ID;
5318 if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
5319 addInductionPhi(Phi, ID, AllowedExit);
5320 if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr)
5321 Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst());
5322 continue;
5323 }
5324
5325 if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) {
5326 FirstOrderRecurrences.insert(Phi);
5327 continue;
5328 }
5329
5330 // As a last resort, coerce the PHI to an AddRec expression
5331 // and re-try classifying it as an induction PHI.
5332 if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
5333 addInductionPhi(Phi, ID, AllowedExit);
5334 continue;
5335 }
5336
5337 ORE->emit(createMissedAnalysis("NonReductionValueUsedOutsideLoop", Phi)
5338 << "value that could not be identified as "
5339 "reduction is used outside the loop");
5340 DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
5341 return false;
5342 } // end of PHI handling
5343
5344 // We handle calls that:
5345 // * Are debug info intrinsics.
5346 // * Have a mapping to an IR intrinsic.
5347 // * Have a vector version available.
5348 auto *CI = dyn_cast<CallInst>(&I);
5349 if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
5350 !isa<DbgInfoIntrinsic>(CI) &&
5351 !(CI->getCalledFunction() && TLI &&
5352 TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
5353 ORE->emit(createMissedAnalysis("CantVectorizeCall", CI)
5354 << "call instruction cannot be vectorized");
5355 DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
5356 return false;
5357 }
5358
5359 // Intrinsics such as powi, cttz and ctlz are legal to vectorize if the
5360 // second argument is the same (i.e., loop invariant).
5361 if (CI && hasVectorInstrinsicScalarOpd(
5362 getVectorIntrinsicIDForCall(CI, TLI), 1)) {
5363 auto *SE = PSE.getSE();
5364 if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
5365 ORE->emit(createMissedAnalysis("CantVectorizeIntrinsic", CI)
5366 << "intrinsic instruction cannot be vectorized");
5367 DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
5368 return false;
5369 }
5370 }
5371
5372 // Check that the instruction return type is vectorizable.
5373 // Also, we can't vectorize extractelement instructions.
5374 if ((!VectorType::isValidElementType(I.getType()) &&
5375 !I.getType()->isVoidTy()) ||
5376 isa<ExtractElementInst>(I)) {
5377 ORE->emit(createMissedAnalysis("CantVectorizeInstructionReturnType", &I)
5378 << "instruction return type cannot be vectorized");
5379 DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
5380 return false;
5381 }
5382
5383 // Check that the stored type is vectorizable.
5384 if (auto *ST = dyn_cast<StoreInst>(&I)) {
5385 Type *T = ST->getValueOperand()->getType();
5386 if (!VectorType::isValidElementType(T)) {
5387 ORE->emit(createMissedAnalysis("CantVectorizeStore", ST)
5388 << "store instruction cannot be vectorized");
5389 return false;
5390 }
5391
5392 // FP instructions can allow unsafe algebra, and are thus vectorizable by
5393 // non-IEEE-754-compliant SIMD units.
5394 // This applies to floating-point math operations and calls, not memory
5395 // operations, shuffles, or casts, as they don't change precision or
5396 // semantics.
5397 } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
5398 !I.hasUnsafeAlgebra()) {
5399 DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
5400 Hints->setPotentiallyUnsafe();
5401 }
5402
5403 // Reduction instructions are allowed to have exit users.
5404 // All other instructions must not have external users.
5405 if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
5406 ORE->emit(createMissedAnalysis("ValueUsedOutsideLoop", &I)
5407 << "value cannot be used outside the loop");
5408 return false;
5409 }
5410
5411 } // next instr.
5412 }
5413
5414 if (!Induction) {
5415 DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
5416 if (Inductions.empty()) {
5417 ORE->emit(createMissedAnalysis("NoInductionVariable")
5418 << "loop induction variable could not be identified");
5419 return false;
5420 }
5421 }
5422
5423 // Now that we know the widest induction type, check if our found induction
5424 // is the same size. If it's not, unset it here and InnerLoopVectorizer
5425 // will create another.
5426 if (Induction && WidestIndTy != Induction->getType())
5427 Induction = nullptr;
5428
5429 return true;
5430 }
5431
5432 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
5433
5434 // We should not collect Scalars more than once per VF. Right now,
5435 // this function is called from collectUniformsAndScalars(), which
5436 // already does this check. Collecting Scalars for VF=1 does not make any
5437 // sense.
5438
5439 assert(VF >= 2 && !Scalars.count(VF) &&
5440 "This function should not be visited twice for the same VF");
5441
5442 // If an instruction is uniform after vectorization, it will remain scalar.
5443 Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end());
5444
5445 // Collect the getelementptr instructions that will not be vectorized. A
5446 // getelementptr instruction is only vectorized if it is used for a legal
5447 // gather or scatter operation.
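// For example (illustrative): every getelementptr starts out in Scalars;
// one that feeds a load or store with a CM_GatherScatter widening decision
// is then removed again, since gathers and scatters consume a vector of
// pointers.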
5448 for (auto *BB : TheLoop->blocks())
5449 for (auto &I : *BB) {
5450 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
5451 Scalars[VF].insert(GEP);
5452 continue;
5453 }
5454 auto *Ptr = getPointerOperand(&I);
5455 if (!Ptr)
5456 continue;
5457 auto *GEP = getGEPInstruction(Ptr);
5458 if (GEP && getWideningDecision(&I, VF) == CM_GatherScatter)
5459 Scalars[VF].erase(GEP);
5460 }
5461
5462 // An induction variable will remain scalar if all users of the induction
5463 // variable and induction variable update remain scalar.
5464 auto *Latch = TheLoop->getLoopLatch();
5465 for (auto &Induction : *Legal->getInductionVars()) {
5466 auto *Ind = Induction.first;
5467 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5468
5469 // Determine if all users of the induction variable are scalar after
5470 // vectorization.
5471 auto ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
5472 auto *I = cast<Instruction>(U);
5473 return I == IndUpdate || !TheLoop->contains(I) || Scalars[VF].count(I);
5474 });
5475 if (!ScalarInd)
5476 continue;
5477
5478 // Determine if all users of the induction variable update instruction are
5479 // scalar after vectorization.
5480 auto ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
5481 auto *I = cast<Instruction>(U);
5482 return I == Ind || !TheLoop->contains(I) || Scalars[VF].count(I);
5483 });
5484 if (!ScalarIndUpdate)
5485 continue;
5486
5487 // The induction variable and its update instruction will remain scalar.
5488 Scalars[VF].insert(Ind);
5489 Scalars[VF].insert(IndUpdate);
5490 }
5491 }
5492
5493 bool LoopVectorizationLegality::isScalarWithPredication(Instruction *I) {
5494 if (!blockNeedsPredication(I->getParent()))
5495 return false;
5496 switch (I->getOpcode()) {
5497 default:
5498 break;
5499 case Instruction::Store:
5500 return !isMaskRequired(I);
5501 case Instruction::UDiv:
5502 case Instruction::SDiv:
5503 case Instruction::SRem:
5504 case Instruction::URem:
5505 return mayDivideByZero(*I);
5506 }
5507 return false;
5508 }
5509
5510 bool LoopVectorizationLegality::memoryInstructionCanBeWidened(Instruction *I,
5511 unsigned VF) {
5512 // Get and ensure we have a valid memory instruction.
5513 LoadInst *LI = dyn_cast<LoadInst>(I);
5514 StoreInst *SI = dyn_cast<StoreInst>(I);
5515 assert((LI || SI) && "Invalid memory instruction");
5516
5517 auto *Ptr = getPointerOperand(I);
5518
5519 // First of all, in order to be widened, the pointer must be consecutive.
5520 if (!isConsecutivePtr(Ptr))
5521 return false;
5522
5523 // If the instruction is a store located in a predicated block, it will be
5524 // scalarized.
5525 if (isScalarWithPredication(I))
5526 return false;
5527
5528 // If the instruction's allocated size doesn't equal its type size, it
5529 // requires padding and will be scalarized.
5530 auto &DL = I->getModule()->getDataLayout();
5531 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5532 if (hasIrregularType(ScalarTy, DL, VF))
5533 return false;
5534
5535 return true;
5536 }
5537
5538 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
5539
5540 // We should not collect Uniforms more than once per VF. Right now,
5541 // this function is called from collectUniformsAndScalars(), which
5542 // already does this check. Collecting Uniforms for VF=1 does not make any
5543 // sense.
5544
5545 assert(VF >= 2 && !Uniforms.count(VF) &&
5546 "This function should not be visited twice for the same VF");
5547
5548 // Visit the list of Uniforms. If we do not find any uniform value, we will
5549 // not analyze it again; Uniforms.count(VF) will return 1 either way.
5550 Uniforms[VF].clear();
5551
5552 // We now know that the loop is vectorizable!
5553 // Collect instructions inside the loop that will remain uniform after
5554 // vectorization.
5555
5556 // Global values, params and instructions outside of the current loop are
5557 // out of scope.
5558 auto isOutOfScope = [&](Value *V) -> bool {
5559 Instruction *I = dyn_cast<Instruction>(V);
5560 return (!I || !TheLoop->contains(I));
5561 };
5562
5563 SetVector<Instruction *> Worklist;
5564 BasicBlock *Latch = TheLoop->getLoopLatch();
5565
5566 // Start with the conditional branch. If the branch condition is an
5567 // instruction contained in the loop that is only used by the branch, it is
5568 // uniform.
5569 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5570 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) {
5571 Worklist.insert(Cmp);
5572 DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
5573 }
5574
5575 // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
5576 // are pointers that are treated like consecutive pointers during
5577 // vectorization. The pointer operands of interleaved accesses are an
5578 // example.
5579 SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
5580
5581 // Holds pointer operands of instructions that are possibly non-uniform.
5582 SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
5583
5584 auto isUniformDecision = [&](Instruction *I, unsigned VF) {
5585 InstWidening WideningDecision = getWideningDecision(I, VF);
5586 assert(WideningDecision != CM_Unknown &&
5587 "Widening decision should be ready at this moment");
5588
5589 return (WideningDecision == CM_Widen ||
5590 WideningDecision == CM_Interleave);
5591 };
5592 // Iterate over the instructions in the loop, and collect all
5593 // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
5594 // that a consecutive-like pointer operand will be scalarized, we collect it
5595 // in PossibleNonUniformPtrs instead. We use two sets here because a single
5596 // getelementptr instruction can be used by both vectorized and scalarized
5597 // memory instructions. For example, if a loop loads and stores from the same
5598 // location, but the store is conditional, the store will be scalarized, and
5599 // the getelementptr won't remain uniform.
5600 for (auto *BB : TheLoop->blocks())
5601 for (auto &I : *BB) {
5602
5603 // If there's no pointer operand, there's nothing to do.
5604 auto *Ptr = dyn_cast_or_null<Instruction>(getPointerOperand(&I));
5605 if (!Ptr)
5606 continue;
5607
5608 // True if all users of Ptr are memory accesses that have Ptr as their
5609 // pointer operand.
5610 auto UsersAreMemAccesses = all_of(Ptr->users(), [&](User *U) -> bool {
5611 return getPointerOperand(U) == Ptr;
5612 });
5613
5614 // Ensure the memory instruction will not be scalarized or used by
5615 // gather/scatter, making its pointer operand non-uniform. If the pointer
5616 // operand is used by any instruction other than a memory access, we
5617 // conservatively assume the pointer operand may be non-uniform.
5618 if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
5619 PossibleNonUniformPtrs.insert(Ptr);
5620
5621 // If the memory instruction will be vectorized and its pointer operand
5622 // is consecutive-like or interleaved, the pointer operand should
5623 // remain uniform.
5624 else
5625 ConsecutiveLikePtrs.insert(Ptr);
5626 }
5627
5628 // Add to the Worklist all consecutive and consecutive-like pointers that
5629 // aren't also identified as possibly non-uniform.
5630 for (auto *V : ConsecutiveLikePtrs)
5631 if (!PossibleNonUniformPtrs.count(V)) {
5632 DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
5633 Worklist.insert(V);
5634 }
5635
5636 // Expand Worklist in topological order: whenever a new instruction
5637 // is added, its users should be either already inside Worklist, or
5638 // out of scope. This ensures that a uniform instruction will only be used
5639 // by uniform instructions or out-of-scope instructions.
5640 unsigned idx = 0;
5641 while (idx != Worklist.size()) {
5642 Instruction *I = Worklist[idx++];
5643
5644 for (auto OV : I->operand_values()) {
5645 if (isOutOfScope(OV))
5646 continue;
5647 auto *OI = cast<Instruction>(OV);
5648 if (all_of(OI->users(), [&](User *U) -> bool {
5649 return isOutOfScope(U) || Worklist.count(cast<Instruction>(U));
5650 })) {
5651 Worklist.insert(OI);
5652 DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
5653 }
5654 }
5655 }
5656
5657 // Returns true if Ptr is the pointer operand of a memory access instruction
5658 // I, and I is known to not require scalarization.
5659 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5660 return getPointerOperand(I) == Ptr && isUniformDecision(I, VF);
5661 };
5662
5663 // For an instruction to be added into Worklist above, all its users inside
5664 // the loop should also be in Worklist. However, this condition cannot be
5665 // true for phi nodes that form a cyclic dependence. We must process phi
5666 // nodes separately. An induction variable will remain uniform if all users
5667 // of the induction variable and induction variable update remain uniform.
5668 // The code below handles both pointer and non-pointer induction variables.
5669 for (auto &Induction : *Legal->getInductionVars()) {
5670 auto *Ind = Induction.first;
5671 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5672
5673 // Determine if all users of the induction variable are uniform after
5674 // vectorization.
5675 auto UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
5676 auto *I = cast<Instruction>(U);
5677 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5678 isVectorizedMemAccessUse(I, Ind);
5679 });
5680 if (!UniformInd)
5681 continue;
5682
5683 // Determine if all users of the induction variable update instruction are
5684 // uniform after vectorization.
5685 auto UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
5686 auto *I = cast<Instruction>(U);
5687 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5688 isVectorizedMemAccessUse(I, IndUpdate);
5689 });
5690 if (!UniformIndUpdate)
5691 continue;
5692
5693 // The induction variable and its update instruction will remain uniform.
5694 Worklist.insert(Ind); 5695 Worklist.insert(IndUpdate); 5696 DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n"); 5697 DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n"); 5698 } 5699 5700 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 5701 } 5702 5703 bool LoopVectorizationLegality::canVectorizeMemory() { 5704 LAI = &(*GetLAA)(*TheLoop); 5705 InterleaveInfo.setLAI(LAI); 5706 const OptimizationRemarkAnalysis *LAR = LAI->getReport(); 5707 if (LAR) { 5708 OptimizationRemarkAnalysis VR(Hints->vectorizeAnalysisPassName(), 5709 "loop not vectorized: ", *LAR); 5710 ORE->emit(VR); 5711 } 5712 if (!LAI->canVectorizeMemory()) 5713 return false; 5714 5715 if (LAI->hasStoreToLoopInvariantAddress()) { 5716 ORE->emit(createMissedAnalysis("CantVectorizeStoreToLoopInvariantAddress") 5717 << "write to a loop invariant address could not be vectorized"); 5718 DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n"); 5719 return false; 5720 } 5721 5722 Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks()); 5723 PSE.addPredicate(LAI->getPSE().getUnionPredicate()); 5724 5725 return true; 5726 } 5727 5728 bool LoopVectorizationLegality::isInductionVariable(const Value *V) { 5729 Value *In0 = const_cast<Value *>(V); 5730 PHINode *PN = dyn_cast_or_null<PHINode>(In0); 5731 if (!PN) 5732 return false; 5733 5734 return Inductions.count(PN); 5735 } 5736 5737 bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) { 5738 return FirstOrderRecurrences.count(Phi); 5739 } 5740 5741 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) { 5742 return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT); 5743 } 5744 5745 bool LoopVectorizationLegality::blockCanBePredicated( 5746 BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) { 5747 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel(); 5748 5749 for (Instruction &I : *BB) { 5750 // Check that we don't have a constant expression that can trap as operand. 5751 for (Value *Operand : I.operands()) { 5752 if (auto *C = dyn_cast<Constant>(Operand)) 5753 if (C->canTrap()) 5754 return false; 5755 } 5756 // We might be able to hoist the load. 5757 if (I.mayReadFromMemory()) { 5758 auto *LI = dyn_cast<LoadInst>(&I); 5759 if (!LI) 5760 return false; 5761 if (!SafePtrs.count(LI->getPointerOperand())) { 5762 if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) || 5763 isLegalMaskedGather(LI->getType())) { 5764 MaskedOp.insert(LI); 5765 continue; 5766 } 5767 // !llvm.mem.parallel_loop_access implies if-conversion safety. 5768 if (IsAnnotatedParallel) 5769 continue; 5770 return false; 5771 } 5772 } 5773 5774 if (I.mayWriteToMemory()) { 5775 auto *SI = dyn_cast<StoreInst>(&I); 5776 // We only support predication of stores in basic blocks with one 5777 // predecessor. 5778 if (!SI) 5779 return false; 5780 5781 // Build a masked store if it is legal for the target. 
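      //
      // For illustration (hypothetical loop): a conditional store such as
      //
      //   for (i = 0; i < n; ++i)
      //     if (C[i]) A[i] = x;
      //
      // can be if-converted into a single store whose mask is built from
      // C[i], provided the target reports masked stores (or scatters) of
      // A's element type as legal.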
5782 if (isLegalMaskedStore(SI->getValueOperand()->getType(), 5783 SI->getPointerOperand()) || 5784 isLegalMaskedScatter(SI->getValueOperand()->getType())) { 5785 MaskedOp.insert(SI); 5786 continue; 5787 } 5788 5789 bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0); 5790 bool isSinglePredecessor = SI->getParent()->getSinglePredecessor(); 5791 5792 if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr || 5793 !isSinglePredecessor) 5794 return false; 5795 } 5796 if (I.mayThrow()) 5797 return false; 5798 } 5799 5800 return true; 5801 } 5802 5803 void InterleavedAccessInfo::collectConstStrideAccesses( 5804 MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo, 5805 const ValueToValueMap &Strides) { 5806 5807 auto &DL = TheLoop->getHeader()->getModule()->getDataLayout(); 5808 5809 // Since it's desired that the load/store instructions be maintained in 5810 // "program order" for the interleaved access analysis, we have to visit the 5811 // blocks in the loop in reverse postorder (i.e., in a topological order). 5812 // Such an ordering will ensure that any load/store that may be executed 5813 // before a second load/store will precede the second load/store in 5814 // AccessStrideInfo. 5815 LoopBlocksDFS DFS(TheLoop); 5816 DFS.perform(LI); 5817 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) 5818 for (auto &I : *BB) { 5819 auto *LI = dyn_cast<LoadInst>(&I); 5820 auto *SI = dyn_cast<StoreInst>(&I); 5821 if (!LI && !SI) 5822 continue; 5823 5824 Value *Ptr = getPointerOperand(&I); 5825 // We don't check wrapping here because we don't know yet if Ptr will be 5826 // part of a full group or a group with gaps. Checking wrapping for all 5827 // pointers (even those that end up in groups with no gaps) will be overly 5828 // conservative. For full groups, wrapping should be ok since if we would 5829 // wrap around the address space we would do a memory access at nullptr 5830 // even without the transformation. The wrapping checks are therefore 5831 // deferred until after we've formed the interleaved groups. 5832 int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, 5833 /*Assume=*/true, /*ShouldCheckWrap=*/false); 5834 5835 const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 5836 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); 5837 uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType()); 5838 5839 // An alignment of 0 means target ABI alignment. 5840 unsigned Align = getMemInstAlignment(&I); 5841 if (!Align) 5842 Align = DL.getABITypeAlignment(PtrTy->getElementType()); 5843 5844 AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align); 5845 } 5846 } 5847 5848 // Analyze interleaved accesses and collect them into interleaved load and 5849 // store groups. 5850 // 5851 // When generating code for an interleaved load group, we effectively hoist all 5852 // loads in the group to the location of the first load in program order. When 5853 // generating code for an interleaved store group, we sink all stores to the 5854 // location of the last store. This code motion can change the order of load 5855 // and store instructions and may break dependences. 5856 // 5857 // The code generation strategy mentioned above ensures that we won't violate 5858 // any write-after-read (WAR) dependences. 5859 // 5860 // E.g., for the WAR dependence: a = A[i]; // (1) 5861 // A[i] = b; // (2) 5862 // 5863 // The store group of (2) is always inserted at or below (2), and the load 5864 // group of (1) is always inserted at or above (1). 
Thus, the instructions will 5865 // never be reordered. All other dependences are checked to ensure the 5866 // correctness of the instruction reordering. 5867 // 5868 // The algorithm visits all memory accesses in the loop in bottom-up program 5869 // order. Program order is established by traversing the blocks in the loop in 5870 // reverse postorder when collecting the accesses. 5871 // 5872 // We visit the memory accesses in bottom-up order because it can simplify the 5873 // construction of store groups in the presence of write-after-write (WAW) 5874 // dependences. 5875 // 5876 // E.g., for the WAW dependence: A[i] = a; // (1) 5877 // A[i] = b; // (2) 5878 // A[i + 1] = c; // (3) 5879 // 5880 // We will first create a store group with (3) and (2). (1) can't be added to 5881 // this group because it and (2) are dependent. However, (1) can be grouped 5882 // with other accesses that may precede it in program order. Note that a 5883 // bottom-up order does not imply that WAW dependences should not be checked. 5884 void InterleavedAccessInfo::analyzeInterleaving( 5885 const ValueToValueMap &Strides) { 5886 DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n"); 5887 5888 // Holds all accesses with a constant stride. 5889 MapVector<Instruction *, StrideDescriptor> AccessStrideInfo; 5890 collectConstStrideAccesses(AccessStrideInfo, Strides); 5891 5892 if (AccessStrideInfo.empty()) 5893 return; 5894 5895 // Collect the dependences in the loop. 5896 collectDependences(); 5897 5898 // Holds all interleaved store groups temporarily. 5899 SmallSetVector<InterleaveGroup *, 4> StoreGroups; 5900 // Holds all interleaved load groups temporarily. 5901 SmallSetVector<InterleaveGroup *, 4> LoadGroups; 5902 5903 // Search in bottom-up program order for pairs of accesses (A and B) that can 5904 // form interleaved load or store groups. In the algorithm below, access A 5905 // precedes access B in program order. We initialize a group for B in the 5906 // outer loop of the algorithm, and then in the inner loop, we attempt to 5907 // insert each A into B's group if: 5908 // 5909 // 1. A and B have the same stride, 5910 // 2. A and B have the same memory object size, and 5911 // 3. A belongs in B's group according to its distance from B. 5912 // 5913 // Special care is taken to ensure group formation will not break any 5914 // dependences. 5915 for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend(); 5916 BI != E; ++BI) { 5917 Instruction *B = BI->first; 5918 StrideDescriptor DesB = BI->second; 5919 5920 // Initialize a group for B if it has an allowable stride. Even if we don't 5921 // create a group for B, we continue with the bottom-up algorithm to ensure 5922 // we don't break any of B's dependences. 5923 InterleaveGroup *Group = nullptr; 5924 if (isStrided(DesB.Stride)) { 5925 Group = getInterleaveGroup(B); 5926 if (!Group) { 5927 DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n'); 5928 Group = createInterleaveGroup(B, DesB.Stride, DesB.Align); 5929 } 5930 if (B->mayWriteToMemory()) 5931 StoreGroups.insert(Group); 5932 else 5933 LoadGroups.insert(Group); 5934 } 5935 5936 for (auto AI = std::next(BI); AI != E; ++AI) { 5937 Instruction *A = AI->first; 5938 StrideDescriptor DesA = AI->second; 5939 5940 // Our code motion strategy implies that we can't have dependences 5941 // between accesses in an interleaved group and other accesses located 5942 // between the first and last member of the group. 
Note that this also 5943 // means that a group can't have more than one member at a given offset. 5944 // The accesses in a group can have dependences with other accesses, but 5945 // we must ensure we don't extend the boundaries of the group such that 5946 // we encompass those dependent accesses. 5947 // 5948 // For example, assume we have the sequence of accesses shown below in a 5949 // stride-2 loop: 5950 // 5951 // (1, 2) is a group | A[i] = a; // (1) 5952 // | A[i-1] = b; // (2) | 5953 // A[i-3] = c; // (3) 5954 // A[i] = d; // (4) | (2, 4) is not a group 5955 // 5956 // Because accesses (2) and (3) are dependent, we can group (2) with (1) 5957 // but not with (4). If we did, the dependent access (3) would be within 5958 // the boundaries of the (2, 4) group. 5959 if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) { 5960 5961 // If a dependence exists and A is already in a group, we know that A 5962 // must be a store since A precedes B and WAR dependences are allowed. 5963 // Thus, A would be sunk below B. We release A's group to prevent this 5964 // illegal code motion. A will then be free to form another group with 5965 // instructions that precede it. 5966 if (isInterleaved(A)) { 5967 InterleaveGroup *StoreGroup = getInterleaveGroup(A); 5968 StoreGroups.remove(StoreGroup); 5969 releaseGroup(StoreGroup); 5970 } 5971 5972 // If a dependence exists and A is not already in a group (or it was 5973 // and we just released it), B might be hoisted above A (if B is a 5974 // load) or another store might be sunk below A (if B is a store). In 5975 // either case, we can't add additional instructions to B's group. B 5976 // will only form a group with instructions that it precedes. 5977 break; 5978 } 5979 5980 // At this point, we've checked for illegal code motion. If either A or B 5981 // isn't strided, there's nothing left to do. 5982 if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride)) 5983 continue; 5984 5985 // Ignore A if it's already in a group or isn't the same kind of memory 5986 // operation as B. 5987 if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory()) 5988 continue; 5989 5990 // Check rules 1 and 2. Ignore A if its stride or size is different from 5991 // that of B. 5992 if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size) 5993 continue; 5994 5995 // Calculate the distance from A to B. 5996 const SCEVConstant *DistToB = dyn_cast<SCEVConstant>( 5997 PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev)); 5998 if (!DistToB) 5999 continue; 6000 int64_t DistanceToB = DistToB->getAPInt().getSExtValue(); 6001 6002 // Check rule 3. Ignore A if its distance to B is not a multiple of the 6003 // size. 6004 if (DistanceToB % static_cast<int64_t>(DesB.Size)) 6005 continue; 6006 6007 // Ignore A if either A or B is in a predicated block. Although we 6008 // currently prevent group formation for predicated accesses, we may be 6009 // able to relax this limitation in the future once we handle more 6010 // complicated blocks. 6011 if (isPredicated(A->getParent()) || isPredicated(B->getParent())) 6012 continue; 6013 6014 // The index of A is the index of B plus A's distance to B in multiples 6015 // of the size. 6016 int IndexA = 6017 Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size); 6018 6019 // Try to insert A into B's group. 
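      //
      // For example (hypothetical accesses): for i32 members with a size of
      // 4 bytes, if A stores to A[i + 1] and B stores to A[i], A's distance
      // to B is 4, so A's index is B's index plus one.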
      if (Group->insertMember(A, IndexA, DesA.Align)) {
        DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                     << "    into the interleave group with" << *B << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (InterleaveGroup *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor())
      releaseGroup(Group);

  // Remove interleaved groups with gaps (currently only loads) whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true, making sure we don't exceed the threshold of
  // runtime SCEV assumption checks (thereby potentially failing to vectorize
  // altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer
  // doesn't wrap, then we can deduce that all pointers in the group don't
  // wrap. This means that we can forcefully peel the loop in order to only
  // have to check the first pointer for no-wrap. Once we change to
  // Assume=true, we'll need at most one runtime check per interleaved group.
  for (InterleaveGroup *Group : LoadGroups) {

    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space, we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If the first and last members of the group don't wrap, this
    // implies that all the pointers in the group don't wrap. We therefore
    // check only group member 0 (which is always guaranteed to exist) and
    // group member Factor - 1; if the latter doesn't exist, we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    Value *FirstMemberPtr = getPointerOperand(Group->getMember(0));
    if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                      /*ShouldCheckWrap=*/true)) {
      DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "first group member potentially pointer-wrapping.\n");
      releaseGroup(Group);
      continue;
    }
    Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
    if (LastMember) {
      Value *LastMemberPtr = getPointerOperand(LastMember);
      if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                        /*ShouldCheckWrap=*/true)) {
        DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                        "last group member potentially pointer-wrapping.\n");
        releaseGroup(Group);
      }
    } else {
      // Case 3: A non-reversed interleaved load group with gaps: we need to
      // execute at least one scalar epilogue iteration. This will ensure we
      // don't speculatively access memory out-of-bounds. We only need to look
      // for a member at index Factor - 1, since every group must have a
      // member at index zero.
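      //
      // For example (hypothetical loop): a stride-3 loop that loads
      // A[3 * i] and A[3 * i + 1] but never A[3 * i + 2] forms a group with
      // a gap. The wide load reads the gap element too, so without a scalar
      // epilogue the last vector iteration could read past the end of A.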
6088 if (Group->isReverse()) { 6089 releaseGroup(Group); 6090 continue; 6091 } 6092 DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n"); 6093 RequiresScalarEpilogue = true; 6094 } 6095 } 6096 } 6097 6098 LoopVectorizationCostModel::VectorizationFactor 6099 LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) { 6100 // Width 1 means no vectorize 6101 VectorizationFactor Factor = {1U, 0U}; 6102 if (OptForSize && Legal->getRuntimePointerChecking()->Need) { 6103 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 6104 << "runtime pointer checks needed. Enable vectorization of this " 6105 "loop with '#pragma clang loop vectorize(enable)' when " 6106 "compiling with -Os/-Oz"); 6107 DEBUG(dbgs() 6108 << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); 6109 return Factor; 6110 } 6111 6112 if (!EnableCondStoresVectorization && Legal->getNumPredStores()) { 6113 ORE->emit(createMissedAnalysis("ConditionalStore") 6114 << "store that is conditionally executed prevents vectorization"); 6115 DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n"); 6116 return Factor; 6117 } 6118 6119 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 6120 unsigned SmallestType, WidestType; 6121 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 6122 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 6123 unsigned MaxSafeDepDist = -1U; 6124 6125 // Get the maximum safe dependence distance in bits computed by LAA. If the 6126 // loop contains any interleaved accesses, we divide the dependence distance 6127 // by the maximum interleave factor of all interleaved groups. Note that 6128 // although the division ensures correctness, this is a fairly conservative 6129 // computation because the maximum distance computed by LAA may not involve 6130 // any of the interleaved accesses. 6131 if (Legal->getMaxSafeDepDistBytes() != -1U) 6132 MaxSafeDepDist = 6133 Legal->getMaxSafeDepDistBytes() * 8 / Legal->getMaxInterleaveFactor(); 6134 6135 WidestRegister = 6136 ((WidestRegister < MaxSafeDepDist) ? WidestRegister : MaxSafeDepDist); 6137 unsigned MaxVectorSize = WidestRegister / WidestType; 6138 6139 DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / " 6140 << WidestType << " bits.\n"); 6141 DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister 6142 << " bits.\n"); 6143 6144 if (MaxVectorSize == 0) { 6145 DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 6146 MaxVectorSize = 1; 6147 } 6148 6149 assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements" 6150 " into one vector!"); 6151 6152 unsigned VF = MaxVectorSize; 6153 if (MaximizeBandwidth && !OptForSize) { 6154 // Collect all viable vectorization factors. 6155 SmallVector<unsigned, 8> VFs; 6156 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 6157 for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2) 6158 VFs.push_back(VS); 6159 6160 // For each VF calculate its register usage. 6161 auto RUs = calculateRegisterUsage(VFs); 6162 6163 // Select the largest VF which doesn't require more registers than existing 6164 // ones. 6165 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true); 6166 for (int i = RUs.size() - 1; i >= 0; --i) { 6167 if (RUs[i].MaxLocalUsers <= TargetNumRegisters) { 6168 VF = VFs[i]; 6169 break; 6170 } 6171 } 6172 } 6173 6174 // If we optimize the program for size, avoid creating the tail loop. 
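  //
  // For example (hypothetical values): with a known trip count of 24 and a
  // MaxVectorSize of 8, 24 % 8 == 0, so a VF of 8 leaves no scalar tail.
  // With a trip count of 20, the remainder of 4 would require a tail, and
  // we abort below.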
6175 if (OptForSize) { 6176 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 6177 DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 6178 6179 // If we don't know the precise trip count, don't try to vectorize. 6180 if (TC < 2) { 6181 ORE->emit( 6182 createMissedAnalysis("UnknownLoopCountComplexCFG") 6183 << "unable to calculate the loop count due to complex control flow"); 6184 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); 6185 return Factor; 6186 } 6187 6188 // Find the maximum SIMD width that can fit within the trip count. 6189 VF = TC % MaxVectorSize; 6190 6191 if (VF == 0) 6192 VF = MaxVectorSize; 6193 else { 6194 // If the trip count that we found modulo the vectorization factor is not 6195 // zero then we require a tail. 6196 ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize") 6197 << "cannot optimize for size and vectorize at the " 6198 "same time. Enable vectorization of this loop " 6199 "with '#pragma clang loop vectorize(enable)' " 6200 "when compiling with -Os/-Oz"); 6201 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); 6202 return Factor; 6203 } 6204 } 6205 6206 int UserVF = Hints->getWidth(); 6207 if (UserVF != 0) { 6208 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 6209 DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 6210 6211 Factor.Width = UserVF; 6212 6213 collectUniformsAndScalars(UserVF); 6214 collectInstsToScalarize(UserVF); 6215 return Factor; 6216 } 6217 6218 float Cost = expectedCost(1).first; 6219 #ifndef NDEBUG 6220 const float ScalarCost = Cost; 6221 #endif /* NDEBUG */ 6222 unsigned Width = 1; 6223 DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 6224 6225 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 6226 // Ignore scalar width, because the user explicitly wants vectorization. 6227 if (ForceVectorization && VF > 1) { 6228 Width = 2; 6229 Cost = expectedCost(Width).first / (float)Width; 6230 } 6231 6232 for (unsigned i = 2; i <= VF; i *= 2) { 6233 // Notice that the vector loop needs to be executed less times, so 6234 // we need to divide the cost of the vector loops by the width of 6235 // the vector elements. 6236 VectorizationCostTy C = expectedCost(i); 6237 float VectorCost = C.first / (float)i; 6238 DEBUG(dbgs() << "LV: Vector loop of width " << i 6239 << " costs: " << (int)VectorCost << ".\n"); 6240 if (!C.second && !ForceVectorization) { 6241 DEBUG( 6242 dbgs() << "LV: Not considering vector loop of width " << i 6243 << " because it will not generate any vector instructions.\n"); 6244 continue; 6245 } 6246 if (VectorCost < Cost) { 6247 Cost = VectorCost; 6248 Width = i; 6249 } 6250 } 6251 6252 DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 6253 << "LV: Vectorization seems to be not beneficial, " 6254 << "but was forced by a user.\n"); 6255 DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 6256 Factor.Width = Width; 6257 Factor.Cost = Width * Cost; 6258 return Factor; 6259 } 6260 6261 std::pair<unsigned, unsigned> 6262 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 6263 unsigned MinWidth = -1U; 6264 unsigned MaxWidth = 8; 6265 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 6266 6267 // For each block. 6268 for (BasicBlock *BB : TheLoop->blocks()) { 6269 // For each instruction in the loop. 6270 for (Instruction &I : *BB) { 6271 Type *T = I.getType(); 6272 6273 // Skip ignored values. 
6274 if (ValuesToIgnore.count(&I)) 6275 continue; 6276 6277 // Only examine Loads, Stores and PHINodes. 6278 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 6279 continue; 6280 6281 // Examine PHI nodes that are reduction variables. Update the type to 6282 // account for the recurrence type. 6283 if (auto *PN = dyn_cast<PHINode>(&I)) { 6284 if (!Legal->isReductionVariable(PN)) 6285 continue; 6286 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN]; 6287 T = RdxDesc.getRecurrenceType(); 6288 } 6289 6290 // Examine the stored values. 6291 if (auto *ST = dyn_cast<StoreInst>(&I)) 6292 T = ST->getValueOperand()->getType(); 6293 6294 // Ignore loaded pointer types and stored pointer types that are not 6295 // consecutive. However, we do want to take consecutive stores/loads of 6296 // pointer vectors into account. 6297 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I)) 6298 continue; 6299 6300 MinWidth = std::min(MinWidth, 6301 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 6302 MaxWidth = std::max(MaxWidth, 6303 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 6304 } 6305 } 6306 6307 return {MinWidth, MaxWidth}; 6308 } 6309 6310 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, 6311 unsigned VF, 6312 unsigned LoopCost) { 6313 6314 // -- The interleave heuristics -- 6315 // We interleave the loop in order to expose ILP and reduce the loop overhead. 6316 // There are many micro-architectural considerations that we can't predict 6317 // at this level. For example, frontend pressure (on decode or fetch) due to 6318 // code size, or the number and capabilities of the execution ports. 6319 // 6320 // We use the following heuristics to select the interleave count: 6321 // 1. If the code has reductions, then we interleave to break the cross 6322 // iteration dependency. 6323 // 2. If the loop is really small, then we interleave to reduce the loop 6324 // overhead. 6325 // 3. We don't interleave if we think that we will spill registers to memory 6326 // due to the increased register pressure. 6327 6328 // When we optimize for size, we don't interleave. 6329 if (OptForSize) 6330 return 1; 6331 6332 // We used the distance for the interleave count. 6333 if (Legal->getMaxSafeDepDistBytes() != -1U) 6334 return 1; 6335 6336 // Do not interleave loops with a relatively small trip count. 6337 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 6338 if (TC > 1 && TC < TinyTripCountInterleaveThreshold) 6339 return 1; 6340 6341 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1); 6342 DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 6343 << " registers\n"); 6344 6345 if (VF == 1) { 6346 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 6347 TargetNumRegisters = ForceTargetNumScalarRegs; 6348 } else { 6349 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 6350 TargetNumRegisters = ForceTargetNumVectorRegs; 6351 } 6352 6353 RegisterUsage R = calculateRegisterUsage({VF})[0]; 6354 // We divide by these constants so assume that we have at least one 6355 // instruction that uses at least one register. 6356 R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U); 6357 R.NumInstructions = std::max(R.NumInstructions, 1U); 6358 6359 // We calculate the interleave count using the following formula. 6360 // Subtract the number of loop invariants from the number of available 6361 // registers. These registers are used by all of the interleaved instances. 
6362 // Next, divide the remaining registers by the number of registers that is 6363 // required by the loop, in order to estimate how many parallel instances 6364 // fit without causing spills. All of this is rounded down if necessary to be 6365 // a power of two. We want power of two interleave count to simplify any 6366 // addressing operations or alignment considerations. 6367 unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) / 6368 R.MaxLocalUsers); 6369 6370 // Don't count the induction variable as interleaved. 6371 if (EnableIndVarRegisterHeur) 6372 IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) / 6373 std::max(1U, (R.MaxLocalUsers - 1))); 6374 6375 // Clamp the interleave ranges to reasonable counts. 6376 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF); 6377 6378 // Check if the user has overridden the max. 6379 if (VF == 1) { 6380 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 6381 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 6382 } else { 6383 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 6384 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 6385 } 6386 6387 // If we did not calculate the cost for VF (because the user selected the VF) 6388 // then we calculate the cost of VF here. 6389 if (LoopCost == 0) 6390 LoopCost = expectedCost(VF).first; 6391 6392 // Clamp the calculated IC to be between the 1 and the max interleave count 6393 // that the target allows. 6394 if (IC > MaxInterleaveCount) 6395 IC = MaxInterleaveCount; 6396 else if (IC < 1) 6397 IC = 1; 6398 6399 // Interleave if we vectorized this loop and there is a reduction that could 6400 // benefit from interleaving. 6401 if (VF > 1 && Legal->getReductionVars()->size()) { 6402 DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6403 return IC; 6404 } 6405 6406 // Note that if we've already vectorized the loop we will have done the 6407 // runtime check and so interleaving won't require further checks. 6408 bool InterleavingRequiresRuntimePointerCheck = 6409 (VF == 1 && Legal->getRuntimePointerChecking()->Need); 6410 6411 // We want to interleave small loops in order to reduce the loop overhead and 6412 // potentially expose ILP opportunities. 6413 DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'); 6414 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 6415 // We assume that the cost overhead is 1 and we use the cost model 6416 // to estimate the cost of the loop and interleave until the cost of the 6417 // loop overhead is about 5% of the cost of the loop. 6418 unsigned SmallIC = 6419 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 6420 6421 // Interleave until store/load ports (estimated by max interleave count) are 6422 // saturated. 6423 unsigned NumStores = Legal->getNumStores(); 6424 unsigned NumLoads = Legal->getNumLoads(); 6425 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 6426 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 6427 6428 // If we have a scalar reduction (vector reductions are already dealt with 6429 // by this point), we can increase the critical path length if the loop 6430 // we're interleaving is inside another loop. Limit, by default to 2, so the 6431 // critical path only gets increased by one reduction operation. 
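    //
    // For example: if SmallIC was computed as 4 but this loop is nested and
    // has a scalar reduction, the clamp below (2 by default) keeps the
    // critical path from growing by more than one reduction operation.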
    if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
    return SmallIC;
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  bool HasReductions = (Legal->getReductionVars()->size() > 0);
  if (TTI.enableAggressiveInterleaving(HasReductions)) {
    DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi map that
  // holds the list of intervals that *end* at a specific location. This multi
  // map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map, then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but
  // are used inside the loop. We need this number separately from the
  // max-interval usage number because, when we unroll, loop-invariant values
  // do not take more registers.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;
  RU.NumInstructions = 0;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  typedef DenseMap<Instruction *, unsigned> IntervalMap;
  // Maps an instruction index to the instruction at that index.
  DenseMap<unsigned, Instruction *> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
  SmallSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  unsigned Index = 0;
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    RU.NumInstructions += BB->size();
    for (Instruction &I : *BB) {
      IdxToInstr[Index++] = &I;

      // Save the end location of each USE.
6507 for (Value *U : I.operands()) { 6508 auto *Instr = dyn_cast<Instruction>(U); 6509 6510 // Ignore non-instruction values such as arguments, constants, etc. 6511 if (!Instr) 6512 continue; 6513 6514 // If this instruction is outside the loop then record it and continue. 6515 if (!TheLoop->contains(Instr)) { 6516 LoopInvariants.insert(Instr); 6517 continue; 6518 } 6519 6520 // Overwrite previous end points. 6521 EndPoint[Instr] = Index; 6522 Ends.insert(Instr); 6523 } 6524 } 6525 } 6526 6527 // Saves the list of intervals that end with the index in 'key'. 6528 typedef SmallVector<Instruction *, 2> InstrList; 6529 DenseMap<unsigned, InstrList> TransposeEnds; 6530 6531 // Transpose the EndPoints to a list of values that end at each index. 6532 for (auto &Interval : EndPoint) 6533 TransposeEnds[Interval.second].push_back(Interval.first); 6534 6535 SmallSet<Instruction *, 8> OpenIntervals; 6536 6537 // Get the size of the widest register. 6538 unsigned MaxSafeDepDist = -1U; 6539 if (Legal->getMaxSafeDepDistBytes() != -1U) 6540 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 6541 unsigned WidestRegister = 6542 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist); 6543 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 6544 6545 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6546 SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0); 6547 6548 DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6549 6550 // A lambda that gets the register usage for the given type and VF. 6551 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 6552 if (Ty->isTokenTy()) 6553 return 0U; 6554 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 6555 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 6556 }; 6557 6558 for (unsigned int i = 0; i < Index; ++i) { 6559 Instruction *I = IdxToInstr[i]; 6560 6561 // Remove all of the instructions that end at this location. 6562 InstrList &List = TransposeEnds[i]; 6563 for (Instruction *ToRemove : List) 6564 OpenIntervals.erase(ToRemove); 6565 6566 // Ignore instructions that are never used within the loop. 6567 if (!Ends.count(I)) 6568 continue; 6569 6570 // Skip ignored values. 6571 if (ValuesToIgnore.count(I)) 6572 continue; 6573 6574 // For each VF find the maximum usage of registers. 6575 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6576 if (VFs[j] == 1) { 6577 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 6578 continue; 6579 } 6580 collectUniformsAndScalars(VFs[j]); 6581 // Count the number of live intervals. 6582 unsigned RegUsage = 0; 6583 for (auto Inst : OpenIntervals) { 6584 // Skip ignored values for VF > 1. 6585 if (VecValuesToIgnore.count(Inst) || 6586 isScalarAfterVectorization(Inst, VFs[j])) 6587 continue; 6588 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 6589 } 6590 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 6591 } 6592 6593 DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6594 << OpenIntervals.size() << '\n'); 6595 6596 // Add the current instruction to the list of open intervals. 
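    //
    // For illustration: once inserted, I stays in OpenIntervals until the
    // scan reaches the index of its last use, where the erase at the top of
    // this loop closes the interval. The peak size of OpenIntervals over the
    // scan is the register usage estimate.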
6597 OpenIntervals.insert(I); 6598 } 6599 6600 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6601 unsigned Invariant = 0; 6602 if (VFs[i] == 1) 6603 Invariant = LoopInvariants.size(); 6604 else { 6605 for (auto Inst : LoopInvariants) 6606 Invariant += GetRegUsage(Inst->getType(), VFs[i]); 6607 } 6608 6609 DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); 6610 DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); 6611 DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n'); 6612 DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n'); 6613 6614 RU.LoopInvariantRegs = Invariant; 6615 RU.MaxLocalUsers = MaxUsages[i]; 6616 RUs[i] = RU; 6617 } 6618 6619 return RUs; 6620 } 6621 6622 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) { 6623 6624 // If we aren't vectorizing the loop, or if we've already collected the 6625 // instructions to scalarize, there's nothing to do. Collection may already 6626 // have occurred if we have a user-selected VF and are now computing the 6627 // expected cost for interleaving. 6628 if (VF < 2 || InstsToScalarize.count(VF)) 6629 return; 6630 6631 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 6632 // not profitable to scalarize any instructions, the presence of VF in the 6633 // map will indicate that we've analyzed it already. 6634 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6635 6636 // Find all the instructions that are scalar with predication in the loop and 6637 // determine if it would be better to not if-convert the blocks they are in. 6638 // If so, we also record the instructions to scalarize. 6639 for (BasicBlock *BB : TheLoop->blocks()) { 6640 if (!Legal->blockNeedsPredication(BB)) 6641 continue; 6642 for (Instruction &I : *BB) 6643 if (Legal->isScalarWithPredication(&I)) { 6644 ScalarCostsTy ScalarCosts; 6645 if (computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6646 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6647 } 6648 } 6649 } 6650 6651 int LoopVectorizationCostModel::computePredInstDiscount( 6652 Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts, 6653 unsigned VF) { 6654 6655 assert(!isUniformAfterVectorization(PredInst, VF) && 6656 "Instruction marked uniform-after-vectorization will be predicated"); 6657 6658 // Initialize the discount to zero, meaning that the scalar version and the 6659 // vector version cost the same. 6660 int Discount = 0; 6661 6662 // Holds instructions to analyze. The instructions we visit are mapped in 6663 // ScalarCosts. Those instructions are the ones that would be scalarized if 6664 // we find that the scalar version costs less. 6665 SmallVector<Instruction *, 8> Worklist; 6666 6667 // Returns true if the given instruction can be scalarized. 6668 auto canBeScalarized = [&](Instruction *I) -> bool { 6669 6670 // We only attempt to scalarize instructions forming a single-use chain 6671 // from the original predicated block that would otherwise be vectorized. 6672 // Although not strictly necessary, we give up on instructions we know will 6673 // already be scalar to avoid traversing chains that are unlikely to be 6674 // beneficial. 6675 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6676 isScalarAfterVectorization(I, VF)) 6677 return false; 6678 6679 // If the instruction is scalar with predication, it will be analyzed 6680 // separately. We ignore it within the context of PredInst. 
6681 if (Legal->isScalarWithPredication(I)) 6682 return false; 6683 6684 // If any of the instruction's operands are uniform after vectorization, 6685 // the instruction cannot be scalarized. This prevents, for example, a 6686 // masked load from being scalarized. 6687 // 6688 // We assume we will only emit a value for lane zero of an instruction 6689 // marked uniform after vectorization, rather than VF identical values. 6690 // Thus, if we scalarize an instruction that uses a uniform, we would 6691 // create uses of values corresponding to the lanes we aren't emitting code 6692 // for. This behavior can be changed by allowing getScalarValue to clone 6693 // the lane zero values for uniforms rather than asserting. 6694 for (Use &U : I->operands()) 6695 if (auto *J = dyn_cast<Instruction>(U.get())) 6696 if (isUniformAfterVectorization(J, VF)) 6697 return false; 6698 6699 // Otherwise, we can scalarize the instruction. 6700 return true; 6701 }; 6702 6703 // Returns true if an operand that cannot be scalarized must be extracted 6704 // from a vector. We will account for this scalarization overhead below. Note 6705 // that the non-void predicated instructions are placed in their own blocks, 6706 // and their return values are inserted into vectors. Thus, an extract would 6707 // still be required. 6708 auto needsExtract = [&](Instruction *I) -> bool { 6709 return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF); 6710 }; 6711 6712 // Compute the expected cost discount from scalarizing the entire expression 6713 // feeding the predicated instruction. We currently only consider expressions 6714 // that are single-use instruction chains. 6715 Worklist.push_back(PredInst); 6716 while (!Worklist.empty()) { 6717 Instruction *I = Worklist.pop_back_val(); 6718 6719 // If we've already analyzed the instruction, there's nothing to do. 6720 if (ScalarCosts.count(I)) 6721 continue; 6722 6723 // Compute the cost of the vector instruction. Note that this cost already 6724 // includes the scalarization overhead of the predicated instruction. 6725 unsigned VectorCost = getInstructionCost(I, VF).first; 6726 6727 // Compute the cost of the scalarized instruction. This cost is the cost of 6728 // the instruction as if it wasn't if-converted and instead remained in the 6729 // predicated block. We will scale this cost by block probability after 6730 // computing the scalarization overhead. 6731 unsigned ScalarCost = VF * getInstructionCost(I, 1).first; 6732 6733 // Compute the scalarization overhead of needed insertelement instructions 6734 // and phi nodes. 6735 if (Legal->isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 6736 ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF), 6737 true, false); 6738 ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI); 6739 } 6740 6741 // Compute the scalarization overhead of needed extractelement 6742 // instructions. For each of the instruction's operands, if the operand can 6743 // be scalarized, add it to the worklist; otherwise, account for the 6744 // overhead. 6745 for (Use &U : I->operands()) 6746 if (auto *J = dyn_cast<Instruction>(U.get())) { 6747 assert(VectorType::isValidElementType(J->getType()) && 6748 "Instruction has non-scalar type"); 6749 if (canBeScalarized(J)) 6750 Worklist.push_back(J); 6751 else if (needsExtract(J)) 6752 ScalarCost += TTI.getScalarizationOverhead( 6753 ToVectorTy(J->getType(),VF), false, true); 6754 } 6755 6756 // Scale the total scalar cost by block probability. 
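    //
    // For example (assuming a reciprocal block probability of 2): a raw
    // scalar cost of 12 scales to 6; if the vector cost is 8, the discount
    // below grows by 2, i.e., scalarizing this chain is expected to save
    // two units over the if-converted vector form.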
6757 ScalarCost /= getReciprocalPredBlockProb(); 6758 6759 // Compute the discount. A non-negative discount means the vector version 6760 // of the instruction costs more, and scalarizing would be beneficial. 6761 Discount += VectorCost - ScalarCost; 6762 ScalarCosts[I] = ScalarCost; 6763 } 6764 6765 return Discount; 6766 } 6767 6768 LoopVectorizationCostModel::VectorizationCostTy 6769 LoopVectorizationCostModel::expectedCost(unsigned VF) { 6770 VectorizationCostTy Cost; 6771 6772 // Collect Uniform and Scalar instructions after vectorization with VF. 6773 collectUniformsAndScalars(VF); 6774 6775 // Collect the instructions (and their associated costs) that will be more 6776 // profitable to scalarize. 6777 collectInstsToScalarize(VF); 6778 6779 // For each block. 6780 for (BasicBlock *BB : TheLoop->blocks()) { 6781 VectorizationCostTy BlockCost; 6782 6783 // For each instruction in the old loop. 6784 for (Instruction &I : *BB) { 6785 // Skip dbg intrinsics. 6786 if (isa<DbgInfoIntrinsic>(I)) 6787 continue; 6788 6789 // Skip ignored values. 6790 if (ValuesToIgnore.count(&I)) 6791 continue; 6792 6793 VectorizationCostTy C = getInstructionCost(&I, VF); 6794 6795 // Check if we should override the cost. 6796 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 6797 C.first = ForceTargetInstructionCost; 6798 6799 BlockCost.first += C.first; 6800 BlockCost.second |= C.second; 6801 DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF " 6802 << VF << " For instruction: " << I << '\n'); 6803 } 6804 6805 // If we are vectorizing a predicated block, it will have been 6806 // if-converted. This means that the block's instructions (aside from 6807 // stores and instructions that may divide by zero) will now be 6808 // unconditionally executed. For the scalar case, we may not always execute 6809 // the predicated block. Thus, scale the block's cost by the probability of 6810 // executing it. 6811 if (VF == 1 && Legal->blockNeedsPredication(BB)) 6812 BlockCost.first /= getReciprocalPredBlockProb(); 6813 6814 Cost.first += BlockCost.first; 6815 Cost.second |= BlockCost.second; 6816 } 6817 6818 return Cost; 6819 } 6820 6821 /// \brief Gets Address Access SCEV after verifying that the access pattern 6822 /// is loop invariant except the induction variable dependence. 6823 /// 6824 /// This SCEV can be sent to the Target in order to estimate the address 6825 /// calculation cost. 6826 static const SCEV *getAddressAccessSCEV( 6827 Value *Ptr, 6828 LoopVectorizationLegality *Legal, 6829 ScalarEvolution *SE, 6830 const Loop *TheLoop) { 6831 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6832 if (!Gep) 6833 return nullptr; 6834 6835 // We are looking for a gep with all loop invariant indices except for one 6836 // which should be an induction variable. 6837 unsigned NumOperands = Gep->getNumOperands(); 6838 for (unsigned i = 1; i < NumOperands; ++i) { 6839 Value *Opd = Gep->getOperand(i); 6840 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6841 !Legal->isInductionVariable(Opd)) 6842 return nullptr; 6843 } 6844 6845 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 
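  //
  // For illustration (hypothetical IR): a GEP such as
  //
  //   %gep = getelementptr [64 x [8 x i32]], [64 x [8 x i32]]* %base,
  //          i64 %inv, i64 %ind, i64 %inv2
  //
  // qualifies because every index is either loop invariant or an induction
  // variable; its SCEV is then handed to the target to estimate the address
  // computation cost.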
6846 return SE->getSCEV(Ptr); 6847 } 6848 6849 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6850 return Legal->hasStride(I->getOperand(0)) || 6851 Legal->hasStride(I->getOperand(1)); 6852 } 6853 6854 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6855 unsigned VF) { 6856 Type *ValTy = getMemInstValueType(I); 6857 auto SE = PSE.getSE(); 6858 6859 unsigned Alignment = getMemInstAlignment(I); 6860 unsigned AS = getMemInstAddressSpace(I); 6861 Value *Ptr = getPointerOperand(I); 6862 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6863 6864 // Figure out whether the access is strided and get the stride value 6865 // if it's known in compile time 6866 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, SE, TheLoop); 6867 6868 // Get the cost of the scalar memory instruction and address computation. 6869 unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6870 6871 Cost += VF * 6872 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6873 AS); 6874 6875 // Get the overhead of the extractelement and insertelement instructions 6876 // we might create due to scalarization. 6877 Cost += getScalarizationOverhead(I, VF, TTI); 6878 6879 // If we have a predicated store, it may not be executed for each vector 6880 // lane. Scale the cost by the probability of executing the predicated 6881 // block. 6882 if (Legal->isScalarWithPredication(I)) 6883 Cost /= getReciprocalPredBlockProb(); 6884 6885 return Cost; 6886 } 6887 6888 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6889 unsigned VF) { 6890 Type *ValTy = getMemInstValueType(I); 6891 Type *VectorTy = ToVectorTy(ValTy, VF); 6892 unsigned Alignment = getMemInstAlignment(I); 6893 Value *Ptr = getPointerOperand(I); 6894 unsigned AS = getMemInstAddressSpace(I); 6895 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 6896 6897 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6898 "Stride should be 1 or -1 for consecutive memory access"); 6899 unsigned Cost = 0; 6900 if (Legal->isMaskRequired(I)) 6901 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 6902 else 6903 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 6904 6905 bool Reverse = ConsecutiveStride < 0; 6906 if (Reverse) 6907 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6908 return Cost; 6909 } 6910 6911 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6912 unsigned VF) { 6913 LoadInst *LI = cast<LoadInst>(I); 6914 Type *ValTy = LI->getType(); 6915 Type *VectorTy = ToVectorTy(ValTy, VF); 6916 unsigned Alignment = LI->getAlignment(); 6917 unsigned AS = LI->getPointerAddressSpace(); 6918 6919 return TTI.getAddressComputationCost(ValTy) + 6920 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) + 6921 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6922 } 6923 6924 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 6925 unsigned VF) { 6926 Type *ValTy = getMemInstValueType(I); 6927 Type *VectorTy = ToVectorTy(ValTy, VF); 6928 unsigned Alignment = getMemInstAlignment(I); 6929 Value *Ptr = getPointerOperand(I); 6930 6931 return TTI.getAddressComputationCost(VectorTy) + 6932 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 6933 Legal->isMaskRequired(I), Alignment); 6934 } 6935 6936 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 6937 unsigned VF) { 6938 Type *ValTy = 
getMemInstValueType(I);
  Type *VectorTy = ToVectorTy(ValTy, VF);
  unsigned AS = getMemInstAddressSpace(I);

  auto Group = Legal->getInterleavedAccessGroup(I);
  assert(Group && "Fail to get an interleaved access group.");

  unsigned InterleaveFactor = Group->getFactor();
  Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);

  // Holds the indices of existing members in an interleaved load group.
  // An interleaved store group doesn't need this as it doesn't allow gaps.
  SmallVector<unsigned, 4> Indices;
  if (isa<LoadInst>(I)) {
    for (unsigned i = 0; i < InterleaveFactor; i++)
      if (Group->getMember(i))
        Indices.push_back(i);
  }

  // Calculate the cost of the whole interleaved group.
  unsigned Cost = TTI.getInterleavedMemoryOpCost(I->getOpcode(), WideVecTy,
                                                 Group->getFactor(), Indices,
                                                 Group->getAlignment(), AS);

  if (Group->isReverse())
    Cost += Group->getNumMembers() *
            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
  return Cost;
}

unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
                                                              unsigned VF) {

  // Calculate the scalar cost only. The vectorization cost should be ready at
  // this moment.
  if (VF == 1) {
    Type *ValTy = getMemInstValueType(I);
    unsigned Alignment = getMemInstAlignment(I);
    unsigned AS = getMemInstAddressSpace(I);

    return TTI.getAddressComputationCost(ValTy) +
           TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS);
  }
  return getWideningCost(I, VF);
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
  // If we know that this instruction will remain uniform, check the cost of
  // the scalar version.
  if (isUniformAfterVectorization(I, VF))
    VF = 1;

  if (VF > 1 && isProfitableToScalarize(I, VF))
    return VectorizationCostTy(InstsToScalarize[VF][I], false);

  Type *VectorTy;
  unsigned C = getInstructionCost(I, VF, VectorTy);

  bool TypeNotScalarized =
      VF > 1 && !VectorTy->isVoidTy() && TTI.getNumberOfParts(VectorTy) < VF;
  return VectorizationCostTy(C, TypeNotScalarized);
}

void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
  if (VF == 1)
    return;
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the old loop.
    for (Instruction &I : *BB) {
      Value *Ptr = getPointerOperand(&I);
      if (!Ptr)
        continue;

      if (isa<LoadInst>(&I) && Legal->isUniform(Ptr)) {
        // Scalar load + broadcast.
        unsigned Cost = getUniformMemOpCost(&I, VF);
        setWideningDecision(&I, VF, CM_Scalarize, Cost);
        continue;
      }

      // We assume that widening is the best solution when possible.
      if (Legal->memoryInstructionCanBeWidened(&I, VF)) {
        unsigned Cost = getConsecutiveMemOpCost(&I, VF);
        setWideningDecision(&I, VF, CM_Widen, Cost);
        continue;
      }

      // Choose between Interleaving, Gather/Scatter or Scalarization.
      unsigned InterleaveCost = UINT_MAX;
      unsigned NumAccesses = 1;
      if (Legal->isAccessInterleaved(&I)) {
        auto Group = Legal->getInterleavedAccessGroup(&I);
        assert(Group && "Fail to get an interleaved access group.");

        // Make one decision for the whole group.
7034 if (getWideningDecision(&I, VF) != CM_Unknown) 7035 continue; 7036 7037 NumAccesses = Group->getNumMembers(); 7038 InterleaveCost = getInterleaveGroupCost(&I, VF); 7039 } 7040 7041 unsigned GatherScatterCost = 7042 Legal->isLegalGatherOrScatter(&I) 7043 ? getGatherScatterCost(&I, VF) * NumAccesses 7044 : UINT_MAX; 7045 7046 unsigned ScalarizationCost = 7047 getMemInstScalarizationCost(&I, VF) * NumAccesses; 7048 7049 // Choose better solution for the current VF, 7050 // write down this decision and use it during vectorization. 7051 unsigned Cost; 7052 InstWidening Decision; 7053 if (InterleaveCost <= GatherScatterCost && 7054 InterleaveCost < ScalarizationCost) { 7055 Decision = CM_Interleave; 7056 Cost = InterleaveCost; 7057 } else if (GatherScatterCost < ScalarizationCost) { 7058 Decision = CM_GatherScatter; 7059 Cost = GatherScatterCost; 7060 } else { 7061 Decision = CM_Scalarize; 7062 Cost = ScalarizationCost; 7063 } 7064 // If the instructions belongs to an interleave group, the whole group 7065 // receives the same decision. The whole group receives the cost, but 7066 // the cost will actually be assigned to one instruction. 7067 if (auto Group = Legal->getInterleavedAccessGroup(&I)) 7068 setWideningDecision(Group, VF, Decision, Cost); 7069 else 7070 setWideningDecision(&I, VF, Decision, Cost); 7071 } 7072 } 7073 } 7074 7075 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 7076 unsigned VF, 7077 Type *&VectorTy) { 7078 Type *RetTy = I->getType(); 7079 if (canTruncateToMinimalBitwidth(I, VF)) 7080 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 7081 VectorTy = ToVectorTy(RetTy, VF); 7082 auto SE = PSE.getSE(); 7083 7084 // TODO: We need to estimate the cost of intrinsic calls. 7085 switch (I->getOpcode()) { 7086 case Instruction::GetElementPtr: 7087 // We mark this instruction as zero-cost because the cost of GEPs in 7088 // vectorized code depends on whether the corresponding memory instruction 7089 // is scalarized or not. Therefore, we handle GEPs with the memory 7090 // instruction cost. 7091 return 0; 7092 case Instruction::Br: { 7093 return TTI.getCFInstrCost(I->getOpcode()); 7094 } 7095 case Instruction::PHI: { 7096 auto *Phi = cast<PHINode>(I); 7097 7098 // First-order recurrences are replaced by vector shuffles inside the loop. 7099 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 7100 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 7101 VectorTy, VF - 1, VectorTy); 7102 7103 // TODO: IF-converted IFs become selects. 7104 return 0; 7105 } 7106 case Instruction::UDiv: 7107 case Instruction::SDiv: 7108 case Instruction::URem: 7109 case Instruction::SRem: 7110 // If we have a predicated instruction, it may not be executed for each 7111 // vector lane. Get the scalarization cost and scale this amount by the 7112 // probability of executing the predicated block. If the instruction is not 7113 // predicated, we fall through to the next case. 7114 if (VF > 1 && Legal->isScalarWithPredication(I)) { 7115 unsigned Cost = 0; 7116 7117 // These instructions have a non-void type, so account for the phi nodes 7118 // that we will create. This cost is likely to be zero. The phi node 7119 // cost, if any, should be scaled by the block probability because it 7120 // models a copy at the end of each predicated block. 7121 Cost += VF * TTI.getCFInstrCost(Instruction::PHI); 7122 7123 // The cost of the non-predicated instruction. 
      Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);

      // The cost of insertelement and extractelement instructions needed
      // for scalarization.
      Cost += getScalarizationOverhead(I, VF, TTI);

      // Scale the cost by the probability of executing the predicated
      // blocks. This assumes the predicated block for each vector lane is
      // equally likely.
      return Cost / getReciprocalPredBlockProb();
    }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go
    // away.
    if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
      return 0;
    // Certain instructions can be cheaper to vectorize if they have a
    // constant second vector operand. One example of this are shifts on
    // x86.
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueProperties Op1VP =
        TargetTransformInfo::OP_None;
    TargetTransformInfo::OperandValueProperties Op2VP =
        TargetTransformInfo::OP_None;
    Value *Op2 = I->getOperand(1);

    // Check for a splat or for a non-uniform vector of constants.
    if (auto *CInt = dyn_cast<ConstantInt>(Op2)) {
      if (CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;
      Op2VK = TargetTransformInfo::OK_UniformConstantValue;
    } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
      Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      Constant *SplatValue = cast<Constant>(Op2)->getSplatValue();
      if (SplatValue) {
        ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue);
        if (CInt && CInt->getValue().isPowerOf2())
          Op2VP = TargetTransformInfo::OP_PowerOf2;
        Op2VK = TargetTransformInfo::OK_UniformConstantValue;
      }
    } else if (Legal->isUniform(Op2)) {
      Op2VK = TargetTransformInfo::OK_UniformValue;
    }
    SmallVector<const Value *, 4> Operands(I->operand_values());
    return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, Op2VK,
                                      Op1VP, Op2VP, Operands);
  }
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
    bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
    Type *CondTy = SI->getCondition()->getType();
    if (!ScalarCond)
      CondTy = VectorType::get(CondTy, VF);

    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy);
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ValTy = I->getOperand(0)->getType();
    Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
    if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
      ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
    VectorTy = ToVectorTy(ValTy, VF);
    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy);
  }
  case Instruction::Store:
  case Instruction::Load: {
    VectorTy = ToVectorTy(getMemInstValueType(I), VF);
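    // (For VF > 1 this returns the cost that setCostBasedWideningDecision
    // recorded for the chosen strategy -- widen, interleave, gather/scatter,
    // or scalarize -- via getWideningCost.)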
    return getMemoryInstructionCost(I, VF);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    // We optimize the truncation of induction variables. The cost of such a
    // truncation is the same as the cost of the scalar operation.
    if (I->getOpcode() == Instruction::Trunc &&
        Legal->isInductionVariable(I->getOperand(0)))
      return TTI.getCastInstrCost(I->getOpcode(), I->getType(),
                                  I->getOperand(0)->getType());

    Type *SrcScalarTy = I->getOperand(0)->getType();
    Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF);
    if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or turn it
      // into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
      //
      // Calculate the modified src and dest types.
      Type *MinVecTy = VectorTy;
      if (I->getOpcode() == Instruction::Trunc) {
        SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      } else if (I->getOpcode() == Instruction::ZExt ||
                 I->getOpcode() == Instruction::SExt) {
        SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      }
    }

    return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy);
  }
  case Instruction::Call: {
    bool NeedToScalarize;
    CallInst *CI = cast<CallInst>(I);
    unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
    if (getVectorIntrinsicIDForCall(CI, TLI))
      return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
    return CallCost;
  }
  default:
    // This opcode is unknown. Assume that the cost of executing VF copies
    // of the scalar instruction is the same as that of 'mul', plus the
    // overhead of the insertelement and extractelement instructions.
    return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) +
           getScalarizationOverhead(I, VF, TTI);
  } // end of switch.
}

char LoopVectorize::ID = 0;
static const char lv_name[] = "Loop Vectorization";
INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)

namespace llvm {
Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
  return new LoopVectorize(NoUnrolling, AlwaysVectorize);
}
}

bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
  // Check if the pointer operand of a load or store instruction is
  // consecutive.
  if (auto *Ptr = getPointerOperand(Inst))
    return Legal->isConsecutivePtr(Ptr);
  return false;
}

void LoopVectorizationCostModel::collectValuesToIgnore() {
  // Ignore ephemeral values.
  CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);

  // Ignore type-promoting instructions we identified during reduction
  // detection.
  for (auto &Reduction : *Legal->getReductionVars()) {
    RecurrenceDescriptor &RedDes = Reduction.second;
    SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }
}

void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
                                             bool IfPredicateInstr) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
  // Holds vector parameters or scalars, in case of uniform values.
  SmallVector<VectorParts, 4> Params;

  setDebugLocFromInst(Builder, Instr);

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  // Initialize a new scalar map entry.
  ScalarParts Entry(UF);

  VectorParts Cond;
  if (IfPredicateInstr)
    Cond = createBlockInMask(Instr->getParent());

  // For each vector unroll 'part':
  for (unsigned Part = 0; Part < UF; ++Part) {
    Entry[Part].resize(1);
    // For each scalar that we create:

    // Start an "if (pred) a[i] = ..." block.
    Value *Cmp = nullptr;
    if (IfPredicateInstr) {
      if (Cond[Part]->getType()->isVectorTy())
        Cond[Part] =
            Builder.CreateExtractElement(Cond[Part], Builder.getInt32(0));
      Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cond[Part],
                               ConstantInt::get(Cond[Part]->getType(), 1));
    }

    Instruction *Cloned = Instr->clone();
    if (!IsVoidRetTy)
      Cloned->setName(Instr->getName() + ".cloned");

    // Replace the operands of the cloned instructions with their scalar
    // equivalents in the new loop.
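    // (For instance, if Instr is "%add = add i32 %a, %b" and UF is 2, the
    // Part-1 clone takes the Part-1 scalar copies of %a and %b from the
    // value map; the names here are illustrative only.)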
    for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
      auto *NewOp = getScalarValue(Instr->getOperand(op), Part, 0);
      Cloned->setOperand(op, NewOp);
    }

    // Place the cloned scalar in the new loop.
    Builder.Insert(Cloned);

    // Add the cloned scalar to the scalar map entry.
    Entry[Part][0] = Cloned;

    // If we just cloned a new assumption, add it to the assumption cache.
    if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
      if (II->getIntrinsicID() == Intrinsic::assume)
        AC->registerAssumption(II);

    // End if-block.
    if (IfPredicateInstr)
      PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp));
  }
  VectorLoopValueMap.initScalar(Instr, Entry);
}

void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) {
  auto *SI = dyn_cast<StoreInst>(Instr);
  bool IfPredicateInstr = (SI && Legal->blockNeedsPredication(SI->getParent()));

  return scalarizeInstruction(Instr, IfPredicateInstr);
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
                                        Instruction::BinaryOps BinOp) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
  Type *Ty = Val->getType();
  assert(!Ty->isVectorTy() && "Val must be a scalar");

  if (Ty->isFloatingPointTy()) {
    Constant *C = ConstantFP::get(Ty, (double)StartIdx);

    // Floating-point operations had to be 'fast' to enable the unrolling.
    Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
    return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
  }
  Constant *C = ConstantInt::get(Ty, StartIdx);
  return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
}

static void AddRuntimeUnrollDisableMetaData(Loop *L) {
  SmallVector<Metadata *, 4> MDs;
  // Reserve first location for self-reference to the LoopID metadata node.
  MDs.push_back(nullptr);
  bool IsUnrollMetadata = false;
  MDNode *LoopID = L->getLoopID();
  if (LoopID) {
    // First find existing loop unrolling disable metadata.
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
      if (MD) {
        const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata |=
            S && S->getString().startswith("llvm.loop.unroll.disable");
      }
      MDs.push_back(LoopID->getOperand(i));
    }
  }

  if (!IsUnrollMetadata) {
    // Add runtime unroll disable metadata.
    LLVMContext &Context = L->getHeader()->getContext();
    SmallVector<Metadata *, 1> DisableOperands;
    DisableOperands.push_back(
        MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
    MDNode *DisableNode = MDNode::get(Context, DisableOperands);
    MDs.push_back(DisableNode);
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
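    // The result is the usual self-referential loop ID, e.g.:
    //
    //   !0 = distinct !{!0, !1}
    //   !1 = !{!"llvm.loop.unroll.runtime.disable"}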
    NewLoopID->replaceOperandWith(0, NewLoopID);
    L->setLoopID(NewLoopID);
  }
}

bool LoopVectorizePass::processLoop(Loop *L) {
  assert(L->empty() && "Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  DEBUG(dbgs() << "\nLV: Checking a loop in \""
               << L->getHeader()->getParent()->getName() << "\" from "
               << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, DisableUnrolling, *ORE);

  DEBUG(dbgs() << "LV: Loop hints:"
               << " force="
               << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                       ? "disabled"
                       : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                              ? "enabled"
                              : "?"))
               << " width=" << Hints.getWidth()
               << " unroll=" << Hints.getInterleave() << "\n");

  // Function containing the loop.
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it is
  // important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are used
  // for the less verbose reporting of vectorized loops and of unvectorized
  // loops that may benefit from vectorization, respectively.

  if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
    DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  // Check the loop against the trip count threshold:
  // do not vectorize loops with a tiny trip count.
  const unsigned MaxTC = SE->getSmallConstantMaxTripCount(L);
  if (MaxTC > 0u && MaxTC < TinyTripCountVectorThreshold) {
    DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                 << "This loop is not worth vectorizing.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      DEBUG(dbgs() << "\n");
      ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                     "NotBeneficial", L)
                << "vectorization is not beneficial "
                   "and is not explicitly forced");
      return false;
    }
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements(*ORE);
  LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE,
                                &Requirements, &Hints);
  if (!LVL.canVectorize()) {
    DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints);
  CM.collectValuesToIgnore();

  // Check the function attributes to find out if this function should be
  // optimized for size.
  bool OptForSize =
      Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();

  // Compute the weighted frequency of this loop being executed and see if it
  // is less than 20% of the function entry baseline frequency. Note that we
  // always have a canonical loop here because we think we *can* vectorize.
  // FIXME: This is hidden behind a flag due to pervasive problems with
  // exactly what block frequency models.
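  // (A concrete sketch: ColdEntryFreq is set in runImpl to 20% of the
  // function entry frequency, so with an entry frequency of 1000 a loop
  // whose preheader frequency is below 200 is considered cold and is
  // optimized for size unless vectorization was explicitly forced.)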
  if (LoopVectorizeWithBlockFrequency) {
    BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader());
    if (Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
        LoopEntryFreq < ColdEntryFreq)
      OptForSize = true;
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem possibly correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat "
                    "attribute is used.\n");
    ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                   "NoImplicitFloat", L)
              << "loop not vectorized due to NoImplicitFloat attribute");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
    ORE->emit(
        createMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
        << "loop not vectorized due to unsafe FP support.");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Select the optimal vectorization factor.
  const LoopVectorizationCostModel::VectorizationFactor VF =
      CM.selectVectorizationFactor(OptForSize);

  // Select the interleave count.
  unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);

  // Get the user's interleave count.
  unsigned UserIC = Hints.getInterleave();

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                    "requirements.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  if (VF.Width == 1) {
    DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly
    // disabled.
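    // (UserIC == 1 means the user explicitly requested an interleave count
    // of 1, e.g. via "#pragma clang loop interleave_count(1)", which the
    // hints read back from llvm.loop.interleave.count metadata.)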
    DEBUG(dbgs()
          << "LV: Interleaving is beneficial but is explicitly disabled.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if the user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit(OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << VecDiagMsg.second);
    ORE->emit(OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << IntDiagMsg.second);
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit(OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << VecDiagMsg.second);
  } else if (VectorizeLoop && !InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    ORE->emit(OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << IntDiagMsg.second);
  } else if (VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  using namespace ore;
  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not profitable to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    Unroller.vectorize();

    ORE->emit(OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                 L->getHeader())
              << "interleaved loop (interleave count: "
              << NV("InterleaveCount", IC) << ")");
  } else {
    // If we decided that it is both legal and profitable to vectorize the
    // loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LB.vectorize();
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling of the scalar loop when
    // there are no runtime checks about strides and memory. A scalar loop
    // that is rarely used is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      AddRuntimeUnrollDisableMetaData(L);

    // Report the vectorization decision.
    ORE->emit(OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                 L->getHeader())
              << "vectorized loop (vectorization width: "
              << NV("VectorizationFactor", VF.Width)
              << ", interleave count: " << NV("InterleaveCount", IC) << ")");
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
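  // (setAlreadyVectorized rewrites the loop's hint metadata; on a later run
  // the width and interleave hints would then read as 1, which is treated
  // as "nothing left to do".)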
  Hints.setAlreadyVectorized();

  DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;

  // Compute some weights outside of the loop over the loops. Compute this
  // using a BranchProbability to re-use its scaling math.
  const BranchProbability ColdProb(1, 5); // 20%
  ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb;

  // Don't attempt vectorization if:
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
    return false;

  bool Changed = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */);

  // Build up a worklist of inner loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    addAcyclicInnerLoop(*L, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= processLoop(L);
  }
  return Changed;
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  bool Changed =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  return PA;
}
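// A minimal usage sketch (assuming a standard opt build): the pass can be
// exercised on its own with either pass manager, e.g.
//
//   opt -loop-vectorize -S input.ll
//   opt -passes='loop-vectorize' -S input.ll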