//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
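//
// As an illustrative sketch (an addition for exposition, not from the
// references above): with a vectorization factor (VF) of 4, a scalar loop
// such as
//
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + 42;
//
// is conceptually rewritten so that each 'wide' iteration processes four
// consecutive elements with SIMD instructions,
//
//   for (int i = 0; i + 4 <= n; i += 4)
//     a[i:i+3] = b[i:i+3] + 42; // one wide load, add, and store
//
// with a scalar epilogue loop handling the remaining iterations.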
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <map>
#include <tuple>

using namespace llvm;
using namespace llvm::PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// We don't vectorize loops with a known constant trip count below this
/// number.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Don't vectorize loops with a constant "
             "trip count that is smaller than this "
             "value."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(false), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis
createMissedAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                     Instruction *I = nullptr) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

namespace {

// Forward declarations.
class LoopVectorizeHints;
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;

/// Returns true if the given loop body has a cycle, excluding the loop
/// itself.
static bool hasCyclesInLoopBody(const Loop &L) {
  if (!L.empty())
    return true;

  for (const auto &SCC :
       make_range(scc_iterator<Loop, LoopBodyTraits>::begin(L),
                  scc_iterator<Loop, LoopBodyTraits>::end(L))) {
    if (SCC.size() > 1) {
      DEBUG(dbgs() << "LVL: Detected a cycle in the loop body:\n");
      DEBUG(L.dump());
      return true;
    }
  }
  return false;
}
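
// A typical use of createMissedAnalysis (an illustrative sketch; the remark
// name and message here are hypothetical) streams additional text into the
// returned remark and hands it to the optimization remark emitter:
//
//   ORE->emit(createMissedAnalysis(LV_NAME, "CantComputeTripCount", TheLoop)
//             << "could not determine number of loop iterations");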

/// \brief This modifies LoopAccessReport to initialize message with
/// loop-vectorizer-specific part.
class VectorizationReport : public LoopAccessReport {
public:
  VectorizationReport(Instruction *I = nullptr)
      : LoopAccessReport("loop not vectorized: ", I) {}

  /// \brief This allows promotion of the loop-access analysis report into the
  /// loop-vectorizer report. It modifies the message to add the
  /// loop-vectorizer-specific part of the message.
  explicit VectorizationReport(const LoopAccessReport &R)
      : LoopAccessReport(Twine("loop not vectorized: ") + R.str(),
                         R.getInstr()) {}
};

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}

/// A helper function that returns a GEP instruction and knows to skip a
/// 'bitcast'. The 'bitcast' may be skipped if the source and the destination
/// pointee types of the 'bitcast' have the same size.
/// For example:
///   bitcast double** %var to i64* - can be skipped
///   bitcast double** %var to i8*  - cannot
static GetElementPtrInst *getGEPInstruction(Value *Ptr) {

  if (isa<GetElementPtrInst>(Ptr))
    return cast<GetElementPtrInst>(Ptr);

  if (isa<BitCastInst>(Ptr) &&
      isa<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0))) {
    Type *BitcastTy = Ptr->getType();
    Type *GEPTy = cast<BitCastInst>(Ptr)->getSrcTy();
    if (!isa<PointerType>(BitcastTy) || !isa<PointerType>(GEPTy))
      return nullptr;
    Type *Pointee1Ty = cast<PointerType>(BitcastTy)->getPointerElementType();
    Type *Pointee2Ty = cast<PointerType>(GEPTy)->getPointerElementType();
    const DataLayout &DL = cast<BitCastInst>(Ptr)->getModule()->getDataLayout();
    if (DL.getTypeSizeInBits(Pointee1Ty) == DL.getTypeSizeInBits(Pointee2Ty))
      return cast<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0));
  }
  return nullptr;
}

/// A helper function that returns the pointer operand of a load or store
/// instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {

  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
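
// An illustrative note (not from the original comments): i1 is irregular
// because its allocation size is 8 bits while its type size is 1 bit, so an
// array of 8 i1 values occupies 8 bytes whereas an <8 x i1> vector stores
// into a single byte. A type like i32 is regular at any VF.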

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found in the loop, for a given
/// vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Induction(nullptr),
        OldInduction(nullptr), VectorLoopValueMap(UnrollFactor, VecWidth),
        TripCount(nullptr), VectorTripCount(nullptr), Legal(nullptr),
        AddedSafetyChecks(false) {}

  // Perform the actual loop widening (vectorization).
  // MinimumBitWidths maps scalar integer values to the smallest bitwidth they
  // can be validly truncated to. The cost model has assumed this truncation
  // will happen when vectorizing.
  void vectorize(LoopVectorizationLegality *L,
                 const MapVector<Instruction *, uint64_t> &MinimumBitWidths) {
    MinBWs = &MinimumBitWidths;
    Legal = L;
    // Create a new empty loop. Unlink the old loop and connect the new one.
    createEmptyLoop();
    // Widen each instruction in the old loop to a new one in the new loop.
    // Use the Legality module to find the induction and reduction variables.
    vectorizeLoop();
  }

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  virtual ~InnerLoopVectorizer() {}

protected:
  /// A small list of PHINodes.
  typedef SmallVector<PHINode *, 4> PhiVector;

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  typedef SmallVector<Value *, 2> VectorParts;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  typedef SmallVector<SmallVector<Value *, 4>, 2> ScalarParts;

  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
      EdgeMaskCache;

  /// Create an empty loop, based on the loop ranges of the old loop.
  void createEmptyLoop();

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Copy and widen the instructions from the old loop.
  virtual void vectorizeLoop();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// \brief The Loop exit block may have single value PHI nodes where the
  /// incoming value is 'Undef'. While vectorizing we only handled real values
  /// that were defined inside the loop. Here we fix the 'undef case'.
  /// See PR14725.
  void fixLCSSAPHIs();

  /// Predicate conditional instructions that require predication on their
  /// respective conditions.
  void predicateInstructions();

  /// Shrinks vector element sizes based on information in "MinBWs".
  void truncateToMinimalBitwidths();

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);

  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// A helper function to vectorize a single BB within the innermost loop.
  void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF,
                           PhiVector *PV);

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars. If \p IfPredicateInstr is true we need to 'hide' each
  /// scalarized instruction behind an if block predicated on the control
  /// dependence of the instruction.
  virtual void scalarizeInstruction(Instruction *Instr,
                                    bool IfPredicateInstr = false);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for floating-point induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                               Instruction::BinaryOpsEnd);
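
  // For example (an illustrative sketch, not from the original comments):
  // with VF = 4, Val = <%iv, %iv, %iv, %iv>, StartIdx = 0, and Step = 1,
  // getStepVector produces <%iv, %iv + 1, %iv + 2, %iv + 3>, i.e. the
  // per-lane values of one widened induction step.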

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it
  /// can be a truncate instruction).
  void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal);

  /// Create a vector induction phi node based on an existing scalar one. This
  /// currently only works for integer induction variables with a constant
  /// step. \p EntryVal is the value from the original loop that maps to the
  /// vector phi node. If \p EntryVal is a truncate instruction, instead of
  /// widening the original IV, we widen a version of the IV truncated to \p
  /// EntryVal's type.
  void createVectorIntInductionPHI(const InductionDescriptor &II,
                                   Instruction *EntryVal);

  /// Widen an integer induction variable \p IV. If \p Trunc is provided, the
  /// induction variable will first be truncated to the corresponding type.
  void widenIntInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// Return a constant reference to the VectorParts corresponding to \p V from
  /// the original loop. If the value has already been vectorized, the
  /// corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// new vector values on-demand by inserting the scalar values into vectors
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into vectors.
  const VectorParts &getVectorValue(Value *V);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part and vector index \p Lane. If the value has
  /// been vectorized but not scalarized, the necessary extractelement
  /// instruction will be generated.
  Value *getScalarValue(Value *V, unsigned Part, unsigned Lane);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Emit a bypass check to see if the trip count would overflow, or we
  /// wouldn't have enough iterations to execute one vector loop.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if the vector trip count is nonzero.
  void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// This is a helper class for maintaining vectorization state. It's used for
  /// mapping values from the original loop to their corresponding values in
  /// the new loop. Two mappings are maintained: one for vectorized values and
  /// one for scalarized values. Vectorized values are represented with UF
  /// vector values in the new loop, and scalarized values are represented with
  /// UF x VF scalar values in the new loop. UF and VF are the unroll and
  /// vectorization factors, respectively.
  ///
  /// Entries can be added to either map with initVector and initScalar, which
  /// initialize and return a constant reference to the new entry. If a
  /// non-constant reference to a vector entry is required, getVector can be
  /// used to retrieve a mutable entry. We currently directly modify the mapped
  /// values during "fix-up" operations that occur once the first phase of
  /// widening is complete. These operations include type truncation and the
  /// second phase of recurrence widening.
  ///
  /// Otherwise, entries from either map should be accessed using the
  /// getVectorValue or getScalarValue functions from InnerLoopVectorizer.
  /// getVectorValue and getScalarValue coordinate to generate a vector or
  /// scalar value on-demand if one is not yet available. When vectorizing a
  /// loop, we visit the definition of an instruction before its uses. When
  /// visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  struct ValueMap {

    /// Construct an empty map with the given unroll and vectorization factors.
    ValueMap(unsigned UnrollFactor, unsigned VecWidth)
        : UF(UnrollFactor), VF(VecWidth) {
      // The unroll and vectorization factors are only used in asserts builds
      // to verify map entries are sized appropriately.
      (void)UF;
      (void)VF;
    }

    /// \return True if the map has a vector entry for \p Key.
    bool hasVector(Value *Key) const { return VectorMapStorage.count(Key); }

    /// \return True if the map has a scalar entry for \p Key.
    bool hasScalar(Value *Key) const { return ScalarMapStorage.count(Key); }
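
    // An illustrative sketch of the on-demand behavior described above: if a
    // definition %d was scalarized (so only the scalar map has entries for
    // it) and a later use needs a vector, getVectorValue(%d) assembles one
    // with an insertelement sequence per unroll part and records it in the
    // vector map, so subsequent vector uses reuse the same value.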

    /// \brief Map \p Key to the given VectorParts \p Entry, and return a
    /// constant reference to the new vector map entry. The given key should
    /// not already be in the map, and the given VectorParts should be
    /// correctly sized for the current unroll factor.
    const VectorParts &initVector(Value *Key, const VectorParts &Entry) {
      assert(!hasVector(Key) && "Vector entry already initialized");
      assert(Entry.size() == UF && "VectorParts has wrong dimensions");
      VectorMapStorage[Key] = Entry;
      return VectorMapStorage[Key];
    }

    /// \brief Map \p Key to the given ScalarParts \p Entry, and return a
    /// constant reference to the new scalar map entry. The given key should
    /// not already be in the map, and the given ScalarParts should be
    /// correctly sized for the current unroll and vectorization factors.
    const ScalarParts &initScalar(Value *Key, const ScalarParts &Entry) {
      assert(!hasScalar(Key) && "Scalar entry already initialized");
      assert(Entry.size() == UF &&
             all_of(make_range(Entry.begin(), Entry.end()),
                    [&](const SmallVectorImpl<Value *> &Values) -> bool {
                      return Values.size() == VF;
                    }) &&
             "ScalarParts has wrong dimensions");
      ScalarMapStorage[Key] = Entry;
      return ScalarMapStorage[Key];
    }

    /// \return A reference to the vector map entry corresponding to \p Key.
    /// The key should already be in the map. This function should only be used
    /// when it's necessary to update values that have already been vectorized.
    /// This is the case for "fix-up" operations including type truncation and
    /// the second phase of recurrence vectorization. If a non-const reference
    /// isn't required, getVectorValue should be used instead.
    VectorParts &getVector(Value *Key) {
      assert(hasVector(Key) && "Vector entry not initialized");
      return VectorMapStorage.find(Key)->second;
    }

    /// Retrieve an entry from the vector or scalar maps. The preferred way to
    /// access an existing mapped entry is with getVectorValue or
    /// getScalarValue from InnerLoopVectorizer. Until those functions can be
    /// moved inside ValueMap, we have to declare them as friends.
    friend const VectorParts &InnerLoopVectorizer::getVectorValue(Value *V);
    friend Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part,
                                                      unsigned Lane);

  private:
    /// The unroll factor. Each entry in the vector map contains UF vector
    /// values.
    unsigned UF;

    /// The vectorization factor. Each entry in the scalar map contains UF x VF
    /// scalar values.
    unsigned VF;

    /// The vector and scalar map storage. We use std::map and not DenseMap
    /// because insertions to DenseMap invalidate its iterators.
    std::map<Value *, VectorParts> VectorMapStorage;
    std::map<Value *, ScalarParts> ScalarMapStorage;
  };

  /// The original loop.
  Loop *OrigLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;
  /// Loop Info.
  LoopInfo *LI;
  /// Dominator Tree.
  DominatorTree *DT;
  /// Alias Analysis.
  AliasAnalysis *AA;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Assumption Cache.
  AssumptionCache *AC;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

protected:
  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;
  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;
  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;
  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;
  /// The vector loop body.
  BasicBlock *LoopVectorBody;
  /// The scalar loop body.
  BasicBlock *LoopScalarBody;
  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction;
  /// The induction variable of the old basic block.
  PHINode *OldInduction;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  ValueMap VectorLoopValueMap;

  /// Store instructions that should be predicated, as a pair
  /// <StoreInst, Predicate>.
  SmallVector<std::pair<Instruction *, Value *>, 4> PredicatedInstructions;
  EdgeMaskCache MaskCache;
  /// Trip count of the original loop.
  Value *TripCount;
  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount;

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  const MapVector<Instruction *, uint64_t> *MinBWs;

  LoopVectorizationLegality *Legal;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor) {}

private:
  void scalarizeInstruction(Instruction *Instr,
                            bool IfPredicateInstr = false) override;
  void vectorizeMemoryInstruction(Instruction *Instr) override;
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                       Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

/// \brief Set the debug location in the builder using the debug location in
/// the instruction.
static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr))
    B.SetCurrentDebugLocation(Inst->getDebugLoc());
  else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is related to the leader and it could be
  /// negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The index range of the group must fit within the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if the group contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey;
  int LargestKey;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32             // Insert Position
  //      %add = add i32 %even         // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32               // Def of %odd
  //      store i32 %odd               // Insert Position
  Instruction *InsertPos;
};
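
// An illustrative example of the key bookkeeping above (not from the original
// comments): for a factor-4 group created for the access A[i+2] (the leader,
// key 0), inserting A[i+3] uses Index == 1 and gets key 1, while inserting
// A[i] uses Index == -2 and becomes the new smallest key, -2. getIndex then
// reports A[i] as index 0 and A[i+3] as index 3.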

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise it's meaningless to do analysis as the vectorization
/// on interleaved accesses is unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(nullptr),
        RequiresScalarEpilogue(false) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Return the maximum interleave factor of all interleaved groups.
  unsigned getMaxInterleaveFactor() const {
    unsigned MaxFactor = 1;
    for (auto &Entry : InterleaveGroupMap)
      MaxFactor = std::max(MaxFactor, Entry.second->getFactor());
    return MaxFactor;
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr doesn't belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

  /// \brief Initialize the LoopAccessInfo used for dependence checking.
  void setLAI(const LoopAccessInfo *Info) { LAI = Info; }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;
  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// Holds dependences among the memory accesses in the loop. It maps a source
  /// access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    StrideDescriptor() = default;

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;
    const SCEV *Scev = nullptr; // The scalar expression of this access.
    uint64_t Size = 0;          // The size of the memory object.
    unsigned Align = 0;         // The alignment of this access.
  };

  /// \brief A type for holding instructions and their stride descriptors.
  typedef std::pair<Instruction *, StrideDescriptor> StrideEntry;

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);

  /// \brief Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride) {
    unsigned Factor = std::abs(Stride);
    return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
  }

  /// \brief Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }

  /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return false if reordering
  /// might violate a dependence from \p A to \p B.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {

    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B. Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
  }

  /// \brief Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for them.
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;
  /// Vectorization interleave factor.
  Hint Interleave;
  /// Vectorization forced.
  Hint Force;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
                     OptimizationRemarkEmitter &ORE)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        PotentiallyUnsafe(false), TheLoop(L), ORE(ORE) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized() {
    Width.Value = Interleave.Value = 1;
    Hint Hints[] = {Width, Interleave};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (getWidth() == 1 && getInterleave() == 1) {
      // FIXME: Add a separate metadata to indicate when the loop has already
      // been vectorized instead of setting width and count to 1.
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      ORE.emit(OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
                                          "AllDisabled", L->getStartLoc(),
                                          L->getHeader())
               << "loop not vectorized: vectorization and interleaving are "
                  "explicitly disabled, or vectorize width and interleave "
                  "count are both set to 1");
      return false;
    }

    return true;
  }

  /// Dumps all the hint information.
  void emitRemarkWithHints() const {
    using namespace ore;
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      ORE.emit(OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
               << "loop not vectorized: vectorization is explicitly disabled");
    else {
      OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
                                 TheLoop->getStartLoc(), TheLoop->getHeader());
      R << "loop not vectorized: use -Rpass-analysis=loop-vectorize for more "
           "info";
      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
        R << " (Force=" << NV("Force", true);
        if (Width.Value != 0)
          R << ", Vector Width=" << NV("VectorWidth", Width.Value);
        if (Interleave.Value != 0)
          R << ", Interleave Count=" << NV("InterleaveCount", Interleave.Value);
        R << ")";
      }
      ORE.emit(R);
    }
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }

  /// \brief If hints are provided that force vectorization, use the
  /// AlwaysPrint pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const {
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return OptimizationRemarkAnalysis::AlwaysPrint;
  }

  bool allowReordering() const {
    // When loop hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations that is given by the
    // scalar loop. This is not enabled by default because it can be unsafe
    // or inefficient. For example, reordering floating-point operations will
    // change the way round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either a MDString or a MDNode with the first
      // operand a MDString.
      if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        if (!MD || MD->getNumOperands() == 0)
          continue;
        S = dyn_cast<MDString>(MD->getOperand(0));
        for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
          Args.push_back(MD->getOperand(i));
      } else {
        S = dyn_cast<MDString>(LoopID->getOperand(i));
        assert(Args.size() == 0 && "too many arguments for MDString");
      }

      if (!S)
        continue;

      // Check if the hint starts with the loop metadata prefix.
      StringRef Name = S->getString();
      if (Args.size() == 1)
        setHint(Name, Args[0]);
    }
  }

  /// Checks a string hint with one operand and sets the value if valid.
  void setHint(StringRef Name, Metadata *Arg) {
    if (!Name.startswith(Prefix()))
      return;
    Name = Name.substr(Prefix().size(), StringRef::npos);

    const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
    if (!C)
      return;
    unsigned Val = C->getZExtValue();

    Hint *Hints[] = {&Width, &Interleave, &Force};
    for (auto H : Hints) {
      if (Name == H->Name) {
        if (H->validate(Val))
          H->Value = Val;
        else
          DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
        break;
      }
    }
  }
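
  // For example (illustrative IR, not from this file), a loop latch branch
  // annotated as
  //
  //   br i1 %exitcond, label %exit, label %loop, !llvm.loop !0
  //   !0 = distinct !{!0, !1, !2}
  //   !1 = !{!"llvm.loop.vectorize.width", i32 4}
  //   !2 = !{!"llvm.loop.vectorize.enable", i1 true}
  //
  // yields Width.Value == 4 and Force.Value == FK_Enabled after
  // getHintsFromMetadata and setHint run.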
1416 MDNode *createHintMetadata(StringRef Name, unsigned V) const { 1417 LLVMContext &Context = TheLoop->getHeader()->getContext(); 1418 Metadata *MDs[] = {MDString::get(Context, Name), 1419 ConstantAsMetadata::get( 1420 ConstantInt::get(Type::getInt32Ty(Context), V))}; 1421 return MDNode::get(Context, MDs); 1422 } 1423 1424 /// Matches metadata with hint name. 1425 bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) { 1426 MDString *Name = dyn_cast<MDString>(Node->getOperand(0)); 1427 if (!Name) 1428 return false; 1429 1430 for (auto H : HintTypes) 1431 if (Name->getString().endswith(H.Name)) 1432 return true; 1433 return false; 1434 } 1435 1436 /// Sets current hints into loop metadata, keeping other values intact. 1437 void writeHintsToMetadata(ArrayRef<Hint> HintTypes) { 1438 if (HintTypes.size() == 0) 1439 return; 1440 1441 // Reserve the first element to LoopID (see below). 1442 SmallVector<Metadata *, 4> MDs(1); 1443 // If the loop already has metadata, then ignore the existing operands. 1444 MDNode *LoopID = TheLoop->getLoopID(); 1445 if (LoopID) { 1446 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 1447 MDNode *Node = cast<MDNode>(LoopID->getOperand(i)); 1448 // If node in update list, ignore old value. 1449 if (!matchesHintMetadataName(Node, HintTypes)) 1450 MDs.push_back(Node); 1451 } 1452 } 1453 1454 // Now, add the missing hints. 1455 for (auto H : HintTypes) 1456 MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value)); 1457 1458 // Replace current metadata node with new one. 1459 LLVMContext &Context = TheLoop->getHeader()->getContext(); 1460 MDNode *NewLoopID = MDNode::get(Context, MDs); 1461 // Set operand 0 to refer to the loop id itself. 1462 NewLoopID->replaceOperandWith(0, NewLoopID); 1463 1464 TheLoop->setLoopID(NewLoopID); 1465 } 1466 1467 /// The loop these hints belong to. 1468 const Loop *TheLoop; 1469 1470 /// Interface to emit optimization remarks. 1471 OptimizationRemarkEmitter &ORE; 1472 }; 1473 1474 static void emitAnalysisDiag(const Loop *TheLoop, 1475 const LoopVectorizeHints &Hints, 1476 OptimizationRemarkEmitter &ORE, 1477 const LoopAccessReport &Message) { 1478 const char *Name = Hints.vectorizeAnalysisPassName(); 1479 LoopAccessReport::emitAnalysis(Message, TheLoop, Name, ORE); 1480 } 1481 1482 static void emitMissedWarning(Function *F, Loop *L, 1483 const LoopVectorizeHints &LH, 1484 OptimizationRemarkEmitter *ORE) { 1485 LH.emitRemarkWithHints(); 1486 1487 if (LH.getForce() == LoopVectorizeHints::FK_Enabled) { 1488 if (LH.getWidth() != 1) 1489 emitLoopVectorizeWarning( 1490 F->getContext(), *F, L->getStartLoc(), 1491 "failed explicitly specified loop vectorization"); 1492 else if (LH.getInterleave() != 1) 1493 emitLoopInterleaveWarning( 1494 F->getContext(), *F, L->getStartLoc(), 1495 "failed explicitly specified loop interleaving"); 1496 } 1497 } 1498 1499 /// LoopVectorizationLegality checks if it is legal to vectorize a loop, and 1500 /// to what vectorization factor. 1501 /// This class does not look at the profitability of vectorization, only the 1502 /// legality. This class has two main kinds of checks: 1503 /// * Memory checks - The code in canVectorizeMemory checks if vectorization 1504 /// will change the order of memory accesses in a way that will change the 1505 /// correctness of the program. 
/// * Scalar checks - The code in canVectorizeInstrs and canVectorizeMemory
///   checks for a number of different conditions, such as the availability of
///   a single induction variable and that all types are supported and
///   vectorizable, etc. This code reflects the capabilities of
///   InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying the
/// induction variable and the different reduction variables.
class LoopVectorizationLegality {
public:
  LoopVectorizationLegality(
      Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
      TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F,
      const TargetTransformInfo *TTI,
      std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI,
      OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R,
      LoopVectorizeHints *H)
      : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT),
        GetLAA(GetLAA), LAI(nullptr), ORE(ORE), InterleaveInfo(PSE, L, DT, LI),
        Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
        Requirements(R), Hints(H) {}

  /// ReductionList contains the reduction descriptors for all
  /// of the reductions that were found in the loop.
  typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;

  /// InductionList saves induction variables and maps them to the
  /// induction descriptor.
  typedef MapVector<PHINode *, InductionDescriptor> InductionList;

  /// RecurrenceSet contains the phi nodes that are recurrences other than
  /// inductions and reductions.
  typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet;

  /// Returns true if it is legal to vectorize this loop.
  /// This does not mean that it is profitable to vectorize this
  /// loop, only that it is legal to do so.
  bool canVectorize();

  /// Returns the induction variable.
  PHINode *getInduction() { return Induction; }

  /// Returns the reduction variables found in the loop.
  ReductionList *getReductionVars() { return &Reductions; }

  /// Returns the induction variables found in the loop.
  InductionList *getInductionVars() { return &Inductions; }

  /// Return the first-order recurrences found in the loop.
  RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }

  /// Returns the widest induction type.
  Type *getWidestInductionType() { return WidestIndTy; }

  /// Returns true if V is an induction variable in this loop.
  bool isInductionVariable(const Value *V);

  /// Returns true if PN is a reduction variable in this loop.
  bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }

  /// Returns true if Phi is a first-order recurrence in this loop.
  bool isFirstOrderRecurrence(const PHINode *Phi);

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  bool blockNeedsPredication(BasicBlock *BB);

  /// Check if this pointer is consecutive when vectorizing. This happens
  /// when the last index of the GEP is the induction variable, or when the
  /// pointer itself is an induction variable.
  /// This check allows us to vectorize A[idx] into a wide load/store.
  /// Returns:
  /// 0 - Stride is unknown or non-consecutive.
  /// 1 - Address is consecutive.
  /// -1 - Address is consecutive, and decreasing.
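  /// For example (illustrative, for unit-sized elements):
  ///   A[i]     ->  1 (consecutive)
  ///   A[N - i] -> -1 (consecutive and decreasing)
  ///   A[2 * i] ->  0 (strided, not consecutive)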
1579 int isConsecutivePtr(Value *Ptr); 1580 1581 /// Returns true if the value V is uniform within the loop. 1582 bool isUniform(Value *V); 1583 1584 /// Returns true if \p I is known to be uniform after vectorization. 1585 bool isUniformAfterVectorization(Instruction *I) { return Uniforms.count(I); } 1586 1587 /// Returns true if \p I is known to be scalar after vectorization. 1588 bool isScalarAfterVectorization(Instruction *I) { return Scalars.count(I); } 1589 1590 /// Returns the information that we collected about runtime memory check. 1591 const RuntimePointerChecking *getRuntimePointerChecking() const { 1592 return LAI->getRuntimePointerChecking(); 1593 } 1594 1595 const LoopAccessInfo *getLAI() const { return LAI; } 1596 1597 /// \brief Check if \p Instr belongs to any interleaved access group. 1598 bool isAccessInterleaved(Instruction *Instr) { 1599 return InterleaveInfo.isInterleaved(Instr); 1600 } 1601 1602 /// \brief Return the maximum interleave factor of all interleaved groups. 1603 unsigned getMaxInterleaveFactor() const { 1604 return InterleaveInfo.getMaxInterleaveFactor(); 1605 } 1606 1607 /// \brief Get the interleaved access group that \p Instr belongs to. 1608 const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) { 1609 return InterleaveInfo.getInterleaveGroup(Instr); 1610 } 1611 1612 /// \brief Returns true if an interleaved group requires a scalar iteration 1613 /// to handle accesses with gaps. 1614 bool requiresScalarEpilogue() const { 1615 return InterleaveInfo.requiresScalarEpilogue(); 1616 } 1617 1618 unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); } 1619 1620 bool hasStride(Value *V) { return LAI->hasStride(V); } 1621 1622 /// Returns true if the target machine supports masked store operation 1623 /// for the given \p DataType and kind of access to \p Ptr. 1624 bool isLegalMaskedStore(Type *DataType, Value *Ptr) { 1625 return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType); 1626 } 1627 /// Returns true if the target machine supports masked load operation 1628 /// for the given \p DataType and kind of access to \p Ptr. 1629 bool isLegalMaskedLoad(Type *DataType, Value *Ptr) { 1630 return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType); 1631 } 1632 /// Returns true if the target machine supports masked scatter operation 1633 /// for the given \p DataType. 1634 bool isLegalMaskedScatter(Type *DataType) { 1635 return TTI->isLegalMaskedScatter(DataType); 1636 } 1637 /// Returns true if the target machine supports masked gather operation 1638 /// for the given \p DataType. 1639 bool isLegalMaskedGather(Type *DataType) { 1640 return TTI->isLegalMaskedGather(DataType); 1641 } 1642 /// Returns true if the target machine can represent \p V as a masked gather 1643 /// or scatter operation. 1644 bool isLegalGatherOrScatter(Value *V) { 1645 auto *LI = dyn_cast<LoadInst>(V); 1646 auto *SI = dyn_cast<StoreInst>(V); 1647 if (!LI && !SI) 1648 return false; 1649 auto *Ptr = getPointerOperand(V); 1650 auto *Ty = cast<PointerType>(Ptr->getType())->getElementType(); 1651 return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty)); 1652 } 1653 1654 /// Returns true if vector representation of the instruction \p I 1655 /// requires mask. 
  bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
  unsigned getNumStores() const { return LAI->getNumStores(); }
  unsigned getNumLoads() const { return LAI->getNumLoads(); }
  unsigned getNumPredStores() const { return NumPredStores; }

  /// Returns true if \p I is a store instruction in a predicated block that
  /// will be scalarized during vectorization.
  bool isPredicatedStore(Instruction *I);

  /// Returns true if \p I is a memory instruction that has a consecutive or
  /// consecutive-like pointer operand. Consecutive-like pointers are pointers
  /// that are treated like consecutive pointers during vectorization. The
  /// pointer operands of interleaved accesses are an example.
  bool hasConsecutiveLikePtrOperand(Instruction *I);

  /// Returns true if \p I is a memory instruction that must be scalarized
  /// during vectorization.
  bool memoryInstructionMustBeScalarized(Instruction *I, unsigned VF = 1);

private:
  /// Check if a single basic block loop is vectorizable.
  /// At this point we know that this is a loop with a constant trip count
  /// and we only need to check individual instructions.
  bool canVectorizeInstrs();

  /// When we vectorize loops we may change the order in which
  /// we read and write from memory. This method checks if it is
  /// legal to vectorize the code, considering only memory constraints.
  /// Returns true if the loop is vectorizable.
  bool canVectorizeMemory();

  /// Return true if we can vectorize this loop using the IF-conversion
  /// transformation.
  bool canVectorizeWithIfConvert();

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopUniforms();

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization. Non-uniform scalarized instructions will be
  /// represented by VF values in the vectorized loop, each corresponding to an
  /// iteration of the original scalar loop.
  void collectLoopScalars();

  /// Return true if all of the instructions in the block can be speculatively
  /// executed. \p SafePtrs is a list of addresses that are known to be legal
  /// and we know that we can read from them without segfault.
  bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);

  /// Updates the vectorization state by adding \p Phi to the inductions list.
  /// This can set \p Phi as the main induction of the loop if \p Phi is a
  /// better choice for the main induction than the existing one.
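  /// As a sketch of the preference (an assumption about the heuristic, not a
  /// guarantee): given inductions 'i32 %i' and 'i64 %j' in the same loop, the
  /// induction whose type matches the widest induction type may be chosen as
  /// the main induction, so others can be derived from it by truncation.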
  void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
                       SmallPtrSetImpl<Value *> &AllowedExit);

  /// Report an analysis message to assist the user in diagnosing loops that
  /// are not vectorized. These are handled as LoopAccessReport rather than
  /// VectorizationReport because the << operator of VectorizationReport
  /// returns LoopAccessReport.
  void emitAnalysis(const LoopAccessReport &Message) const {
    emitAnalysisDiag(TheLoop, *Hints, *ORE, Message);
  }

  /// Create an analysis remark that explains why vectorization failed.
  ///
  /// \p RemarkName is the identifier for the remark. If \p I is passed, it is
  /// an instruction that prevents vectorization. Otherwise the loop is used
  /// for the location of the remark. \return the remark object that can be
  /// streamed to.
  OptimizationRemarkAnalysis
  createMissedAnalysis(StringRef RemarkName, Instruction *I = nullptr) const {
    return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
                                  RemarkName, TheLoop, I);
  }

  /// \brief If an access has a symbolic stride, this maps the pointer value
  /// to the stride symbol.
  const ValueToValueMap *getSymbolicStrides() {
    // FIXME: Currently, the set of symbolic strides is sometimes queried
    // before it's collected. This happens from canVectorizeWithIfConvert,
    // when the pointer is checked to reference consecutive elements suitable
    // for a masked access.
    return LAI ? &LAI->getSymbolicStrides() : nullptr;
  }

  unsigned NumPredStores;

  /// The loop that we evaluate.
  Loop *TheLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
  /// Applies dynamic knowledge to simplify SCEV expressions in the context
  /// of existing SCEV assumptions. The analysis will also add a minimal set
  /// of new predicates if this is required to enable vectorization and
  /// unrolling.
  PredicatedScalarEvolution &PSE;
  /// Target Library Info.
  TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Dominator Tree.
  DominatorTree *DT;
  // LoopAccess analysis.
  std::function<const LoopAccessInfo &(Loop &)> *GetLAA;
  // And the loop-accesses info corresponding to this loop. This pointer is
  // null until canVectorizeMemory sets it up.
  const LoopAccessInfo *LAI;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// The interleaved access information contains groups of interleaved
  /// accesses with the same stride that are close to each other.
  InterleavedAccessInfo InterleaveInfo;

  // --- vectorization state --- //

  /// Holds the integer induction variable. This is the counter of the
  /// loop.
  PHINode *Induction;
  /// Holds the reduction variables.
  ReductionList Reductions;
  /// Holds all of the induction variables that we found in the loop.
  /// Notice that inductions don't need to start at zero and that induction
  /// variables can be pointers.
  InductionList Inductions;
  /// Holds the phi nodes that are first-order recurrences.
  RecurrenceSet FirstOrderRecurrences;
  /// Holds the widest induction type encountered.
  Type *WidestIndTy;

  /// Allowed outside users. This holds the induction and reduction
  /// variables which can be accessed from outside the loop.
1796 SmallPtrSet<Value *, 4> AllowedExit; 1797 1798 /// Holds the instructions known to be uniform after vectorization. 1799 SmallPtrSet<Instruction *, 4> Uniforms; 1800 1801 /// Holds the instructions known to be scalar after vectorization. 1802 SmallPtrSet<Instruction *, 4> Scalars; 1803 1804 /// Can we assume the absence of NaNs. 1805 bool HasFunNoNaNAttr; 1806 1807 /// Vectorization requirements that will go through late-evaluation. 1808 LoopVectorizationRequirements *Requirements; 1809 1810 /// Used to emit an analysis of any legality issues. 1811 LoopVectorizeHints *Hints; 1812 1813 /// While vectorizing these instructions we have to generate a 1814 /// call to the appropriate masked intrinsic 1815 SmallPtrSet<const Instruction *, 8> MaskedOp; 1816 }; 1817 1818 /// LoopVectorizationCostModel - estimates the expected speedups due to 1819 /// vectorization. 1820 /// In many cases vectorization is not profitable. This can happen because of 1821 /// a number of reasons. In this class we mainly attempt to predict the 1822 /// expected speedup/slowdowns due to the supported instruction set. We use the 1823 /// TargetTransformInfo to query the different backends for the cost of 1824 /// different operations. 1825 class LoopVectorizationCostModel { 1826 public: 1827 LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE, 1828 LoopInfo *LI, LoopVectorizationLegality *Legal, 1829 const TargetTransformInfo &TTI, 1830 const TargetLibraryInfo *TLI, DemandedBits *DB, 1831 AssumptionCache *AC, 1832 OptimizationRemarkEmitter *ORE, const Function *F, 1833 const LoopVectorizeHints *Hints) 1834 : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB), 1835 AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {} 1836 1837 /// Information about vectorization costs 1838 struct VectorizationFactor { 1839 unsigned Width; // Vector width with best cost 1840 unsigned Cost; // Cost of the loop with that width 1841 }; 1842 /// \return The most profitable vectorization factor and the cost of that VF. 1843 /// This method checks every power of two up to VF. If UserVF is not ZERO 1844 /// then this vectorization factor will be selected if vectorization is 1845 /// possible. 1846 VectorizationFactor selectVectorizationFactor(bool OptForSize); 1847 1848 /// \return The size (in bits) of the smallest and widest types in the code 1849 /// that needs to be vectorized. We ignore values that remain scalar such as 1850 /// 64 bit loop indices. 1851 std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); 1852 1853 /// \return The desired interleave count. 1854 /// If interleave count has been specified by metadata it will be returned. 1855 /// Otherwise, the interleave count is computed and returned. VF and LoopCost 1856 /// are the selected vectorization factor and the cost of the selected VF. 1857 unsigned selectInterleaveCount(bool OptForSize, unsigned VF, 1858 unsigned LoopCost); 1859 1860 /// \return The most profitable unroll factor. 1861 /// This method finds the best unroll-factor based on register pressure and 1862 /// other parameters. VF and LoopCost are the selected vectorization factor 1863 /// and the cost of the selected VF. 1864 unsigned computeInterleaveCount(bool OptForSize, unsigned VF, 1865 unsigned LoopCost); 1866 1867 /// \brief A struct that represents some properties of the register usage 1868 /// of a loop. 1869 struct RegisterUsage { 1870 /// Holds the number of loop invariant values that are used in the loop. 
    unsigned LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    unsigned MaxLocalUsers;
    /// Holds the number of instructions in the loop.
    unsigned NumInstructions;
  };

  /// \return Returns information about the register usages of the loop for
  /// the given vectorization factors.
  SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

private:
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  typedef std::pair<unsigned, bool> VectorizationCostTy;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Create an analysis remark that explains why vectorization failed.
  ///
  /// \p RemarkName is the identifier for the remark. \return the remark
  /// object that can be streamed to.
  OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
    return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
                                  RemarkName, TheLoop);
  }

public:
  /// Map of scalar integer values to the smallest bitwidth they can be
  /// legally represented as. The vector equivalents of these values should be
  /// truncated to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// The loop that we evaluate.
  Loop *TheLoop;
  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;
  /// Loop Info analysis.
  LoopInfo *LI;
  /// Vectorization legality.
  LoopVectorizationLegality *Legal;
  /// Vector target information.
  const TargetTransformInfo &TTI;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Demanded bits analysis.
  DemandedBits *DB;
  /// Assumption cache.
  AssumptionCache *AC;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  const Function *TheFunction;
  /// Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;
  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;
  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;
};

/// \brief This holds vectorization requirements that must be verified late in
/// the process. The requirements are set by the legality analysis and the
/// cost model. Once vectorization has been determined to be possible and
/// profitable, the requirements can be verified by looking for metadata or
/// compiler options. For example, some loops require FP commutativity which
/// is only allowed if vectorization is explicitly specified or if the
/// fast-math compiler option has been provided.
/// Late evaluation of these requirements allows helpful diagnostics to be
/// composed that tell the user what needs to be done to vectorize the loop.
/// For example, by specifying #pragma clang loop vectorize or -ffast-math.
/// Late evaluation should be used only when diagnostics can be generated that
/// can be followed by a non-expert user.
class LoopVectorizationRequirements {
public:
  LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE)
      : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr), ORE(ORE) {}

  void addUnsafeAlgebraInst(Instruction *I) {
    // First unsafe algebra instruction.
    if (!UnsafeAlgebraInst)
      UnsafeAlgebraInst = I;
  }

  void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }

  bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
    const char *PassName = Hints.vectorizeAnalysisPassName();
    bool Failed = false;
    if (UnsafeAlgebraInst && !Hints.allowReordering()) {
      ORE.emit(
          OptimizationRemarkAnalysisFPCommute(PassName, "CantReorderFPOps",
                                              UnsafeAlgebraInst->getDebugLoc(),
                                              UnsafeAlgebraInst->getParent())
          << "loop not vectorized: cannot prove it is safe to reorder "
             "floating-point operations");
      Failed = true;
    }

    // Test if runtime memcheck thresholds are exceeded.
    bool PragmaThresholdReached =
        NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
    bool ThresholdReached =
        NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
    if ((ThresholdReached && !Hints.allowReordering()) ||
        PragmaThresholdReached) {
      ORE.emit(OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps",
                                                  L->getStartLoc(),
                                                  L->getHeader())
               << "loop not vectorized: cannot prove it is safe to reorder "
                  "memory operations");
      DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
      Failed = true;
    }

    return Failed;
  }

private:
  unsigned NumRuntimePointerChecks;
  Instruction *UnsafeAlgebraInst;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter &ORE;
};

static void addAcyclicInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) {
  if (L.empty()) {
    if (!hasCyclesInLoopBody(L))
      V.push_back(&L);
    return;
  }
  for (Loop *InnerL : L)
    addAcyclicInnerLoop(*InnerL, V);
}

/// The LoopVectorize Pass.
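/// This is the legacy pass-manager wrapper around LoopVectorizePass. As a
/// usage sketch, the pass can be exercised on its own with:
///   opt -loop-vectorize -S input.ll
/// where 'loop-vectorize' is the LV_NAME under which the pass is registered.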
2032 struct LoopVectorize : public FunctionPass { 2033 /// Pass identification, replacement for typeid 2034 static char ID; 2035 2036 explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true) 2037 : FunctionPass(ID) { 2038 Impl.DisableUnrolling = NoUnrolling; 2039 Impl.AlwaysVectorize = AlwaysVectorize; 2040 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2041 } 2042 2043 LoopVectorizePass Impl; 2044 2045 bool runOnFunction(Function &F) override { 2046 if (skipFunction(F)) 2047 return false; 2048 2049 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2050 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2051 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2052 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2053 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2054 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2055 auto *TLI = TLIP ? &TLIP->getTLI() : nullptr; 2056 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2057 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2058 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2059 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2060 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2061 2062 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2063 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2064 2065 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2066 GetLAA, *ORE); 2067 } 2068 2069 void getAnalysisUsage(AnalysisUsage &AU) const override { 2070 AU.addRequired<AssumptionCacheTracker>(); 2071 AU.addRequiredID(LoopSimplifyID); 2072 AU.addRequiredID(LCSSAID); 2073 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2074 AU.addRequired<DominatorTreeWrapperPass>(); 2075 AU.addRequired<LoopInfoWrapperPass>(); 2076 AU.addRequired<ScalarEvolutionWrapperPass>(); 2077 AU.addRequired<TargetTransformInfoWrapperPass>(); 2078 AU.addRequired<AAResultsWrapperPass>(); 2079 AU.addRequired<LoopAccessLegacyAnalysis>(); 2080 AU.addRequired<DemandedBitsWrapperPass>(); 2081 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2082 AU.addPreserved<LoopInfoWrapperPass>(); 2083 AU.addPreserved<DominatorTreeWrapperPass>(); 2084 AU.addPreserved<BasicAAWrapperPass>(); 2085 AU.addPreserved<GlobalsAAWrapperPass>(); 2086 } 2087 }; 2088 2089 } // end anonymous namespace 2090 2091 //===----------------------------------------------------------------------===// 2092 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2093 // LoopVectorizationCostModel. 2094 //===----------------------------------------------------------------------===// 2095 2096 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2097 // We need to place the broadcast of invariant variables outside the loop. 2098 Instruction *Instr = dyn_cast<Instruction>(V); 2099 bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody); 2100 bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr; 2101 2102 // Place the code for broadcasting invariant variables in the new preheader. 2103 IRBuilder<>::InsertPointGuard Guard(Builder); 2104 if (Invariant) 2105 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2106 2107 // Broadcast the scalar into all locations in the vector. 
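  // For example, with VF = 4 and an i32 scalar %v, CreateVectorSplat below
  // expands to an insertelement followed by a zero-mask shufflevector:
  //   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %v, i32 0
  //   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
  //                          <4 x i32> undef, <4 x i32> zeroinitializer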
2108 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2109 2110 return Shuf; 2111 } 2112 2113 void InnerLoopVectorizer::createVectorIntInductionPHI( 2114 const InductionDescriptor &II, Instruction *EntryVal) { 2115 Value *Start = II.getStartValue(); 2116 ConstantInt *Step = II.getConstIntStepValue(); 2117 assert(Step && "Can not widen an IV with a non-constant step"); 2118 2119 // Construct the initial value of the vector IV in the vector loop preheader 2120 auto CurrIP = Builder.saveIP(); 2121 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2122 if (isa<TruncInst>(EntryVal)) { 2123 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2124 Step = ConstantInt::getSigned(TruncType, Step->getSExtValue()); 2125 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2126 } 2127 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2128 Value *SteppedStart = getStepVector(SplatStart, 0, Step); 2129 Builder.restoreIP(CurrIP); 2130 2131 Value *SplatVF = 2132 ConstantVector::getSplat(VF, ConstantInt::getSigned(Start->getType(), 2133 VF * Step->getSExtValue())); 2134 // We may need to add the step a number of times, depending on the unroll 2135 // factor. The last of those goes into the PHI. 2136 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2137 &*LoopVectorBody->getFirstInsertionPt()); 2138 Instruction *LastInduction = VecInd; 2139 VectorParts Entry(UF); 2140 for (unsigned Part = 0; Part < UF; ++Part) { 2141 Entry[Part] = LastInduction; 2142 LastInduction = cast<Instruction>( 2143 Builder.CreateAdd(LastInduction, SplatVF, "step.add")); 2144 } 2145 VectorLoopValueMap.initVector(EntryVal, Entry); 2146 if (isa<TruncInst>(EntryVal)) 2147 addMetadata(Entry, EntryVal); 2148 2149 // Move the last step to the end of the latch block. This ensures consistent 2150 // placement of all induction updates. 2151 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2152 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2153 auto *ICmp = cast<Instruction>(Br->getCondition()); 2154 LastInduction->moveBefore(ICmp); 2155 LastInduction->setName("vec.ind.next"); 2156 2157 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2158 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2159 } 2160 2161 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2162 if (Legal->isScalarAfterVectorization(IV)) 2163 return true; 2164 auto isScalarInst = [&](User *U) -> bool { 2165 auto *I = cast<Instruction>(U); 2166 return (OrigLoop->contains(I) && Legal->isScalarAfterVectorization(I)); 2167 }; 2168 return any_of(IV->users(), isScalarInst); 2169 } 2170 2171 void InnerLoopVectorizer::widenIntInduction(PHINode *IV, TruncInst *Trunc) { 2172 2173 auto II = Legal->getInductionVars()->find(IV); 2174 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 2175 2176 auto ID = II->second; 2177 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2178 2179 // The scalar value to broadcast. This will be derived from the canonical 2180 // induction variable. 2181 Value *ScalarIV = nullptr; 2182 2183 // The step of the induction. 2184 Value *Step = nullptr; 2185 2186 // The value from the original loop to which we are mapping the new induction 2187 // variable. 2188 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2189 2190 // True if we have vectorized the induction variable. 
2191 auto VectorizedIV = false; 2192 2193 // Determine if we want a scalar version of the induction variable. This is 2194 // true if the induction variable itself is not widened, or if it has at 2195 // least one user in the loop that is not widened. 2196 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal); 2197 2198 // If the induction variable has a constant integer step value, go ahead and 2199 // get it now. 2200 if (ID.getConstIntStepValue()) 2201 Step = ID.getConstIntStepValue(); 2202 2203 // Try to create a new independent vector induction variable. If we can't 2204 // create the phi node, we will splat the scalar induction variable in each 2205 // loop iteration. 2206 if (VF > 1 && IV->getType() == Induction->getType() && Step && 2207 !Legal->isScalarAfterVectorization(EntryVal)) { 2208 createVectorIntInductionPHI(ID, EntryVal); 2209 VectorizedIV = true; 2210 } 2211 2212 // If we haven't yet vectorized the induction variable, or if we will create 2213 // a scalar one, we need to define the scalar induction variable and step 2214 // values. If we were given a truncation type, truncate the canonical 2215 // induction variable and constant step. Otherwise, derive these values from 2216 // the induction descriptor. 2217 if (!VectorizedIV || NeedsScalarIV) { 2218 if (Trunc) { 2219 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2220 assert(Step && "Truncation requires constant integer step"); 2221 auto StepInt = cast<ConstantInt>(Step)->getSExtValue(); 2222 ScalarIV = Builder.CreateCast(Instruction::Trunc, Induction, TruncType); 2223 Step = ConstantInt::getSigned(TruncType, StepInt); 2224 } else { 2225 ScalarIV = Induction; 2226 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2227 if (IV != OldInduction) { 2228 ScalarIV = Builder.CreateSExtOrTrunc(ScalarIV, IV->getType()); 2229 ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL); 2230 ScalarIV->setName("offset.idx"); 2231 } 2232 if (!Step) { 2233 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2234 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 2235 &*Builder.GetInsertPoint()); 2236 } 2237 } 2238 } 2239 2240 // If we haven't yet vectorized the induction variable, splat the scalar 2241 // induction variable, and build the necessary step vectors. 2242 if (!VectorizedIV) { 2243 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2244 VectorParts Entry(UF); 2245 for (unsigned Part = 0; Part < UF; ++Part) 2246 Entry[Part] = getStepVector(Broadcasted, VF * Part, Step); 2247 VectorLoopValueMap.initVector(EntryVal, Entry); 2248 if (Trunc) 2249 addMetadata(Entry, Trunc); 2250 } 2251 2252 // If an induction variable is only used for counting loop iterations or 2253 // calculating addresses, it doesn't need to be widened. Create scalar steps 2254 // that can be used by instructions we will later scalarize. Note that the 2255 // addition of the scalar steps will not increase the number of instructions 2256 // in the loop in the common case prior to InstCombine. We will be trading 2257 // one vector extract for each scalar step. 2258 if (NeedsScalarIV) 2259 buildScalarSteps(ScalarIV, Step, EntryVal); 2260 } 2261 2262 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2263 Instruction::BinaryOps BinOp) { 2264 // Create and check the types. 
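  // As an overview, for an integer induction with StartIdx = 0, VF = 4 and
  // step %s, the vector returned below is:
  //   Val + <0, 1, 2, 3> * %s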
2265 assert(Val->getType()->isVectorTy() && "Must be a vector"); 2266 int VLen = Val->getType()->getVectorNumElements(); 2267 2268 Type *STy = Val->getType()->getScalarType(); 2269 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2270 "Induction Step must be an integer or FP"); 2271 assert(Step->getType() == STy && "Step has wrong type"); 2272 2273 SmallVector<Constant *, 8> Indices; 2274 2275 if (STy->isIntegerTy()) { 2276 // Create a vector of consecutive numbers from zero to VF. 2277 for (int i = 0; i < VLen; ++i) 2278 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 2279 2280 // Add the consecutive indices to the vector value. 2281 Constant *Cv = ConstantVector::get(Indices); 2282 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 2283 Step = Builder.CreateVectorSplat(VLen, Step); 2284 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2285 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2286 // which can be found from the original scalar operations. 2287 Step = Builder.CreateMul(Cv, Step); 2288 return Builder.CreateAdd(Val, Step, "induction"); 2289 } 2290 2291 // Floating point induction. 2292 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2293 "Binary Opcode should be specified for FP induction"); 2294 // Create a vector of consecutive numbers from zero to VF. 2295 for (int i = 0; i < VLen; ++i) 2296 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 2297 2298 // Add the consecutive indices to the vector value. 2299 Constant *Cv = ConstantVector::get(Indices); 2300 2301 Step = Builder.CreateVectorSplat(VLen, Step); 2302 2303 // Floating point operations had to be 'fast' to enable the induction. 2304 FastMathFlags Flags; 2305 Flags.setUnsafeAlgebra(); 2306 2307 Value *MulOp = Builder.CreateFMul(Cv, Step); 2308 if (isa<Instruction>(MulOp)) 2309 // Have to check, MulOp may be a constant 2310 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 2311 2312 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2313 if (isa<Instruction>(BOp)) 2314 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2315 return BOp; 2316 } 2317 2318 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2319 Value *EntryVal) { 2320 2321 // We shouldn't have to build scalar steps if we aren't vectorizing. 2322 assert(VF > 1 && "VF should be greater than one"); 2323 2324 // Get the value type and ensure it and the step have the same integer type. 2325 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2326 assert(ScalarIVTy->isIntegerTy() && ScalarIVTy == Step->getType() && 2327 "Val and Step should have the same integer type"); 2328 2329 // Determine the number of scalars we need to generate for each unroll 2330 // iteration. If EntryVal is uniform, we only need to generate the first 2331 // lane. Otherwise, we generate all VF values. 2332 unsigned Lanes = 2333 Legal->isUniformAfterVectorization(cast<Instruction>(EntryVal)) ? 1 : VF; 2334 2335 // Compute the scalar steps and save the results in VectorLoopValueMap. 
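  // For example, with UF = 2, VF = 4 and a non-uniform EntryVal, part 0 holds
  // ScalarIV + {0,1,2,3} * Step and part 1 holds ScalarIV + {4,5,6,7} * Step.
  // A uniform EntryVal only needs lane 0 of each part.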
2336 ScalarParts Entry(UF); 2337 for (unsigned Part = 0; Part < UF; ++Part) { 2338 Entry[Part].resize(VF); 2339 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2340 auto *StartIdx = ConstantInt::get(ScalarIVTy, VF * Part + Lane); 2341 auto *Mul = Builder.CreateMul(StartIdx, Step); 2342 auto *Add = Builder.CreateAdd(ScalarIV, Mul); 2343 Entry[Part][Lane] = Add; 2344 } 2345 } 2346 VectorLoopValueMap.initScalar(EntryVal, Entry); 2347 } 2348 2349 int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) { 2350 2351 const ValueToValueMap &Strides = getSymbolicStrides() ? *getSymbolicStrides() : 2352 ValueToValueMap(); 2353 2354 int Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, true, false); 2355 if (Stride == 1 || Stride == -1) 2356 return Stride; 2357 return 0; 2358 } 2359 2360 bool LoopVectorizationLegality::isUniform(Value *V) { 2361 return LAI->isUniform(V); 2362 } 2363 2364 const InnerLoopVectorizer::VectorParts & 2365 InnerLoopVectorizer::getVectorValue(Value *V) { 2366 assert(V != Induction && "The new induction variable should not be used."); 2367 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2368 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2369 2370 // If we have a stride that is replaced by one, do it here. 2371 if (Legal->hasStride(V)) 2372 V = ConstantInt::get(V->getType(), 1); 2373 2374 // If we have this scalar in the map, return it. 2375 if (VectorLoopValueMap.hasVector(V)) 2376 return VectorLoopValueMap.VectorMapStorage[V]; 2377 2378 // If the value has not been vectorized, check if it has been scalarized 2379 // instead. If it has been scalarized, and we actually need the value in 2380 // vector form, we will construct the vector values on demand. 2381 if (VectorLoopValueMap.hasScalar(V)) { 2382 2383 // Initialize a new vector map entry. 2384 VectorParts Entry(UF); 2385 2386 // If we've scalarized a value, that value should be an instruction. 2387 auto *I = cast<Instruction>(V); 2388 2389 // If we aren't vectorizing, we can just copy the scalar map values over to 2390 // the vector map. 2391 if (VF == 1) { 2392 for (unsigned Part = 0; Part < UF; ++Part) 2393 Entry[Part] = getScalarValue(V, Part, 0); 2394 return VectorLoopValueMap.initVector(V, Entry); 2395 } 2396 2397 // Get the last scalar instruction we generated for V. If the value is 2398 // known to be uniform after vectorization, this corresponds to lane zero 2399 // of the last unroll iteration. Otherwise, the last instruction is the one 2400 // we created for the last vector lane of the last unroll iteration. 2401 unsigned LastLane = Legal->isUniformAfterVectorization(I) ? 0 : VF - 1; 2402 auto *LastInst = cast<Instruction>(getScalarValue(V, UF - 1, LastLane)); 2403 2404 // Set the insert point after the last scalarized instruction. This ensures 2405 // the insertelement sequence will directly follow the scalar definitions. 2406 auto OldIP = Builder.saveIP(); 2407 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 2408 Builder.SetInsertPoint(&*NewIP); 2409 2410 // However, if we are vectorizing, we need to construct the vector values. 2411 // If the value is known to be uniform after vectorization, we can just 2412 // broadcast the scalar value corresponding to lane zero for each unroll 2413 // iteration. Otherwise, we construct the vector values using insertelement 2414 // instructions. Since the resulting vectors are stored in 2415 // VectorLoopValueMap, we will only generate the insertelements once. 
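    // For example, packing a non-uniform scalarized value with VF = 2 emits
    // (types and value names here are illustrative):
    //   %v.0 = insertelement <2 x i32> undef, i32 %x.0, i32 0
    //   %v.1 = insertelement <2 x i32> %v.0, i32 %x.1, i32 1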
2416 for (unsigned Part = 0; Part < UF; ++Part) { 2417 Value *VectorValue = nullptr; 2418 if (Legal->isUniformAfterVectorization(I)) { 2419 VectorValue = getBroadcastInstrs(getScalarValue(V, Part, 0)); 2420 } else { 2421 VectorValue = UndefValue::get(VectorType::get(V->getType(), VF)); 2422 for (unsigned Lane = 0; Lane < VF; ++Lane) 2423 VectorValue = Builder.CreateInsertElement( 2424 VectorValue, getScalarValue(V, Part, Lane), 2425 Builder.getInt32(Lane)); 2426 } 2427 Entry[Part] = VectorValue; 2428 } 2429 Builder.restoreIP(OldIP); 2430 return VectorLoopValueMap.initVector(V, Entry); 2431 } 2432 2433 // If this scalar is unknown, assume that it is a constant or that it is 2434 // loop invariant. Broadcast V and save the value for future uses. 2435 Value *B = getBroadcastInstrs(V); 2436 return VectorLoopValueMap.initVector(V, VectorParts(UF, B)); 2437 } 2438 2439 Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part, 2440 unsigned Lane) { 2441 2442 // If the value is not an instruction contained in the loop, it should 2443 // already be scalar. 2444 if (OrigLoop->isLoopInvariant(V)) 2445 return V; 2446 2447 assert(Lane > 0 ? !Legal->isUniformAfterVectorization(cast<Instruction>(V)) 2448 : true && "Uniform values only have lane zero"); 2449 2450 // If the value from the original loop has not been vectorized, it is 2451 // represented by UF x VF scalar values in the new loop. Return the requested 2452 // scalar value. 2453 if (VectorLoopValueMap.hasScalar(V)) 2454 return VectorLoopValueMap.ScalarMapStorage[V][Part][Lane]; 2455 2456 // If the value has not been scalarized, get its entry in VectorLoopValueMap 2457 // for the given unroll part. If this entry is not a vector type (i.e., the 2458 // vectorization factor is one), there is no need to generate an 2459 // extractelement instruction. 2460 auto *U = getVectorValue(V)[Part]; 2461 if (!U->getType()->isVectorTy()) { 2462 assert(VF == 1 && "Value not scalarized has non-vector type"); 2463 return U; 2464 } 2465 2466 // Otherwise, the value from the original loop has been vectorized and is 2467 // represented by UF vector values. Extract and return the requested scalar 2468 // value from the appropriate vector lane. 2469 return Builder.CreateExtractElement(U, Builder.getInt32(Lane)); 2470 } 2471 2472 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2473 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2474 SmallVector<Constant *, 8> ShuffleMask; 2475 for (unsigned i = 0; i < VF; ++i) 2476 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 2477 2478 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 2479 ConstantVector::get(ShuffleMask), 2480 "reverse"); 2481 } 2482 2483 // Get a mask to interleave \p NumVec vectors into a wide vector. 2484 // I.e. <0, VF, VF*2, ..., VF*(NumVec-1), 1, VF+1, VF*2+1, ...> 2485 // E.g. For 2 interleaved vectors, if VF is 4, the mask is: 2486 // <0, 4, 1, 5, 2, 6, 3, 7> 2487 static Constant *getInterleavedMask(IRBuilder<> &Builder, unsigned VF, 2488 unsigned NumVec) { 2489 SmallVector<Constant *, 16> Mask; 2490 for (unsigned i = 0; i < VF; i++) 2491 for (unsigned j = 0; j < NumVec; j++) 2492 Mask.push_back(Builder.getInt32(j * VF + i)); 2493 2494 return ConstantVector::get(Mask); 2495 } 2496 2497 // Get the strided mask starting from index \p Start. 2498 // I.e. 
<Start, Start + Stride, ..., Start + Stride*(VF-1)>
static Constant *getStridedMask(IRBuilder<> &Builder, unsigned Start,
                                unsigned Stride, unsigned VF) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Builder.getInt32(Start + i * Stride));

  return ConstantVector::get(Mask);
}

// Get a mask of two parts: the first part consists of sequential integers
// starting from 0, and the second part consists of UNDEFs.
// I.e. <0, 1, 2, ..., NumInt - 1, undef, ..., undef>
static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned NumInt,
                                   unsigned NumUndef) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < NumInt; i++)
    Mask.push_back(Builder.getInt32(i));

  Constant *Undef = UndefValue::get(Builder.getInt32Ty());
  for (unsigned i = 0; i < NumUndef; i++)
    Mask.push_back(Undef);

  return ConstantVector::get(Mask);
}

// Concatenate two vectors with the same element type. The second vector must
// not have more elements than the first. If it has fewer elements, extend it
// with UNDEFs.
static Value *ConcatenateTwoVectors(IRBuilder<> &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = VecTy1->getNumElements();
  unsigned NumElts2 = VecTy2->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "The first vector should not have fewer elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    Constant *ExtMask =
        getSequentialMask(Builder, NumElts2, NumElts1 - NumElts2);
    V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask);
  }

  Constant *Mask = getSequentialMask(Builder, NumElts1 + NumElts2, 0);
  return Builder.CreateShuffleVector(V1, V2, Mask);
}

// Concatenate vectors in the given list. All vectors have the same type.
static Value *ConcatenateVectors(IRBuilder<> &Builder,
                                 ArrayRef<Value *> InputList) {
  unsigned NumVec = InputList.size();
  assert(NumVec > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(InputList.begin(), InputList.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVec - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVec - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(ConcatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVec % 2 != 0)
      TmpList.push_back(ResList[NumVec - 1]);

    ResList = TmpList;
    NumVec = ResList.size();
  } while (NumVec > 1);

  return ResList[0];
}

// Try to vectorize the interleave group that \p Instr belongs to.
//
// E.g. translate the following interleaved load group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     R = Pic[i];   // Member of index 0
//     G = Pic[i+1]; // Member of index 1
//     B = Pic[i+2]; // Member of index 2
//     ...
//         do something to R, G, B
//   }
// To:
//   %wide.vec = load <12 x i32>                    ; Read 4 tuples of R,G,B
//   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
//   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
//   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
//
// Or translate the following interleaved store group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     ... do something to R, G, B
//     Pic[i]   = R; // Member of index 0
//     Pic[i+1] = G; // Member of index 1
//     Pic[i+2] = B; // Member of index 2
//   }
// To:
//   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
//   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
//   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
//       <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
//   store <12 x i32> %interleaved.vec           ; Write 4 tuples of R,G,B
void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
  const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr);
  assert(Group && "Failed to get an interleaved access group.");

  // Skip if the current instruction is not the insert position.
  if (Instr != Group->getInsertPos())
    return;

  LoadInst *LI = dyn_cast<LoadInst>(Instr);
  StoreInst *SI = dyn_cast<StoreInst>(Instr);
  Value *Ptr = getPointerOperand(Instr);

  // Prepare for the vector type of the interleaved load/store.
  Type *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  unsigned InterleaveFactor = Group->getFactor();
  Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
  Type *PtrTy = VecTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());

  // Prepare for the new pointers.
  setDebugLocFromInst(Builder, Ptr);
  SmallVector<Value *, 2> NewPtrs;
  unsigned Index = Group->getIndex(Instr);

  // If the group is reverse, adjust the index to refer to the last vector
  // lane instead of the first. We adjust the index from the first vector
  // lane, rather than directly getting the pointer for lane VF - 1, because
  // the pointer operand of the interleaved access is supposed to be uniform.
  // For uniform instructions, we're only required to generate a value for the
  // first vector lane in each unroll iteration.
  if (Group->isReverse())
    Index += (VF - 1) * Group->getFactor();

  for (unsigned Part = 0; Part < UF; Part++) {
    Value *NewPtr = getScalarValue(Ptr, Part, 0);

    // Note that the current instruction could be at any index in the group.
    // We need to adjust the address to the member of index 0.
    //
    // E.g. a = A[i+1]; // Member of index 1 (current instruction)
    //      b = A[i];   // Member of index 0
    //      The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g. A[i+1] = a; // Member of index 1
    //      A[i]   = b; // Member of index 0
    //      A[i+2] = c; // Member of index 2 (current instruction)
    //      The current pointer points to A[i+2]; adjust it to A[i].
    NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));

    // Cast to the vector pointer type.
    NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
  }

  setDebugLocFromInst(Builder, Instr);
  Value *UndefVec = UndefValue::get(VecTy);

  // Vectorize the interleaved load group.
  if (LI) {

    // For each unroll part, create a wide load for the group.
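    // Continuing the factor-3 example in the comment above, with VF = 4 and
    // UF = 2 this emits two <12 x i32> wide loads, one per unroll part.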
    SmallVector<Value *, 2> NewLoads;
    for (unsigned Part = 0; Part < UF; Part++) {
      auto *NewLoad = Builder.CreateAlignedLoad(
          NewPtrs[Part], Group->getAlignment(), "wide.vec");
      addMetadata(NewLoad, Instr);
      NewLoads.push_back(NewLoad);
    }

    // For each member in the group, shuffle out the appropriate data from the
    // wide loads.
    for (unsigned I = 0; I < InterleaveFactor; ++I) {
      Instruction *Member = Group->getMember(I);

      // Skip the gaps in the group.
      if (!Member)
        continue;

      VectorParts Entry(UF);
      Constant *StrideMask = getStridedMask(Builder, I, InterleaveFactor, VF);
      for (unsigned Part = 0; Part < UF; Part++) {
        Value *StridedVec = Builder.CreateShuffleVector(
            NewLoads[Part], UndefVec, StrideMask, "strided.vec");

        // If this member has a different type, cast the result type.
        if (Member->getType() != ScalarTy) {
          VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
          StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy);
        }

        Entry[Part] =
            Group->isReverse() ? reverseVector(StridedVec) : StridedVec;
      }
      VectorLoopValueMap.initVector(Member, Entry);
    }
    return;
  }

  // The sub vector type for the current instruction.
  VectorType *SubVT = VectorType::get(ScalarTy, VF);

  // Vectorize the interleaved store group.
  for (unsigned Part = 0; Part < UF; Part++) {
    // Collect the stored vector from each member.
    SmallVector<Value *, 4> StoredVecs;
    for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow gaps, so each index has a
      // member.
      Instruction *Member = Group->getMember(i);
      assert(Member &&
             "Failed to get a member from an interleaved store group");

      Value *StoredVec =
          getVectorValue(cast<StoreInst>(Member)->getValueOperand())[Part];
      if (Group->isReverse())
        StoredVec = reverseVector(StoredVec);

      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
        StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT);

      StoredVecs.push_back(StoredVec);
    }

    // Concatenate all vectors into a wide vector.
    Value *WideVec = ConcatenateVectors(Builder, StoredVecs);

    // Interleave the elements in the wide vector.
    Constant *IMask = getInterleavedMask(Builder, VF, InterleaveFactor);
    Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
                                              "interleaved.vec");

    Instruction *NewStoreInstr =
        Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
    addMetadata(NewStoreInstr, Instr);
  }
}

void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
  // Attempt to issue a wide load.
  LoadInst *LI = dyn_cast<LoadInst>(Instr);
  StoreInst *SI = dyn_cast<StoreInst>(Instr);

  assert((LI || SI) && "Invalid Load/Store instruction");

  // Try to vectorize the interleave group if this access is interleaved.
  if (Legal->isAccessInterleaved(Instr))
    return vectorizeInterleaveGroup(Instr);

  Type *ScalarDataTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  Type *DataTy = VectorType::get(ScalarDataTy, VF);
  Value *Ptr = getPointerOperand(Instr);
  unsigned Alignment = LI ? LI->getAlignment() : SI->getAlignment();
  // An alignment of 0 means target ABI alignment.
  // An alignment of 0 means target ABI alignment. We need to use the scalar
  // type's target ABI alignment in such a case.
  const DataLayout &DL = Instr->getModule()->getDataLayout();
  if (!Alignment)
    Alignment = DL.getABITypeAlignment(ScalarDataTy);
  unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();

  // Scalarize the memory instruction if necessary.
  if (Legal->memoryInstructionMustBeScalarized(Instr, VF))
    return scalarizeInstruction(Instr, Legal->isPredicatedStore(Instr));

  // Determine if the pointer operand of the access is either consecutive or
  // reverse consecutive.
  int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
  bool Reverse = ConsecutiveStride < 0;

  // Determine if either a gather or scatter operation is legal.
  bool CreateGatherScatter =
      !ConsecutiveStride && Legal->isLegalGatherOrScatter(Instr);

  VectorParts VectorGep;

  // Handle consecutive loads/stores.
  GetElementPtrInst *Gep = getGEPInstruction(Ptr);
  if (ConsecutiveStride) {
    if (Gep) {
      unsigned NumOperands = Gep->getNumOperands();
#ifndef NDEBUG
      // The original GEP that was identified as a consecutive memory access
      // should have only one loop-variant operand.
      unsigned NumOfLoopVariantOps = 0;
      for (unsigned i = 0; i < NumOperands; ++i)
        if (!PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)),
                                          OrigLoop))
          NumOfLoopVariantOps++;
      assert(NumOfLoopVariantOps == 1 &&
             "Consecutive GEP should have only one loop-variant operand");
#endif
      GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
      Gep2->setName("gep.indvar");

      // A new GEP is created for a 0-lane value of the first unroll iteration.
      // The GEPs for the rest of the unroll iterations are computed below as
      // an offset from this GEP.
      for (unsigned i = 0; i < NumOperands; ++i)
        // We can apply getScalarValue() to all GEP indices. It returns the
        // original value for a loop-invariant operand and the 0-lane value
        // for a consecutive operand.
        Gep2->setOperand(i, getScalarValue(Gep->getOperand(i),
                                           0, /* First unroll iteration */
                                           0 /* 0-lane of the vector */));
      setDebugLocFromInst(Builder, Gep);
      Ptr = Builder.Insert(Gep2);

    } else { // No GEP
      setDebugLocFromInst(Builder, Ptr);
      Ptr = getScalarValue(Ptr, 0, 0);
    }
  } else {
    // At this point we should have a vector version of the GEP for the gather
    // or scatter.
    assert(CreateGatherScatter && "The instruction should be scalarized");
    if (Gep) {
      // Vectorize the GEP across all UF parts. We want a vector value for the
      // base and for each index that's defined inside the loop, even if it is
      // loop-invariant but wasn't hoisted out. Otherwise we keep them scalar.
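      // As an illustrative sketch: for an access like A[B[i]] with VF = 4,
      // the loop-varying index is widened and the emitted GEP is itself a
      // vector of pointers, conceptually:
      //   %VectorGep = getelementptr i32, i32* %A, <4 x i64> %wide.idx
      // which then feeds the masked gather/scatter below. (%A and %wide.idx
      // are hypothetical names.)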
      SmallVector<VectorParts, 4> OpsV;
      for (Value *Op : Gep->operands()) {
        Instruction *SrcInst = dyn_cast<Instruction>(Op);
        if (SrcInst && OrigLoop->contains(SrcInst))
          OpsV.push_back(getVectorValue(Op));
        else
          OpsV.push_back(VectorParts(UF, Op));
      }
      for (unsigned Part = 0; Part < UF; ++Part) {
        SmallVector<Value *, 4> Ops;
        Value *GEPBasePtr = OpsV[0][Part];
        for (unsigned i = 1; i < Gep->getNumOperands(); i++)
          Ops.push_back(OpsV[i][Part]);
        Value *NewGep = Builder.CreateGEP(GEPBasePtr, Ops, "VectorGep");
        cast<GetElementPtrInst>(NewGep)->setIsInBounds(Gep->isInBounds());
        assert(NewGep->getType()->isVectorTy() && "Expected vector GEP");

        NewGep =
            Builder.CreateBitCast(NewGep, VectorType::get(Ptr->getType(), VF));
        VectorGep.push_back(NewGep);
      }
    } else
      VectorGep = getVectorValue(Ptr);
  }

  VectorParts Mask = createBlockInMask(Instr->getParent());
  // Handle Stores:
  if (SI) {
    assert(!Legal->isUniform(SI->getPointerOperand()) &&
           "We do not allow storing to uniform addresses");
    setDebugLocFromInst(Builder, SI);
    // We don't want to update the value in the map as it might be used in
    // another expression. So don't use a reference type for "StoredVal".
    VectorParts StoredVal = getVectorValue(SI->getValueOperand());

    for (unsigned Part = 0; Part < UF; ++Part) {
      Instruction *NewSI = nullptr;
      if (CreateGatherScatter) {
        Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr;
        NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part],
                                            Alignment, MaskPart);
      } else {
        // Calculate the pointer for the specific unroll-part.
        Value *PartPtr =
            Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));

        if (Reverse) {
          // If we store to reverse consecutive memory locations, then we need
          // to reverse the order of elements in the stored value.
          StoredVal[Part] = reverseVector(StoredVal[Part]);
          // If the address is consecutive but reversed, then the
          // wide store needs to start at the last vector element.
          PartPtr =
              Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
          PartPtr =
              Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
          Mask[Part] = reverseVector(Mask[Part]);
        }

        Value *VecPtr =
            Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));

        if (Legal->isMaskRequired(SI))
          NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment,
                                            Mask[Part]);
        else
          NewSI =
              Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment);
      }
      addMetadata(NewSI, SI);
    }
    return;
  }

  // Handle loads.
  assert(LI && "Must have a load instruction");
  setDebugLocFromInst(Builder, LI);
  VectorParts Entry(UF);
  for (unsigned Part = 0; Part < UF; ++Part) {
    Instruction *NewLI;
    if (CreateGatherScatter) {
      Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr;
      NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, MaskPart,
                                         0, "wide.masked.gather");
      Entry[Part] = NewLI;
    } else {
      // Calculate the pointer for the specific unroll-part.
      Value *PartPtr =
          Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));

      if (Reverse) {
        // If the address is consecutive but reversed, then the
        // wide load needs to start at the last vector element.
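        // E.g. (a sketch with VF = 4): for part 0 the two GEPs below move the
        // pointer back to &Ptr[-3], so the wide load covers Ptr[-3..0]; the
        // loaded vector is then reversed back into iteration order.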
        PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
        PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
        Mask[Part] = reverseVector(Mask[Part]);
      }

      Value *VecPtr =
          Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
      if (Legal->isMaskRequired(LI))
        NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
                                         UndefValue::get(DataTy),
                                         "wide.masked.load");
      else
        NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
      Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI;
    }
    addMetadata(NewLI, LI);
  }
  VectorLoopValueMap.initVector(Instr, Entry);
}

void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
                                               bool IfPredicateInstr) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
  DEBUG(dbgs() << "LV: Scalarizing"
               << (IfPredicateInstr ? " and predicating:" : ":") << *Instr
               << '\n');
  // Holds vector parameters or scalars, in the case of uniform values.
  SmallVector<VectorParts, 4> Params;

  setDebugLocFromInst(Builder, Instr);

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  // Initialize a new scalar map entry.
  ScalarParts Entry(UF);

  VectorParts Cond;
  if (IfPredicateInstr)
    Cond = createBlockInMask(Instr->getParent());

  // Determine the number of scalars we need to generate for each unroll
  // iteration. If the instruction is uniform, we only need to generate the
  // first lane. Otherwise, we generate all VF values.
  unsigned Lanes = Legal->isUniformAfterVectorization(Instr) ? 1 : VF;

  // For each vector unroll 'part':
  for (unsigned Part = 0; Part < UF; ++Part) {
    Entry[Part].resize(VF);
    // For each scalar that we create:
    for (unsigned Lane = 0; Lane < Lanes; ++Lane) {

      // Start if-block.
      Value *Cmp = nullptr;
      if (IfPredicateInstr) {
        Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Lane));
        Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp,
                                 ConstantInt::get(Cmp->getType(), 1));
      }

      Instruction *Cloned = Instr->clone();
      if (!IsVoidRetTy)
        Cloned->setName(Instr->getName() + ".cloned");

      // Replace the operands of the cloned instructions with their scalar
      // equivalents in the new loop.
      for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
        auto *NewOp = getScalarValue(Instr->getOperand(op), Part, Lane);
        Cloned->setOperand(op, NewOp);
      }
      addNewMetadata(Cloned, Instr);

      // Place the cloned scalar in the new loop.
      Builder.Insert(Cloned);

      // Add the cloned scalar to the scalar map entry.
      Entry[Part][Lane] = Cloned;

      // If we just cloned a new assumption, add it to the assumption cache.
      if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
        if (II->getIntrinsicID() == Intrinsic::assume)
          AC->registerAssumption(II);

      // End if-block.
      if (IfPredicateInstr)
        PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp));
    }
  }
  VectorLoopValueMap.initScalar(Instr, Entry);
}

PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
                                                      Value *End, Value *Step,
                                                      Instruction *DL) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible no latch exists
  // yet. If so, use the header as this will be a single block loop.
  if (!Latch)
    Latch = Header;

  IRBuilder<> Builder(&*Header->getFirstInsertionPt());
  setDebugLocFromInst(Builder, getDebugLocFromInstOrOperands(OldInduction));
  auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");

  Builder.SetInsertPoint(Latch->getTerminator());

  // Create i+1 and fill the PHINode.
  Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
  Induction->addIncoming(Start, L->getLoopPreheader());
  Induction->addIncoming(Next, Latch);
  // Create the compare.
  Value *ICmp = Builder.CreateICmpEQ(Next, End);
  Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);

  // Now we have two terminators. Remove the old one from the block.
  Latch->getTerminator()->eraseFromParent();

  return Induction;
}

Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
  if (TripCount)
    return TripCount;

  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  // Find the loop boundaries.
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
         "Invalid loop count");

  Type *IdxTy = Legal->getWidestInductionType();

  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way we can end up with a wider backedge-taken count is
  // if the induction variable was signed, and as such it will not overflow;
  // in such a case truncation is legal.
  if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
      IdxTy->getPrimitiveSizeInBits())
    BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
  BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);

  // Get the total trip count from the count by adding 1.
  const SCEV *ExitCount = SE->getAddExpr(
      BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));

  const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();

  // Expand the trip count and place the new instructions in the preheader.
  // Notice that the pre-header does not change, only the loop body.
  SCEVExpander Exp(*SE, DL, "induction");

  // Count holds the overall loop count (N).
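  // As an illustrative sketch: for a source loop `for (i = 0; i <= n; ++i)`
  // the backedge-taken count is n, so the expanded trip count is n + 1,
  // truncated or zero-extended to the widest induction type as needed.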
  TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
                                L->getLoopPreheader()->getTerminator());

  if (TripCount->getType()->isPointerTy())
    TripCount =
        CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
                                    L->getLoopPreheader()->getTerminator());

  return TripCount;
}

Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
  if (VectorTripCount)
    return VectorTripCount;

  Value *TC = getOrCreateTripCount(L);
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());

  // Now we need to generate the expression for the part of the loop that the
  // vectorized body will execute. This is equal to N - (N % Step) if scalar
  // iterations are not required for correctness, or N - Step otherwise. Step
  // is equal to the vectorization factor (number of SIMD elements) times the
  // unroll factor (number of SIMD instructions).
  Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
  Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");

  // If there is a non-reversed interleaved group that may speculatively access
  // memory out-of-bounds, we need to ensure that there will be at least one
  // iteration of the scalar epilogue loop. Thus, if the step evenly divides
  // the trip count, we set the remainder to be equal to the step. If the step
  // does not evenly divide the trip count, no adjustment is necessary since
  // there will already be scalar iterations. Note that the minimum iterations
  // check ensures that N >= Step.
  if (VF > 1 && Legal->requiresScalarEpilogue()) {
    auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
    R = Builder.CreateSelect(IsZero, Step, R);
  }

  VectorTripCount = Builder.CreateSub(TC, R, "n.vec");

  return VectorTripCount;
}

void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
                                                         BasicBlock *Bypass) {
  Value *Count = getOrCreateTripCount(L);
  BasicBlock *BB = L->getLoopPreheader();
  IRBuilder<> Builder(BB->getTerminator());

  // Generate code to check that the loop's trip count, which we computed by
  // adding one to the backedge-taken count, will not overflow.
  Value *CheckMinIters = Builder.CreateICmpULT(
      Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");

  BasicBlock *NewBB =
      BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, CheckMinIters));
  LoopBypassBlocks.push_back(BB);
}

void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L,
                                                     BasicBlock *Bypass) {
  Value *TC = getOrCreateVectorTripCount(L);
  BasicBlock *BB = L->getLoopPreheader();
  IRBuilder<> Builder(BB->getTerminator());

  // Now, compare the new count to zero. If it is zero, skip the vector loop
  // and jump to the scalar loop.
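  // As a sketch (assuming an i64 trip count), the emitted guard is simply:
  //   %cmp.zero = icmp eq i64 %n.vec, 0
  //   br i1 %cmp.zero, label %scalar.ph, label %vector.ph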
  Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
                                    "cmp.zero");

  // Split off the vector preheader; a zero vector trip count branches around
  // the vector loop to the scalar preheader.
  BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, Cmp));
  LoopBypassBlocks.push_back(BB);
}

void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *BB = L->getLoopPreheader();

  // Generate the code to check the SCEV assumptions that we made.
  // We want the new basic block to start at the first instruction in a
  // sequence of instructions that form a check.
  SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
                   "scev.check");
  Value *SCEVCheck =
      Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());

  if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
    if (C->isZero())
      return;

  // Create a new block containing the SCEV check.
  BB->setName("vector.scevcheck");
  auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, SCEVCheck));
  LoopBypassBlocks.push_back(BB);
  AddedSafetyChecks = true;
}

void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *BB = L->getLoopPreheader();

  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
  Instruction *FirstCheckInst;
  Instruction *MemRuntimeCheck;
  std::tie(FirstCheckInst, MemRuntimeCheck) =
      Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
  if (!MemRuntimeCheck)
    return;

  // Create a new block containing the memory check.
  BB->setName("vector.memcheck");
  auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
  LoopBypassBlocks.push_back(BB);
  AddedSafetyChecks = true;
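
  // As an illustrative sketch, for two pointers %a and %b accessed in the
  // loop, the emitted overlap test is conceptually:
  //   %bound0 = icmp ult i8* %a.start, %b.end
  //   %bound1 = icmp ult i8* %b.start, %a.end
  //   %memcheck.conflict = and i1 %bound0, %bound1
  // with a conflict branching to the scalar loop; the exact IR is produced by
  // LoopAccessInfo::addRuntimeChecks(). (The %names above are hypothetical.)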

  // We currently don't use LoopVersioning for the actual loop cloning but we
  // still use it to add the noalias metadata.
  LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
                                           PSE.getSE());
  LVer->prepareNoAliasMetadata();
}

void InnerLoopVectorizer::createEmptyLoop() {
  /*
   In this function we generate a new loop. The new loop will contain
   the vectorized instructions while the old loop will continue to run the
   scalar remainder.

       [ ] <-- loop iteration number check.
    /   |
   /    v
  |    [ ] <-- vector loop bypass (may consist of multiple blocks).
  |  /  |
  | /   v
  ||   [ ]     <-- vector pre header.
  |/    |
  |     v
  |    [ ] \
  |    [ ]_|   <-- vector loop.
  |     |
  |     v
  |   -[ ]   <--- middle-block.
  |  /  |
  | /   v
  -|- >[ ]     <--- new preheader.
   |    |
   |    v
   |   [ ] \
   |   [ ]_|   <-- old scalar loop to handle remainder.
    \   |
     \  v
      >[ ]     <-- exit block.
   ...
   */

  BasicBlock *OldBasicBlock = OrigLoop->getHeader();
  BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
  BasicBlock *ExitBlock = OrigLoop->getExitBlock();
  assert(VectorPH && "Invalid loop structure");
  assert(ExitBlock && "Must have an exit block");

  // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators, which often give rise to multiple
  // pointer induction variables. In the code below we also support the case
  // where we don't have a single induction variable.
  //
  // We try as hard as possible to obtain an induction variable from the
  // original loop. However, if we don't find one that:
  //   - is an integer
  //   - counts from zero, stepping by one
  //   - is the size of the widest induction variable type
  // then we create a new one.
  OldInduction = Legal->getInduction();
  Type *IdxTy = Legal->getWidestInductionType();

  // Split the single block loop into the two loop structure described above.
  BasicBlock *VecBody =
      VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
  BasicBlock *MiddleBlock =
      VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
  BasicBlock *ScalarPH =
      MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");

  // Create and register the new vector loop.
  Loop *Lp = new Loop();
  Loop *ParentLoop = OrigLoop->getParentLoop();

  // Insert the new loop into the loop nest and register the new basic blocks
  // before calling any utilities such as SCEV that require valid LoopInfo.
  if (ParentLoop) {
    ParentLoop->addChildLoop(Lp);
    ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
    ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
  } else {
    LI->addTopLevelLoop(Lp);
  }
  Lp->addBasicBlockToLoop(VecBody, *LI);

  // Find the loop boundaries.
  Value *Count = getOrCreateTripCount(Lp);

  Value *StartIdx = ConstantInt::get(IdxTy, 0);

  // We need to test whether the backedge-taken count is uint##_max. Adding one
  // to it will cause overflow and an incorrect loop trip count in the vector
  // body. In case of overflow we want to directly jump to the scalar remainder
  // loop.
  emitMinimumIterationCountCheck(Lp, ScalarPH);
  // Now, compare the new count to zero. If it is zero, skip the vector loop
  // and jump to the scalar loop.
  emitVectorLoopEnteredCheck(Lp, ScalarPH);
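  // Taken together, the guards emitted here and below form a chain of bypass
  // blocks; as a sketch, control falls through min.iters.checked,
  // vector.scevcheck and vector.memcheck (each optional) on the way to
  // vector.ph, and any failing check branches directly to scalar.ph.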
  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
  emitSCEVChecks(Lp, ScalarPH);

  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
  emitMemRuntimeChecks(Lp, ScalarPH);

  // Generate the induction variable.
  // The loop step is equal to the vectorization factor (num of SIMD elements)
  // times the unroll factor (num of SIMD instructions).
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Constant *Step = ConstantInt::get(IdxTy, VF * UF);
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // We are going to resume the execution of the scalar loop.
  // Go over all of the induction variables that we found and fix the
  // PHIs that are left in the scalar version of the loop.
  // The starting values of PHI nodes depend on the counter of the last
  // iteration in the vectorized loop.
  // If we come from a bypass edge then we need to start from the original
  // start value.

  // This variable saves the new starting index for the scalar loop. It is
  // used to test if there are any tail iterations left once the vector loop
  // has completed.
  LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
  for (auto &InductionEntry : *List) {
    PHINode *OrigPhi = InductionEntry.first;
    InductionDescriptor II = InductionEntry.second;

    // Create phi nodes to merge from the backedge-taken check block.
    PHINode *BCResumeVal = PHINode::Create(
        OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
    Value *EndValue;
    if (OrigPhi == OldInduction) {
      // We know what the end value is.
      EndValue = CountRoundDown;
    } else {
      IRBuilder<> B(LoopBypassBlocks.back()->getTerminator());
      Type *StepType = II.getStep()->getType();
      Instruction::CastOps CastOp =
          CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
      Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      EndValue = II.transform(B, CRD, PSE.getSE(), DL);
      EndValue->setName("ind.end");
    }

    // The new PHI merges the original incoming value, in case of a bypass,
    // or the value at the end of the vectorized loop.
    BCResumeVal->addIncoming(EndValue, MiddleBlock);

    // Fix up external users of the induction variable.
    fixupIVUsers(OrigPhi, II, CountRoundDown, EndValue, MiddleBlock);

    // Fix the scalar body counter (PHI node).
    unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);

    // The old induction's phi node in the scalar body needs the truncated
    // value.
    for (BasicBlock *BB : LoopBypassBlocks)
      BCResumeVal->addIncoming(II.getStartValue(), BB);
    OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
  }

  // Add a check in the middle block to see if we have completed
  // all of the iterations in the first vector loop.
  // If (N - N%VF) == N, then we *don't* need to run the remainder.
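  // As a worked example (illustrative): with N = 10 and VF * UF = 4, the
  // vector trip count is 10 - 10 % 4 = 8, the comparison 8 == 10 fails, and
  // the scalar remainder loop executes the final two iterations.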
  Value *CmpN =
      CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
                      CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
  ReplaceInstWithInst(MiddleBlock->getTerminator(),
                      BranchInst::Create(ExitBlock, ScalarPH, CmpN));

  // Get ready to start creating new instructions into the vectorized body.
  Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());

  // Save the state.
  LoopVectorPreHeader = Lp->getLoopPreheader();
  LoopScalarPreHeader = ScalarPH;
  LoopMiddleBlock = MiddleBlock;
  LoopExitBlock = ExitBlock;
  LoopVectorBody = VecBody;
  LoopScalarBody = OldBasicBlock;

  // Keep all loop hints from the original loop on the vector loop (we'll
  // replace the vectorizer-specific hints below).
  if (MDNode *LID = OrigLoop->getLoopID())
    Lp->setLoopID(LID);

  LoopVectorizeHints Hints(Lp, true, *ORE);
  Hints.setAlreadyVectorized();
}

// Fix up external users of the induction variable. At this point, we are
// in LCSSA form, with all external PHIs that use the IV having one input
// value, coming from the remainder loop. We need those PHIs to also have a
// correct value for the IV when arriving directly from the middle block.
void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
                                       const InductionDescriptor &II,
                                       Value *CountRoundDown, Value *EndValue,
                                       BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the
  // penultimate value (the value that feeds into the phi from the loop
  // latch). We allow both, but they obviously have different values.

  assert(OrigLoop->getExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());
      Value *CountMinusOne = B.CreateSub(
          CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
      Value *CMO = B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType(),
                                       "cast.cmo");
      Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;
    }
  }

  for (auto &I : MissingVals) {
    PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is %IV2 = phi [...], [ %IV1, %latch ]
    // In this case, if IV1 has an external use, we need to avoid adding both
    // "last value of IV1" and "penultimate value of IV2". So, verify that we
    // don't already have an incoming value for the middle block.
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
      PHI->addIncoming(I.second, MiddleBlock);
  }
}

namespace {
struct CSEDenseMapInfo {
  static bool canHandle(Instruction *I) {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  }
  static inline Instruction *getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }
  static inline Instruction *getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }
  static unsigned getHashValue(Instruction *I) {
    assert(canHandle(I) && "Unknown instruction!");
    return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
                                                           I->value_op_end()));
  }
  static bool isEqual(Instruction *LHS, Instruction *RHS) {
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return LHS == RHS;
    return LHS->isIdenticalTo(RHS);
  }
};
}

/// \brief Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
  SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *In = &*I++;

    if (!CSEDenseMapInfo::canHandle(In))
      continue;

    // Check if we can replace this instruction with any of the
    // visited instructions.
    if (Instruction *V = CSEMap.lookup(In)) {
      In->replaceAllUsesWith(V);
      In->eraseFromParent();
      continue;
    }

    CSEMap[In] = In;
  }
}

/// \brief Adds a 'fast' flag to floating point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V)) {
    FastMathFlags Flags;
    Flags.setUnsafeAlgebra();
    cast<Instruction>(V)->setFastMathFlags(Flags);
  }
  return V;
}

/// \brief Estimate the overhead of scalarizing a value based on its type.
/// Insert and Extract are set if the result needs to be inserted and/or
/// extracted from vectors.
/// If the instruction is also to be predicated, add the cost of a PHI
/// node to the insertion cost.
static unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract,
                                         bool Predicated,
                                         const TargetTransformInfo &TTI) {
  if (Ty->isVoidTy())
    return 0;

  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (unsigned I = 0, E = Ty->getVectorNumElements(); I < E; ++I) {
    if (Extract)
      Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, I);
    if (Insert) {
      Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, I);
      if (Predicated)
        Cost += TTI.getCFInstrCost(Instruction::PHI);
    }
  }

  // We assume that if-converted blocks have a 50% chance of being executed.
  // Predicated scalarized instructions are avoided due to the control flow
  // that bypasses turned-off lanes. The extracts and inserts will be
  // sunk/hoisted to the predicated basic block and are subject to the same
  // assumption.
  if (Predicated)
    Cost /= 2;

  return Cost;
}
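// For intuition, a small worked example (with illustrative costs of 1 per
// vector insert/extract): scalarizing an instruction with two <4 x i32>
// operands and a <4 x i32> result costs 4 extracts per operand plus 4 inserts
// for the result, i.e. an overhead of roughly 12 before any predication
// discount.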
/// \brief Estimate the overhead of scalarizing an Instruction based on the
/// types of its operands and return value.
static unsigned getScalarizationOverhead(SmallVectorImpl<Type *> &OpTys,
                                         Type *RetTy, bool Predicated,
                                         const TargetTransformInfo &TTI) {
  unsigned ScalarizationCost =
      getScalarizationOverhead(RetTy, true, false, Predicated, TTI);

  for (Type *Ty : OpTys)
    ScalarizationCost +=
        getScalarizationOverhead(Ty, false, true, Predicated, TTI);

  return ScalarizationCost;
}

/// \brief Estimate the overhead of scalarizing an instruction. This is a
/// convenience wrapper for the type-based getScalarizationOverhead API.
static unsigned getScalarizationOverhead(Instruction *I, unsigned VF,
                                         bool Predicated,
                                         const TargetTransformInfo &TTI) {
  if (VF == 1)
    return 0;

  Type *RetTy = ToVectorTy(I->getType(), VF);

  SmallVector<Type *, 4> OpTys;
  unsigned OperandsNum = I->getNumOperands();
  for (unsigned OpInd = 0; OpInd < OperandsNum; ++OpInd)
    OpTys.push_back(ToVectorTy(I->getOperand(OpInd)->getType(), VF));

  return getScalarizationOverhead(OpTys, RetTy, Predicated, TTI);
}

// Estimate the cost of a call instruction CI if it were vectorized with
// factor VF. Return the cost of the instruction, including scalarization
// overhead if it's needed. The flag NeedToScalarize shows whether the call
// needs to be scalarized, i.e., either a vector version isn't available, or
// it is too expensive.
static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
                                  const TargetTransformInfo &TTI,
                                  const TargetLibraryInfo *TLI,
                                  bool &NeedToScalarize) {
  Function *F = CI->getCalledFunction();
  StringRef FnName = CI->getCalledFunction()->getName();
  Type *ScalarRetTy = CI->getType();
  SmallVector<Type *, 4> Tys, ScalarTys;
  for (auto &ArgOp : CI->arg_operands())
    ScalarTys.push_back(ArgOp->getType());

  // Estimate the cost of the scalarized vector call. The source operands are
  // assumed to be vectors, so we need to extract individual elements from
  // there, execute VF scalar calls, and then gather the result into the
  // vector return value.
  unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
  if (VF == 1)
    return ScalarCallCost;

  // Compute the corresponding vector type for the return value and arguments.
  Type *RetTy = ToVectorTy(ScalarRetTy, VF);
  for (Type *ScalarTy : ScalarTys)
    Tys.push_back(ToVectorTy(ScalarTy, VF));

  // Compute the costs of unpacking argument values for the scalar calls and
  // packing the return values into a vector.
  unsigned ScalarizationCost = getScalarizationOverhead(Tys, RetTy, false, TTI);

  unsigned Cost = ScalarCallCost * VF + ScalarizationCost;

  // If we can't emit a vector call for this function, then the currently found
  // cost is the cost we need to return.
  NeedToScalarize = true;
  if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin())
    return Cost;

  // If the corresponding vector cost is cheaper, return its cost.
  unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
  if (VectorCallCost < Cost) {
    NeedToScalarize = false;
    return VectorCallCost;
  }
  return Cost;
}
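// As a worked example for getVectorCallCost (with illustrative numbers): for
// VF = 4, a scalar call cost of 10 and a scalarization overhead of 8, the
// scalarized cost is 10 * 4 + 8 = 48. A vector library variant reported
// cheaper than 48 is selected instead, and NeedToScalarize is cleared.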
// Estimate the cost of an intrinsic call instruction CI if it were vectorized
// with factor VF. Return the cost of the instruction, including scalarization
// overhead if it's needed.
static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF,
                                       const TargetTransformInfo &TTI,
                                       const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  assert(ID && "Expected intrinsic call!");

  Type *RetTy = ToVectorTy(CI->getType(), VF);
  SmallVector<Type *, 4> Tys;
  for (Value *ArgOperand : CI->arg_operands())
    Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));

  FastMathFlags FMF;
  if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
    FMF = FPMO->getFastMathFlags();

  return TTI.getIntrinsicInstrCost(ID, RetTy, Tys, FMF);
}

static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(T1->getVectorElementType());
  auto *I2 = cast<IntegerType>(T2->getVectorElementType());
  return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
}

static Type *largestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(T1->getVectorElementType());
  auto *I2 = cast<IntegerType>(T2->getVectorElementType());
  return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
}

void InnerLoopVectorizer::truncateToMinimalBitwidths() {
  // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I` and re-extend its result. InstCombine runs
  // later and will remove any ext/trunc pairs.
  SmallPtrSet<Value *, 4> Erased;
  for (const auto &KV : *MinBWs) {
    VectorParts &Parts = VectorLoopValueMap.getVector(KV.first);
    for (Value *&I : Parts) {
      if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
        continue;
      Type *OriginalTy = I->getType();
      Type *ScalarTruncatedTy =
          IntegerType::get(OriginalTy->getContext(), KV.second);
      Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
                                          OriginalTy->getVectorNumElements());
      if (TruncatedTy == OriginalTy)
        continue;

      IRBuilder<> B(cast<Instruction>(I));
      auto ShrinkOperand = [&](Value *V) -> Value * {
        if (auto *ZI = dyn_cast<ZExtInst>(V))
          if (ZI->getSrcTy() == TruncatedTy)
            return ZI->getOperand(0);
        return B.CreateZExtOrTrunc(V, TruncatedTy);
      };

      // The actual instruction modification depends on the instruction type,
      // unfortunately.
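      // As an illustrative sketch: a <4 x i32> add whose result only needs 8
      // bits becomes, before InstCombine removes the ext/trunc pairs:
      //   %a.tr = trunc <4 x i32> %a to <4 x i8>
      //   %b.tr = trunc <4 x i32> %b to <4 x i8>
      //   %add  = add <4 x i8> %a.tr, %b.tr
      //   %res  = zext <4 x i8> %add to <4 x i32>
      // (%a and %b are hypothetical operand names.)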
      Value *NewI = nullptr;
      if (auto *BO = dyn_cast<BinaryOperator>(I)) {
        NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
                             ShrinkOperand(BO->getOperand(1)));
        cast<BinaryOperator>(NewI)->copyIRFlags(I);
      } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
        NewI =
            B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
                         ShrinkOperand(CI->getOperand(1)));
      } else if (auto *SI = dyn_cast<SelectInst>(I)) {
        NewI = B.CreateSelect(SI->getCondition(),
                              ShrinkOperand(SI->getTrueValue()),
                              ShrinkOperand(SI->getFalseValue()));
      } else if (auto *CI = dyn_cast<CastInst>(I)) {
        switch (CI->getOpcode()) {
        default:
          llvm_unreachable("Unhandled cast!");
        case Instruction::Trunc:
          NewI = ShrinkOperand(CI->getOperand(0));
          break;
        case Instruction::SExt:
          NewI = B.CreateSExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        case Instruction::ZExt:
          NewI = B.CreateZExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        }
      } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
        auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
        auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
        auto *O1 = B.CreateZExtOrTrunc(
            SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));

        NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
      } else if (isa<LoadInst>(I)) {
        // Don't do anything with the operands, just extend the result.
        continue;
      } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
        auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
        NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
      } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
        auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        NewI = B.CreateExtractElement(O0, EE->getOperand(2));
      } else {
        llvm_unreachable("Unhandled instruction type!");
      }

      // Lastly, extend the result.
      NewI->takeName(cast<Instruction>(I));
      Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
      I->replaceAllUsesWith(Res);
      cast<Instruction>(I)->eraseFromParent();
      Erased.insert(I);
      I = Res;
    }
  }

  // We'll have created a bunch of ZExts that are now dead. Clean them up.
  for (const auto &KV : *MinBWs) {
    VectorParts &Parts = VectorLoopValueMap.getVector(KV.first);
    for (Value *&I : Parts) {
      ZExtInst *Inst = dyn_cast<ZExtInst>(I);
      if (Inst && Inst->use_empty()) {
        Value *NewI = Inst->getOperand(0);
        Inst->eraseFromParent();
        I = NewI;
      }
    }
  }
}

void InnerLoopVectorizer::vectorizeLoop() {
  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
  //
  //===------------------------------------------------===//
  Constant *Zero = Builder.getInt32(0);

  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. First,
  // we create a new vector PHI node with no incoming edges. We use this value
  // when we vectorize all of the instructions that use the PHI. Next, after
  // all of the instructions in the block are complete we add the new incoming
  // edges to the PHI. At this point all of the instructions in the basic block
  // are vectorized, so we can use them to construct the PHI.
  PhiVector PHIsToFix;

  // Scan the loop in a topological order to ensure that defs are vectorized
  // before users.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  // Vectorize all of the blocks in the original loop.
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    vectorizeBlockInLoop(BB, &PHIsToFix);

  // Insert truncates and extends for any truncated instructions as hints to
  // InstCombine.
  if (VF > 1)
    truncateToMinimalBitwidths();

  // At this point every instruction in the original loop is widened to a
  // vector form. Now we need to fix the recurrences in PHIsToFix. These PHI
  // nodes are currently empty because we did not want to introduce cycles.
  // This is the second stage of vectorizing recurrences.
  for (PHINode *Phi : PHIsToFix) {
    assert(Phi && "Unable to recover vectorized PHI");

    // Handle first-order recurrences that need to be fixed.
    if (Legal->isFirstOrderRecurrence(Phi)) {
      fixFirstOrderRecurrence(Phi);
      continue;
    }

    // If the phi node is not a first-order recurrence, it must be a reduction.
    // Get its reduction variable descriptor.
    assert(Legal->isReductionVariable(Phi) &&
           "Unable to find the reduction variable");
    RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];

    RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
    TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
    Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
    RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
        RdxDesc.getMinMaxRecurrenceKind();
    setDebugLocFromInst(Builder, ReductionStartValue);

    // We need to generate a reduction vector from the incoming scalar.
    // To do so, we need to generate the 'identity' vector and override
    // one of the elements with the incoming scalar reduction. We need
    // to do it in the vector-loop preheader.
    Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());

    // This is the vector-clone of the value that leaves the loop.
    const VectorParts &VectorExit = getVectorValue(LoopExitInst);
    Type *VecTy = VectorExit[0]->getType();

    // Find the reduction identity variable. Zero for addition, or, xor,
    // one for multiplication, -1 for And.
    Value *Identity;
    Value *VectorStart;
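    // For example (an illustrative sketch): for an integer add reduction with
    // start value %red.start and VF = 4, the identity vector is
    // <i32 0, i32 0, i32 0, i32 0> and the start vector becomes
    // <i32 %red.start, i32 0, i32 0, i32 0>. (%red.start is a hypothetical
    // name for the incoming scalar value.)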
    if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
        RK == RecurrenceDescriptor::RK_FloatMinMax) {
      // MinMax reductions have the start value as their identity.
      if (VF == 1) {
        VectorStart = Identity = ReductionStartValue;
      } else {
        VectorStart = Identity =
            Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
      }
    } else {
      // Handle other reduction kinds:
      Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
          RK, VecTy->getScalarType());
      if (VF == 1) {
        Identity = Iden;
        // This vector is the Identity vector where the first element is the
        // incoming scalar reduction.
        VectorStart = ReductionStartValue;
      } else {
        Identity = ConstantVector::getSplat(VF, Iden);

        // This vector is the Identity vector where the first element is the
        // incoming scalar reduction.
        VectorStart =
            Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
      }
    }

    // Fix the vector-loop phi.

    // Reductions do not have to start at zero. They can start with
    // any loop-invariant values.
    const VectorParts &VecRdxPhi = getVectorValue(Phi);
    BasicBlock *Latch = OrigLoop->getLoopLatch();
    Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
    const VectorParts &Val = getVectorValue(LoopVal);
    for (unsigned part = 0; part < UF; ++part) {
      // Make sure to add the reduction start value only to the
      // first unroll part.
      Value *StartVal = (part == 0) ? VectorStart : Identity;
      cast<PHINode>(VecRdxPhi[part])
          ->addIncoming(StartVal, LoopVectorPreHeader);
      cast<PHINode>(VecRdxPhi[part])
          ->addIncoming(Val[part], LoopVectorBody);
    }

    // Before each round, move the insertion point right between
    // the PHIs and the values we are going to write.
    // This allows us to write both PHINodes and the extractelement
    // instructions.
    Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());

    VectorParts &RdxParts = VectorLoopValueMap.getVector(LoopExitInst);
    setDebugLocFromInst(Builder, LoopExitInst);

    // If the vector reduction can be performed in a smaller type, we truncate
    // then extend the loop exit value to enable InstCombine to evaluate the
    // entire expression in the smaller type.
    if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
      Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
      Builder.SetInsertPoint(LoopVectorBody->getTerminator());
      for (unsigned part = 0; part < UF; ++part) {
        Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
        Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
                                          : Builder.CreateZExt(Trunc, VecTy);
        for (Value::user_iterator UI = RdxParts[part]->user_begin();
             UI != RdxParts[part]->user_end();)
          if (*UI != Trunc) {
            (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd);
            RdxParts[part] = Extnd;
          } else {
            ++UI;
          }
      }
      Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
      for (unsigned part = 0; part < UF; ++part)
        RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
    }

    // Reduce all of the unrolled parts into a single vector.
    Value *ReducedPartRdx = RdxParts[0];
    unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
    setDebugLocFromInst(Builder, ReducedPartRdx);
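    // As an illustrative sketch for an integer add reduction with UF = 2 and
    // VF = 4, the loop below first combines the unrolled parts:
    //   %bin.rdx = add <4 x i32> %rdx.part0, %rdx.part1
    // and the VF > 1 case further down then reduces that single vector with
    // log2(VF) shuffle/add rounds before extracting lane 0. (%rdx.part0 and
    // %rdx.part1 are hypothetical names for the two parts.)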
    for (unsigned part = 1; part < UF; ++part) {
      if (Op != Instruction::ICmp && Op != Instruction::FCmp)
        // Floating point operations had to be 'fast' to enable the reduction.
        ReducedPartRdx = addFastMathFlag(
            Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part],
                                ReducedPartRdx, "bin.rdx"));
      else
        ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp(
            Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]);
    }

    if (VF > 1) {
      // VF is a power of 2 so we can emit the reduction using log2(VF)
      // shuffles and vector ops, reducing the set of values being computed
      // by half each round.
      assert(isPowerOf2_32(VF) &&
             "Reduction emission only supported for pow2 vectors!");
      Value *TmpVec = ReducedPartRdx;
      SmallVector<Constant *, 32> ShuffleMask(VF, nullptr);
      for (unsigned i = VF; i != 1; i >>= 1) {
        // Move the upper half of the vector to the lower half.
        for (unsigned j = 0; j != i / 2; ++j)
          ShuffleMask[j] = Builder.getInt32(i / 2 + j);

        // Fill the rest of the mask with undef.
        std::fill(&ShuffleMask[i / 2], ShuffleMask.end(),
                  UndefValue::get(Builder.getInt32Ty()));

        Value *Shuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()),
            ConstantVector::get(ShuffleMask), "rdx.shuf");

        if (Op != Instruction::ICmp && Op != Instruction::FCmp)
          // Floating point operations had to be 'fast' to enable the
          // reduction.
          TmpVec = addFastMathFlag(Builder.CreateBinOp(
              (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx"));
        else
          TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind,
                                                        TmpVec, Shuf);
      }

      // The result is in the first element of the vector.
      ReducedPartRdx =
          Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));

      // If the reduction can be performed in a smaller type, we need to
      // extend the reduction to the wider type before we branch to the
      // original loop.
      if (Phi->getType() != RdxDesc.getRecurrenceType())
        ReducedPartRdx =
            RdxDesc.isSigned()
                ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
                : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
    }

    // Create a phi node that merges control-flow from the backedge-taken check
    // block and the middle block.
    PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
                                          LoopScalarPreHeader->getTerminator());
    for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
      BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
    BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);

    // Now, we need to fix the users of the reduction variable
    // inside and outside of the scalar remainder loop.
    // We know that the loop is in LCSSA form. We need to update the
    // PHI nodes in the exit blocks.
    for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
                              LEE = LoopExitBlock->end();
         LEI != LEE; ++LEI) {
      PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
      if (!LCSSAPhi)
        break;

      // All PHINodes need to have a single entry edge, or two if
      // we already fixed them.
      assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI");

      // We found our reduction value exit-PHI. Update it with the
      // incoming bypass edge.
      if (LCSSAPhi->getIncomingValue(0) == LoopExitInst) {
        // Add an edge coming from the bypass.
        LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
        break;
      }
    } // end of the LCSSA phi scan.

    // Fix the scalar loop reduction variable with the incoming reduction sum
    // from the vector body and from the backedge value.
    int IncomingEdgeBlockIdx =
        Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
    assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
    // Pick the other block.
    int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
    Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
    Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
  } // end of for each Phi in PHIsToFix.

  fixLCSSAPHIs();

  // Make sure DomTree is updated.
  updateAnalysis();

  predicateInstructions();

  // Remove redundant induction instructions.
  cse(LoopVectorBody);
}

void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {

  // This is the second phase of vectorizing first-order recurrences. An
  // overview of the transformation is described below. Suppose we have the
  // following loop.
  //
  //   for (int i = 0; i < n; ++i)
  //     b[i] = a[i] - a[i - 1];
  //
  // There is a first-order recurrence on "a". For this loop, the shorthand
  // scalar IR looks like:
  //
  //   scalar.ph:
  //     s_init = a[-1]
  //     br scalar.body
  //
  //   scalar.body:
  //     i = phi [0, scalar.ph], [i+1, scalar.body]
  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //     s2 = a[i]
  //     b[i] = s2 - s1
  //     br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // temporary value for s1. We now complete the vectorization and produce the
  // shorthand vector IR shown below (for VF = 4, UF = 1).
  //
  //   vector.ph:
  //     v_init = vector(..., ..., ..., a[-1])
  //     br vector.body
  //
  //   vector.body
  //     i = phi [0, vector.ph], [i+4, vector.body]
  //     v1 = phi [v_init, vector.ph], [v2, vector.body]
  //     v2 = a[i, i+1, i+2, i+3];
  //     v3 = vector(v1(3), v2(0, 1, 2))
  //     b[i, i+1, i+2, i+3] = v2 - v3
  //     br cond, vector.body, middle.block
  //
  //   middle.block:
  //     x = v2(3)
  //     br scalar.ph
  //
  //   scalar.ph:
  //     s_init = phi [x, middle.block], [a[-1], otherwise]
  //     br scalar.body
  //
  // After the vector loop completes, we extract the next value of the
  // recurrence (x) to use as the initial value in the scalar loop.

  // Get the original loop preheader and single loop latch.
  auto *Preheader = OrigLoop->getLoopPreheader();
  auto *Latch = OrigLoop->getLoopLatch();

  // Get the initial and previous values of the scalar recurrence.
  auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
  auto *Previous = Phi->getIncomingValueForBlock(Latch);

  // Create a vector from the initial value.
  auto *VectorInit = ScalarInit;
  if (VF > 1) {
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
    VectorInit = Builder.CreateInsertElement(
        UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
        Builder.getInt32(VF - 1), "vector.recur.init");
  }

  // We constructed a temporary phi node in the first phase of vectorization.
  // This phi node will eventually be deleted.
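  // The shuffle mask constructed below combines the last lane of the previous
  // iteration's vector with the leading lanes of the current one; e.g., as an
  // illustrative sketch for VF = 4, the mask is <3, 4, 5, 6>, selecting
  // v1(3) followed by v2(0, 1, 2) as in the overview above.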
  VectorParts &PhiParts = VectorLoopValueMap.getVector(Phi);
  Builder.SetInsertPoint(cast<Instruction>(PhiParts[0]));

  // Create a phi node for the new recurrence. The current value will either be
  // the initial value inserted into a vector or a loop-varying vector value.
  auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
  VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);

  // Get the vectorized previous value. We ensured the previous value was an
  // instruction when detecting the recurrence.
  auto &PreviousParts = getVectorValue(Previous);

  // Set the insertion point to be after this instruction. We ensured the
  // previous value dominated all uses of the phi when detecting the
  // recurrence.
  Builder.SetInsertPoint(
      &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1])));

  // We will construct a vector for the recurrence by combining the values for
  // the current and previous iterations. This is the required shuffle mask.
  SmallVector<Constant *, 8> ShuffleMask(VF);
  ShuffleMask[0] = Builder.getInt32(VF - 1);
  for (unsigned I = 1; I < VF; ++I)
    ShuffleMask[I] = Builder.getInt32(I + VF - 1);

  // The vector from which to take the initial value for the current iteration
  // (actual or unrolled). Initially, this is the vector phi node.
  Value *Incoming = VecPhi;

  // Shuffle the current and previous vector and update the vector parts.
  for (unsigned Part = 0; Part < UF; ++Part) {
    auto *Shuffle =
        VF > 1
            ? Builder.CreateShuffleVector(Incoming, PreviousParts[Part],
                                          ConstantVector::get(ShuffleMask))
            : Incoming;
    PhiParts[Part]->replaceAllUsesWith(Shuffle);
    cast<Instruction>(PhiParts[Part])->eraseFromParent();
    PhiParts[Part] = Shuffle;
    Incoming = PreviousParts[Part];
  }

  // Fix the latch value of the new recurrence in the vector loop.
  VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());

  // Extract the last vector element in the middle block. This will be the
  // initial value for the recurrence when jumping to the scalar loop.
  auto *Extract = Incoming;
  if (VF > 1) {
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    Extract = Builder.CreateExtractElement(Extract, Builder.getInt32(VF - 1),
                                           "vector.recur.extract");
  }

  // Fix the initial value of the original recurrence in the scalar loop.
  Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
  auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
  for (auto *BB : predecessors(LoopScalarPreHeader)) {
    auto *Incoming = BB == LoopMiddleBlock ? Extract : ScalarInit;
    Start->addIncoming(Incoming, BB);
  }

  Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
  Phi->setName("scalar.recur");

  // Finally, fix users of the recurrence outside the loop. The users will need
  // either the last value of the scalar recurrence or the last value of the
  // vector recurrence we extracted in the middle block. Since the loop is in
  // LCSSA form, we just need to find the phi node for the original scalar
  // recurrence in the exit block, and then add an edge for the middle block.
  for (auto &I : *LoopExitBlock) {
    auto *LCSSAPhi = dyn_cast<PHINode>(&I);
    if (!LCSSAPhi)
      break;
    if (LCSSAPhi->getIncomingValue(0) == Phi) {
      LCSSAPhi->addIncoming(Extract, LoopMiddleBlock);
      break;
    }
  }
}

void InnerLoopVectorizer::fixLCSSAPHIs() {
  for (Instruction &LEI : *LoopExitBlock) {
    auto *LCSSAPhi = dyn_cast<PHINode>(&LEI);
    if (!LCSSAPhi)
      break;
    if (LCSSAPhi->getNumIncomingValues() == 1)
      LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()),
                            LoopMiddleBlock);
  }
}

void InnerLoopVectorizer::predicateInstructions() {

  // For each instruction I marked for predication on value C, split I into its
  // own basic block to form an if-then construct over C. Since I may be fed by
  // an extractelement instruction and/or may be feeding an insertelement
  // instruction generated during scalarization, we try to move such
  // instructions into the predicated basic block as well. For the
  // insertelement, this also means that the PHI will be created for the
  // resulting vector rather than for the scalar instruction.
  // So for some predicated instruction, e.g. the conditional sdiv in:
  //
  // for.body:
  //  ...
  //  %add = add nsw i32 %mul, %0
  //  %cmp5 = icmp sgt i32 %2, 7
  //  br i1 %cmp5, label %if.then, label %if.end
  //
  // if.then:
  //  %div = sdiv i32 %0, %1
  //  br label %if.end
  //
  // if.end:
  //  %x.0 = phi i32 [ %div, %if.then ], [ %add, %for.body ]
  //
  // the sdiv at this point is scalarized and if-converted using a select.
  // The inactive elements in the vector are not used, but the predicated
  // instruction is still executed for all vector elements, essentially:
  //
  // vector.body:
  //  ...
  //  %17 = add nsw <2 x i32> %16, %wide.load
  //  %29 = extractelement <2 x i32> %wide.load, i32 0
  //  %30 = extractelement <2 x i32> %wide.load51, i32 0
  //  %31 = sdiv i32 %29, %30
  //  %32 = insertelement <2 x i32> undef, i32 %31, i32 0
  //  %35 = extractelement <2 x i32> %wide.load, i32 1
  //  %36 = extractelement <2 x i32> %wide.load51, i32 1
  //  %37 = sdiv i32 %35, %36
  //  %38 = insertelement <2 x i32> %32, i32 %37, i32 1
  //  %predphi = select <2 x i1> %26, <2 x i32> %38, <2 x i32> %17
  //
  // Predication will now re-introduce the original control flow to avoid false
  // side effects by the sdiv instructions on the inactive elements, yielding
  // (after cleanup):
  //
  // vector.body:
  //  ...
  //  %5 = add nsw <2 x i32> %4, %wide.load
  //  %8 = icmp sgt <2 x i32> %wide.load52, <i32 7, i32 7>
  //  %9 = extractelement <2 x i1> %8, i32 0
  //  br i1 %9, label %pred.sdiv.if, label %pred.sdiv.continue
  //
  // pred.sdiv.if:
  //  %10 = extractelement <2 x i32> %wide.load, i32 0
  //  %11 = extractelement <2 x i32> %wide.load51, i32 0
  //  %12 = sdiv i32 %10, %11
  //  %13 = insertelement <2 x i32> undef, i32 %12, i32 0
  //  br label %pred.sdiv.continue
  //
  // pred.sdiv.continue:
  //  %14 = phi <2 x i32> [ undef, %vector.body ], [ %13, %pred.sdiv.if ]
  //  %15 = extractelement <2 x i1> %8, i32 1
  //  br i1 %15, label %pred.sdiv.if54, label %pred.sdiv.continue55
  //
  // pred.sdiv.if54:
  //  %16 = extractelement <2 x i32> %wide.load, i32 1
  //  %17 = extractelement <2 x i32> %wide.load51, i32 1
  //  %18 = sdiv i32 %16, %17
  //  %19 = insertelement <2 x i32> %14, i32 %18, i32 1
  //  br label %pred.sdiv.continue55
  //
  // pred.sdiv.continue55:
  //  %20 = phi <2 x i32> [ %14, %pred.sdiv.continue ], [ %19, %pred.sdiv.if54 ]
  //  %predphi = select <2 x i1> %8, <2 x i32> %20, <2 x i32> %5

  for (auto KV : PredicatedInstructions) {
    BasicBlock::iterator I(KV.first);
    BasicBlock *Head = I->getParent();
    auto *BB = SplitBlock(Head, &*std::next(I), DT, LI);
    auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false,
                                        /*BranchWeights=*/nullptr, DT, LI);
    I->moveBefore(T);
    // Try to move any extractelement instructions we may have created for the
    // predicated instruction into the Then block.
    for (Use &Op : I->operands()) {
      auto *OpInst = dyn_cast<ExtractElementInst>(&*Op);
      if (OpInst && OpInst->hasOneUse()) // TODO: more accurately - hasOneUser()
        OpInst->moveBefore(&*I);
    }

    I->getParent()->setName(Twine("pred.") + I->getOpcodeName() + ".if");
    BB->setName(Twine("pred.") + I->getOpcodeName() + ".continue");

    // If the instruction is non-void, create a phi node at the reconvergence
    // point.
    if (!I->getType()->isVoidTy()) {
      Value *IncomingTrue = nullptr;
      Value *IncomingFalse = nullptr;

      if (I->hasOneUse() && isa<InsertElementInst>(*I->user_begin())) {
        // If the predicated instruction is feeding an insert-element, move it
        // into the Then block; the phi node will be created for the vector.
        InsertElementInst *IEI = cast<InsertElementInst>(*I->user_begin());
        IEI->moveBefore(T);
        IncomingTrue = IEI;                 // the new vector with the inserted element.
        IncomingFalse = IEI->getOperand(0); // the unmodified vector.
      } else {
        // The phi node will be created for the scalar predicated instruction.
        IncomingTrue = &*I;
        IncomingFalse = UndefValue::get(I->getType());
      }

      BasicBlock *PostDom = I->getParent()->getSingleSuccessor();
      assert(PostDom && "Then block has multiple successors");
      PHINode *Phi =
          PHINode::Create(IncomingTrue->getType(), 2, "", &PostDom->front());
      IncomingTrue->replaceAllUsesWith(Phi);
      Phi->addIncoming(IncomingFalse, Head);
      Phi->addIncoming(IncomingTrue, I->getParent());
    }
  }

  DEBUG(DT->verifyDomTree());
}

InnerLoopVectorizer::VectorParts
InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
  assert(is_contained(predecessors(Dst), Src) && "Invalid edge");

  // Look for a cached value.
  std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
  EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge);
  if (ECEntryIt != MaskCache.end())
    return ECEntryIt->second;

  VectorParts SrcMask = createBlockInMask(Src);

  // The terminator has to be a branch inst!
  BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
  assert(BI && "Unexpected terminator found");

  if (BI->isConditional()) {
    VectorParts EdgeMask = getVectorValue(BI->getCondition());

    if (BI->getSuccessor(0) != Dst)
      for (unsigned part = 0; part < UF; ++part)
        EdgeMask[part] = Builder.CreateNot(EdgeMask[part]);

    for (unsigned part = 0; part < UF; ++part)
      EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]);

    MaskCache[Edge] = EdgeMask;
    return EdgeMask;
  }

  MaskCache[Edge] = SrcMask;
  return SrcMask;
}

InnerLoopVectorizer::VectorParts
InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
  assert(OrigLoop->contains(BB) && "Block is not a part of a loop");

  // The loop incoming mask is all-one.
  if (OrigLoop->getHeader() == BB) {
    Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1);
    return getVectorValue(C);
  }

  // This is the block mask. We OR all incoming edge masks together, starting
  // with zero.
  Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0);
  VectorParts BlockMask = getVectorValue(Zero);

  // For each predecessor:
  for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) {
    VectorParts EM = createEdgeMask(*it, BB);
    for (unsigned part = 0; part < UF; ++part)
      BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]);
  }

  return BlockMask;
}

void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
                                              unsigned VF, PhiVector *PV) {
  PHINode *P = cast<PHINode>(PN);
  // Handle recurrences.
  if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
    VectorParts Entry(UF);
    for (unsigned part = 0; part < UF; ++part) {
      // This is phase one of vectorizing PHIs.
      Type *VecTy =
          (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
      Entry[part] = PHINode::Create(
          VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
    }
    VectorLoopValueMap.initVector(P, Entry);
    PV->push_back(P);
    return;
  }

  setDebugLocFromInst(Builder, P);
  // Check for PHI nodes that are lowered to vector selects.
  if (P->getParent() != OrigLoop->getHeader()) {
    // We know that all PHIs in non-header blocks are converted into selects,
    // so we don't have to worry about the insertion order, and we can just use
    // the builder. At this point we generate the predication tree. There may
    // be duplications since this is a simple recursive scan, but future
    // optimizations will clean it up.
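    //
    // For example (an illustrative sketch; the names are ours), a phi merging
    // a simple diamond
    //
    //   header:  br i1 %c, label %then, label %else
    //   then:    br label %merge
    //   else:    br label %merge
    //   merge:   %x = phi [ %a, %then ], [ %b, %else ]
    //
    // is lowered using the edge masks, conceptually:
    //
    //   %predphi = select <VF x i1> %mask.then, %a.vec, %b.vec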

    unsigned NumIncoming = P->getNumIncomingValues();

    // Generate a sequence of selects of the form:
    // SELECT(Mask3, In3,
    //        SELECT(Mask2, In2,
    //               (...)))
    VectorParts Entry(UF);
    for (unsigned In = 0; In < NumIncoming; In++) {
      VectorParts Cond =
          createEdgeMask(P->getIncomingBlock(In), P->getParent());
      const VectorParts &In0 = getVectorValue(P->getIncomingValue(In));

      for (unsigned part = 0; part < UF; ++part) {
        // We might have single edge PHIs (blocks) - use an identity
        // 'select' for the first PHI operand.
        if (In == 0)
          Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In0[part]);
        else
          // Select between the current value and the previous incoming edge
          // based on the incoming mask.
          Entry[part] = Builder.CreateSelect(Cond[part], In0[part], Entry[part],
                                             "predphi");
      }
    }
    VectorLoopValueMap.initVector(P, Entry);
    return;
  }

  // This PHINode must be an induction variable.
  // Make sure that we know about it.
  assert(Legal->getInductionVars()->count(P) && "Not an induction variable");

  InductionDescriptor II = Legal->getInductionVars()->lookup(P);
  const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // FIXME: The newly created binary instructions should contain nsw/nuw flags,
  // which can be found from the original scalar operations.
  switch (II.getKind()) {
  case InductionDescriptor::IK_NoInduction:
    llvm_unreachable("Unknown induction");
  case InductionDescriptor::IK_IntInduction:
    return widenIntInduction(P);
  case InductionDescriptor::IK_PtrInduction: {
    // Handle the pointer induction variable case.
    assert(P->getType()->isPointerTy() && "Unexpected type.");
    // This is the normalized GEP that starts counting at zero.
    Value *PtrInd = Induction;
    PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
    // Determine the number of scalars we need to generate for each unroll
    // iteration. If the instruction is uniform, we only need to generate the
    // first lane. Otherwise, we generate all VF values.
    unsigned Lanes = Legal->isUniformAfterVectorization(P) ? 1 : VF;
    // These are the scalar results. Notice that we don't generate vector GEPs
    // because scalar GEPs result in better code.
    ScalarParts Entry(UF);
    for (unsigned Part = 0; Part < UF; ++Part) {
      Entry[Part].resize(VF);
      for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
        Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
        Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
        Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
        SclrGep->setName("next.gep");
        Entry[Part][Lane] = SclrGep;
      }
    }
    VectorLoopValueMap.initScalar(P, Entry);
    return;
  }
  case InductionDescriptor::IK_FpInduction: {
    assert(P->getType() == II.getStartValue()->getType() &&
           "Types must match");
    // Handle other induction variables that are now based on the canonical
    // one.
    assert(P != OldInduction && "Primary induction can be integer only");

    Value *V = Builder.CreateCast(Instruction::SIToFP, Induction, P->getType());
    V = II.transform(Builder, V, PSE.getSE(), DL);
    V->setName("fp.offset.idx");

    // Now we have the scalar op: %fp.offset.idx = StartVal +/- Induction*StepVal.

    Value *Broadcasted = getBroadcastInstrs(V);
    // After broadcasting the induction variable we need to make the vector
    // consecutive by adding StepVal*0, StepVal*1, StepVal*2, etc.
    Value *StepVal = cast<SCEVUnknown>(II.getStep())->getValue();
    VectorParts Entry(UF);
    for (unsigned part = 0; part < UF; ++part)
      Entry[part] = getStepVector(Broadcasted, VF * part, StepVal,
                                  II.getInductionOpcode());
    VectorLoopValueMap.initVector(P, Entry);
    return;
  }
  }
}

/// A helper function for checking whether an integer division-related
/// instruction may divide by zero (in which case it must be predicated if
/// executed conditionally in the scalar code).
/// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are non-compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing
/// the division, but can do so w/o predication.
static bool mayDivideByZero(Instruction &I) {
  assert((I.getOpcode() == Instruction::UDiv ||
          I.getOpcode() == Instruction::SDiv ||
          I.getOpcode() == Instruction::URem ||
          I.getOpcode() == Instruction::SRem) &&
         "Unexpected instruction");
  Value *Divisor = I.getOperand(1);
  auto *CInt = dyn_cast<ConstantInt>(Divisor);
  return !CInt || CInt->isZero();
}

void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
  // For each instruction in the old loop.
  for (Instruction &I : *BB) {

    // Scalarize instructions that should remain scalar after vectorization.
    if (!(isa<BranchInst>(&I) || isa<PHINode>(&I) ||
          isa<DbgInfoIntrinsic>(&I)) &&
        Legal->isScalarAfterVectorization(&I)) {
      scalarizeInstruction(&I);
      continue;
    }

    switch (I.getOpcode()) {
    case Instruction::Br:
      // Nothing to do for PHIs and BR, since we already took care of the
      // loop control flow instructions.
      continue;
    case Instruction::PHI: {
      // Vectorize PHINodes.
      widenPHIInstruction(&I, UF, VF, PV);
      continue;
    } // End of PHI.

    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::URem:
      // Scalarize with predication if this instruction may divide by zero and
      // block execution is conditional; otherwise fall through.
      if (mayDivideByZero(I) && Legal->blockNeedsPredication(I.getParent())) {
        scalarizeInstruction(&I, true);
        continue;
      }
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      // Just widen binops.
      auto *BinOp = cast<BinaryOperator>(&I);
      setDebugLocFromInst(Builder, BinOp);
      const VectorParts &A = getVectorValue(BinOp->getOperand(0));
      const VectorParts &B = getVectorValue(BinOp->getOperand(1));

      // Use this vector value for all users of the original instruction.
      VectorParts Entry(UF);
      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]);

        if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
          VecOp->copyIRFlags(BinOp);

        Entry[Part] = V;
      }

      VectorLoopValueMap.initVector(&I, Entry);
      addMetadata(Entry, BinOp);
      break;
    }
    case Instruction::Select: {
      // Widen selects.
      // If the selector is loop invariant, we can create a select
      // instruction with a scalar condition. Otherwise, use a vector select.
      auto *SE = PSE.getSE();
      bool InvariantCond =
          SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
      setDebugLocFromInst(Builder, &I);

      // The condition can be loop invariant but still defined inside the
      // loop. This means that we can't just use the original 'cond' value.
      // We have to take the 'vectorized' value and pick the first lane.
      // Instcombine will make this a no-op.
      const VectorParts &Cond = getVectorValue(I.getOperand(0));
      const VectorParts &Op0 = getVectorValue(I.getOperand(1));
      const VectorParts &Op1 = getVectorValue(I.getOperand(2));

      auto *ScalarCond = getScalarValue(I.getOperand(0), 0, 0);

      VectorParts Entry(UF);
      for (unsigned Part = 0; Part < UF; ++Part) {
        Entry[Part] = Builder.CreateSelect(
            InvariantCond ? ScalarCond : Cond[Part], Op0[Part], Op1[Part]);
      }

      VectorLoopValueMap.initVector(&I, Entry);
      addMetadata(Entry, &I);
      break;
    }

    case Instruction::ICmp:
    case Instruction::FCmp: {
      // Widen compares. Generate vector compares.
      bool FCmp = (I.getOpcode() == Instruction::FCmp);
      auto *Cmp = cast<CmpInst>(&I);
      setDebugLocFromInst(Builder, Cmp);
      const VectorParts &A = getVectorValue(Cmp->getOperand(0));
      const VectorParts &B = getVectorValue(Cmp->getOperand(1));
      VectorParts Entry(UF);
      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *C = nullptr;
        if (FCmp) {
          C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]);
          cast<FCmpInst>(C)->copyFastMathFlags(Cmp);
        } else {
          C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]);
        }
        Entry[Part] = C;
      }

      VectorLoopValueMap.initVector(&I, Entry);
      addMetadata(Entry, &I);
      break;
    }

    case Instruction::Store:
    case Instruction::Load:
      vectorizeMemoryInstruction(&I);
      break;
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      auto *CI = cast<CastInst>(&I);
      setDebugLocFromInst(Builder, CI);

      // Optimize the special case where the source is a constant integer
      // induction variable. Notice that we can only optimize the 'trunc' case
      // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
      // (c) other casts depend on pointer size.
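      //
      // For example (an illustrative sketch), in a loop such as
      //
      //   for (int i = 0; i < n; ++i)
      //     a[i] = (short)i;
      //
      // the 'trunc i32 %i to i16' of the induction variable can be widened as
      // a truncated induction starting at <i16 0, i16 1, ...> and stepping by
      // VF directly in i16, instead of truncating a wide i32 vector on every
      // iteration.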
      auto ID = Legal->getInductionVars()->lookup(OldInduction);
      if (isa<TruncInst>(CI) && CI->getOperand(0) == OldInduction &&
          ID.getConstIntStepValue()) {
        widenIntInduction(OldInduction, cast<TruncInst>(CI));
        break;
      }

      // Vectorize casts.
      Type *DestTy =
          (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);

      const VectorParts &A = getVectorValue(CI->getOperand(0));
      VectorParts Entry(UF);
      for (unsigned Part = 0; Part < UF; ++Part)
        Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy);
      VectorLoopValueMap.initVector(&I, Entry);
      addMetadata(Entry, &I);
      break;
    }

    case Instruction::Call: {
      // Ignore dbg intrinsics.
      if (isa<DbgInfoIntrinsic>(I))
        break;
      setDebugLocFromInst(Builder, &I);

      Module *M = BB->getParent()->getParent();
      auto *CI = cast<CallInst>(&I);

      StringRef FnName = CI->getCalledFunction()->getName();
      Function *F = CI->getCalledFunction();
      Type *RetTy = ToVectorTy(CI->getType(), VF);
      SmallVector<Type *, 4> Tys;
      for (Value *ArgOperand : CI->arg_operands())
        Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));

      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
                 ID == Intrinsic::lifetime_start)) {
        scalarizeInstruction(&I);
        break;
      }
      // The flag indicates whether we use an intrinsic or a plain call for the
      // vectorized version of the instruction, i.e. whether it is beneficial
      // to perform the intrinsic call rather than the library call.
      bool NeedToScalarize;
      unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
      bool UseVectorIntrinsic =
          ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
      if (!UseVectorIntrinsic && NeedToScalarize) {
        scalarizeInstruction(&I);
        break;
      }

      VectorParts Entry(UF);
      for (unsigned Part = 0; Part < UF; ++Part) {
        SmallVector<Value *, 4> Args;
        for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
          Value *Arg = CI->getArgOperand(i);
          // Some intrinsics have a scalar argument - don't replace it with a
          // vector.
          if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) {
            const VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i));
            Arg = VectorArg[Part];
          }
          Args.push_back(Arg);
        }

        Function *VectorF;
        if (UseVectorIntrinsic) {
          // Use the vector version of the intrinsic.
          Type *TysForDecl[] = {CI->getType()};
          if (VF > 1)
            TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
          VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
        } else {
          // Use the vector version of the library call.
          StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
          assert(!VFnName.empty() && "Vector function name is empty.");
          VectorF = M->getFunction(VFnName);
          if (!VectorF) {
            // Generate a declaration.
            FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
            VectorF =
                Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
            VectorF->copyAttributesFrom(F);
          }
        }
        assert(VectorF && "Can't create vector function.");

        SmallVector<OperandBundleDef, 1> OpBundles;
        CI->getOperandBundlesAsDefs(OpBundles);
        CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

        if (isa<FPMathOperator>(V))
          V->copyFastMathFlags(CI);

        Entry[Part] = V;
      }

      VectorLoopValueMap.initVector(&I, Entry);
      addMetadata(Entry, &I);
      break;
    }

    default:
      // All other instructions are unsupported. Scalarize them.
      scalarizeInstruction(&I);
      break;
    } // end of switch.
  } // end of for_each instr.
}

void InnerLoopVectorizer::updateAnalysis() {
  // Forget the original basic block.
  PSE.getSE()->forgetLoop(OrigLoop);

  // Update the dominator tree information.
  assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
         "Entry does not dominate exit.");

  // We don't predicate stores by this point, so the vector body should be a
  // single loop.
  DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader);

  DT->addNewBlock(LoopMiddleBlock, LoopVectorBody);
  DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
  DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
  DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);

  DEBUG(DT->verifyDomTree());
}

/// \brief Check whether it is safe to if-convert this phi node.
///
/// Phi nodes with constant expressions that can trap are not safe to
/// if-convert.
static bool canIfConvertPHINodes(BasicBlock *BB) {
  for (Instruction &I : *BB) {
    auto *Phi = dyn_cast<PHINode>(&I);
    if (!Phi)
      return true;
    for (Value *V : Phi->incoming_values())
      if (auto *C = dyn_cast<Constant>(V))
        if (C->canTrap())
          return false;
  }
  return true;
}

bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
  if (!EnableIfConversion) {
    ORE->emit(createMissedAnalysis("IfConversionDisabled")
              << "if-conversion is disabled");
    return false;
  }

  assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");

  // A list of pointers that we can safely read from and write to.
  SmallPtrSet<Value *, 8> SafePointers;

  // Collect safe addresses.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (blockNeedsPredication(BB))
      continue;

    for (Instruction &I : *BB)
      if (auto *Ptr = getPointerOperand(&I))
        SafePointers.insert(Ptr);
  }

  // Collect the blocks that need predication.
  BasicBlock *Header = TheLoop->getHeader();
  for (BasicBlock *BB : TheLoop->blocks()) {
    // We don't support switch statements inside loops.
    if (!isa<BranchInst>(BB->getTerminator())) {
      ORE->emit(createMissedAnalysis("LoopContainsSwitch", BB->getTerminator())
                << "loop contains a switch statement");
      return false;
    }

    // We must be able to predicate all blocks that need to be predicated.
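    //
    // For example (an illustrative sketch), in
    //
    //   for (i = 0; i < n; ++i)
    //     if (c[i])
    //       a[i] = 0;
    //
    // the block containing the store executes conditionally and needs
    // predication; it can only be if-converted if the store can be masked or
    // is otherwise safe to execute unconditionally.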
    if (blockNeedsPredication(BB)) {
      if (!blockCanBePredicated(BB, SafePointers)) {
        ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
                  << "control flow cannot be substituted for a select");
        return false;
      }
    } else if (BB != Header && !canIfConvertPHINodes(BB)) {
      ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
                << "control flow cannot be substituted for a select");
      return false;
    }
  }

  // We can if-convert this loop.
  return true;
}

bool LoopVectorizationLegality::canVectorize() {
  // We must have a loop in canonical form. Loops with indirectbr in them cannot
  // be canonicalized.
  if (!TheLoop->getLoopPreheader()) {
    ORE->emit(createMissedAnalysis("CFGNotUnderstood")
              << "loop control flow is not understood by vectorizer");
    return false;
  }

  // FIXME: The code is currently dead, since any loop that gets sent to
  // LoopVectorizationLegality is already an innermost loop.
  //
  // We can only vectorize innermost loops.
  if (!TheLoop->empty()) {
    ORE->emit(createMissedAnalysis("NotInnermostLoop")
              << "loop is not the innermost loop");
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    ORE->emit(createMissedAnalysis("CFGNotUnderstood")
              << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    ORE->emit(createMissedAnalysis("CFGNotUnderstood")
              << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We only handle bottom-tested loops, i.e., loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    ORE->emit(createMissedAnalysis("CFGNotUnderstood")
              << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We need to have a loop header.
  DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
               << '\n');

  // Check if we can if-convert non-single-bb loops.
  unsigned NumBlocks = TheLoop->getNumBlocks();
  if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
    DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE.getBackedgeTakenCount();
  if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
    ORE->emit(createMissedAnalysis("CantComputeNumberOfIterations")
              << "could not determine number of loop iterations");
    DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
    return false;
  }

  // Check if we can vectorize the instructions and CFG in this loop.
  if (!canVectorizeInstrs()) {
    DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
    return false;
  }

  // Go over each instruction and look at memory deps.
  if (!canVectorizeMemory()) {
    DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
    return false;
  }

  DEBUG(dbgs() << "LV: We can vectorize this loop"
               << (LAI->getRuntimePointerChecking()->Need
                       ? " (with a runtime bound check)"
                       : "")
               << "!\n");

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved)
    InterleaveInfo.analyzeInterleaving(*getSymbolicStrides());

  // Collect all instructions that are known to be uniform after vectorization.
  collectLoopUniforms();

  // Collect all instructions that are known to be scalar after vectorization.
  collectLoopScalars();

  unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
  if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
    SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;

  if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
    ORE->emit(createMissedAnalysis("TooManySCEVRunTimeChecks")
              << "Too many SCEV assumptions need to be made and checked "
              << "at runtime");
    DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
    return false;
  }

  // Okay! We can vectorize. At this point we don't have any other mem analysis
  // which may limit our maximum vectorization factor, so just return true with
  // no restrictions.
  return true;
}

static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
  if (Ty->isPointerTy())
    return DL.getIntPtrType(Ty);

  // It is possible that chars or shorts overflow when we ask for the loop's
  // trip count; work around this by widening the type.
  if (Ty->getScalarSizeInBits() < 32)
    return Type::getInt32Ty(Ty->getContext());

  return Ty;
}

static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
  Ty0 = convertPointerToIntegerType(DL, Ty0);
  Ty1 = convertPointerToIntegerType(DL, Ty1);
  if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
    return Ty0;
  return Ty1;
}

/// \brief Check that the instruction has outside loop users and is not an
/// identified reduction variable.
static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
                               SmallPtrSetImpl<Value *> &AllowedExit) {
  // Reduction and induction instructions are allowed to have exit users. All
  // other instructions must not have external users.
  if (!AllowedExit.count(Inst))
    // Check that all of the users of the loop are inside the BB.
    for (User *U : Inst->users()) {
      Instruction *UI = cast<Instruction>(U);
      // This user may be a reduction exit value.
      if (!TheLoop->contains(UI)) {
        DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
        return true;
      }
    }
  return false;
}

void LoopVectorizationLegality::addInductionPhi(
    PHINode *Phi, const InductionDescriptor &ID,
    SmallPtrSetImpl<Value *> &AllowedExit) {
  Inductions[Phi] = ID;
  Type *PhiTy = Phi->getType();
  const DataLayout &DL = Phi->getModule()->getDataLayout();

  // Get the widest type.
  if (!PhiTy->isFloatingPointTy()) {
    if (!WidestIndTy)
      WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
    else
      WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
  }

  // Int inductions are special because we only allow one IV.
  if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
      ID.getConstIntStepValue() && ID.getConstIntStepValue()->isOne() &&
      isa<Constant>(ID.getStartValue()) &&
      cast<Constant>(ID.getStartValue())->isNullValue()) {

    // Use the phi node with the widest type as the induction. Use the last
    // one if there are multiple (no good reason for doing this other
    // than it is expedient). We've checked that it begins at zero and
    // steps by one, so this is a canonical induction variable.
    if (!Induction || PhiTy == WidestIndTy)
      Induction = Phi;
  }

  // Both the PHI node itself and the "post-increment" value feeding
  // back into the PHI node may have external users.
  AllowedExit.insert(Phi);
  AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));

  DEBUG(dbgs() << "LV: Found an induction variable.\n");
}

bool LoopVectorizationLegality::canVectorizeInstrs() {
  BasicBlock *Header = TheLoop->getHeader();

  // Look for the attribute signaling the absence of NaNs.
  Function &F = *Header->getParent();
  HasFunNoNaNAttr =
      F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";

  // For each block in the loop.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the instructions in the block and look for hazards.
    for (Instruction &I : *BB) {
      if (auto *Phi = dyn_cast<PHINode>(&I)) {
        Type *PhiTy = Phi->getType();
        // Check that this PHI type is allowed.
        if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
            !PhiTy->isPointerTy()) {
          ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
                    << "loop control flow is not understood by vectorizer");
          DEBUG(dbgs() << "LV: Found a non-int non-pointer PHI.\n");
          return false;
        }

        // If this PHINode is not in the header block, then we know that we
        // can convert it to a select during if-conversion. There is no need
        // to check if the PHIs in this block are induction or reduction
        // variables.
        if (BB != Header) {
          // Check that this instruction has no outside users or is an
          // identified reduction value with an outside user.
          if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
            continue;
          ORE->emit(createMissedAnalysis("NeitherInductionNorReduction", Phi)
                    << "value could not be identified as "
                       "an induction or reduction variable");
          return false;
        }

        // We only allow if-converted PHIs with exactly two incoming values.
        if (Phi->getNumIncomingValues() != 2) {
          ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
                    << "control flow not understood by vectorizer");
          DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
          return false;
        }

        RecurrenceDescriptor RedDes;
        if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
          if (RedDes.hasUnsafeAlgebra())
            Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
          AllowedExit.insert(RedDes.getLoopExitInstr());
          Reductions[Phi] = RedDes;
          continue;
        }

        InductionDescriptor ID;
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
          addInductionPhi(Phi, ID, AllowedExit);
          if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr)
            Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst());
          continue;
        }

        if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) {
          FirstOrderRecurrences.insert(Phi);
          continue;
        }

        // As a last resort, coerce the PHI to an AddRec expression and re-try
        // classifying it as an induction PHI.
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
          addInductionPhi(Phi, ID, AllowedExit);
          continue;
        }

        ORE->emit(createMissedAnalysis("NonReductionValueUsedOutsideLoop", Phi)
                  << "value that could not be identified as "
                     "reduction is used outside the loop");
        DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
        return false;
      } // end of PHI handling

      // We handle calls that:
      //   * Are debug info intrinsics.
      //   * Have a mapping to an IR intrinsic.
      //   * Have a vector version available.
      auto *CI = dyn_cast<CallInst>(&I);
      if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
          !isa<DbgInfoIntrinsic>(CI) &&
          !(CI->getCalledFunction() && TLI &&
            TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
        ORE->emit(createMissedAnalysis("CantVectorizeCall", CI)
                  << "call instruction cannot be vectorized");
        DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
        return false;
      }

      // Intrinsics such as powi, cttz, and ctlz are legal to vectorize if the
      // second argument is the same (i.e., loop invariant).
      if (CI && hasVectorInstrinsicScalarOpd(
                    getVectorIntrinsicIDForCall(CI, TLI), 1)) {
        auto *SE = PSE.getSE();
        if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
          ORE->emit(createMissedAnalysis("CantVectorizeIntrinsic", CI)
                    << "intrinsic instruction cannot be vectorized");
          DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
          return false;
        }
      }

      // Check that the instruction return type is vectorizable. We also can't
      // vectorize extractelement instructions.
      if ((!VectorType::isValidElementType(I.getType()) &&
           !I.getType()->isVoidTy()) ||
          isa<ExtractElementInst>(I)) {
        ORE->emit(createMissedAnalysis("CantVectorizeInstructionReturnType", &I)
                  << "instruction return type cannot be vectorized");
        DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
        return false;
      }

      // Check that the stored type is vectorizable.
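      // (For example, a store of an aggregate value, e.g. "store { i32, i32 }
      // %v, ...", cannot be vectorized, since a struct is not a valid vector
      // element type. An illustrative sketch, not from a specific test.)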
      if (auto *ST = dyn_cast<StoreInst>(&I)) {
        Type *T = ST->getValueOperand()->getType();
        if (!VectorType::isValidElementType(T)) {
          ORE->emit(createMissedAnalysis("CantVectorizeStore", ST)
                    << "store instruction cannot be vectorized");
          return false;
        }

        // FP instructions can allow unsafe algebra, thus vectorizable by
        // non-IEEE-754 compliant SIMD units.
        // This applies to floating-point math operations and calls, not memory
        // operations, shuffles, or casts, as they don't change precision or
        // semantics.
      } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
                 !I.hasUnsafeAlgebra()) {
        DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
        Hints->setPotentiallyUnsafe();
      }

      // Reduction instructions are allowed to have exit users.
      // All other instructions must not have external users.
      if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
        ORE->emit(createMissedAnalysis("ValueUsedOutsideLoop", &I)
                  << "value cannot be used outside the loop");
        return false;
      }

    } // next instr.
  }

  if (!Induction) {
    DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
    if (Inductions.empty()) {
      ORE->emit(createMissedAnalysis("NoInductionVariable")
                << "loop induction variable could not be identified");
      return false;
    }
  }

  // Now that we know the widest induction type, check if our found induction
  // is the same size. If it's not, unset it here, and InnerLoopVectorizer
  // will create another.
  if (Induction && WidestIndTy != Induction->getType())
    Induction = nullptr;

  return true;
}

void LoopVectorizationLegality::collectLoopScalars() {

  // If an instruction is uniform after vectorization, it will remain scalar.
  Scalars.insert(Uniforms.begin(), Uniforms.end());

  // Collect the getelementptr instructions that will not be vectorized. A
  // getelementptr instruction is only vectorized if it is used for a legal
  // gather or scatter operation.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
        Scalars.insert(GEP);
        continue;
      }
      auto *Ptr = getPointerOperand(&I);
      if (!Ptr)
        continue;
      auto *GEP = getGEPInstruction(Ptr);
      if (GEP && isLegalGatherOrScatter(&I))
        Scalars.erase(GEP);
    }

  // An induction variable will remain scalar if all users of the induction
  // variable and induction variable update remain scalar.
  auto *Latch = TheLoop->getLoopLatch();
  for (auto &Induction : *getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are scalar after
    // vectorization.
    auto ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Scalars.count(I);
    });
    if (!ScalarInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // scalar after vectorization.
    auto ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == Ind || !TheLoop->contains(I) || Scalars.count(I);
    });
    if (!ScalarIndUpdate)
      continue;

    // The induction variable and its update instruction will remain scalar.
    Scalars.insert(Ind);
    Scalars.insert(IndUpdate);
  }
}

bool LoopVectorizationLegality::hasConsecutiveLikePtrOperand(Instruction *I) {
  if (isAccessInterleaved(I))
    return true;
  if (auto *Ptr = getPointerOperand(I))
    return isConsecutivePtr(Ptr);
  return false;
}

bool LoopVectorizationLegality::isPredicatedStore(Instruction *I) {
  auto *SI = dyn_cast<StoreInst>(I);
  return SI && blockNeedsPredication(SI->getParent()) && !isMaskRequired(SI);
}

bool LoopVectorizationLegality::memoryInstructionMustBeScalarized(
    Instruction *I, unsigned VF) {

  // If the memory instruction is in an interleaved group, it will be
  // vectorized and its pointer will remain uniform.
  if (isAccessInterleaved(I))
    return false;

  // Get and ensure we have a valid memory instruction.
  LoadInst *LI = dyn_cast<LoadInst>(I);
  StoreInst *SI = dyn_cast<StoreInst>(I);
  assert((LI || SI) && "Invalid memory instruction");

  // If the pointer operand is uniform (loop invariant), the memory instruction
  // will be scalarized.
  auto *Ptr = getPointerOperand(I);
  if (LI && isUniform(Ptr))
    return true;

  // If the pointer operand is non-consecutive and neither a gather nor a
  // scatter operation is legal, the memory instruction will be scalarized.
  if (!isConsecutivePtr(Ptr) && !isLegalGatherOrScatter(I))
    return true;

  // If the instruction is a store located in a predicated block, it will be
  // scalarized.
  if (isPredicatedStore(I))
    return true;

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  if (hasIrregularType(ScalarTy, DL, VF))
    return true;

  // Otherwise, the memory instruction should be vectorized if the rest of the
  // loop is.
  return false;
}

void LoopVectorizationLegality::collectLoopUniforms() {
  // We now know that the loop is vectorizable!
  // Collect instructions inside the loop that will remain uniform after
  // vectorization.

  // Global values, params, and instructions outside of the current loop are
  // out of scope.
  auto isOutOfScope = [&](Value *V) -> bool {
    Instruction *I = dyn_cast<Instruction>(V);
    return (!I || !TheLoop->contains(I));
  };

  SetVector<Instruction *> Worklist;
  BasicBlock *Latch = TheLoop->getLoopLatch();
  // Start with the conditional branch.
  if (!isOutOfScope(Latch->getTerminator()->getOperand(0))) {
    Instruction *Cmp = cast<Instruction>(Latch->getTerminator()->getOperand(0));
    Worklist.insert(Cmp);
    DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
  }

  // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
  // are pointers that are treated like consecutive pointers during
  // vectorization. The pointer operands of interleaved accesses are an
  // example.
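  //
  // For example (an illustrative sketch), in the stride-2 pattern
  //
  //   for (i = 0; i < n; ++i)
  //     sum += a[2 * i] + a[2 * i + 1];
  //
  // each load has stride 2, but if the two loads form an interleave group,
  // their pointer operands are treated as consecutive-like.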
  SmallPtrSet<Instruction *, 8> ConsecutiveLikePtrs;

  // Holds pointer operands of instructions that are possibly non-uniform.
  SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;

  // Iterate over the instructions in the loop, and collect all
  // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
  // that a consecutive-like pointer operand will be scalarized, we collect it
  // in PossibleNonUniformPtrs instead. We use two sets here because a single
  // getelementptr instruction can be used by both vectorized and scalarized
  // memory instructions. For example, if a loop loads and stores from the same
  // location, but the store is conditional, the store will be scalarized, and
  // the getelementptr won't remain uniform.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {

      // If there's no pointer operand, there's nothing to do.
      auto *Ptr = dyn_cast_or_null<Instruction>(getPointerOperand(&I));
      if (!Ptr)
        continue;

      // True if all users of Ptr are memory accesses that have Ptr as their
      // pointer operand.
      auto UsersAreMemAccesses = all_of(Ptr->users(), [&](User *U) -> bool {
        return getPointerOperand(U) == Ptr;
      });

      // Ensure the memory instruction will not be scalarized, making its
      // pointer operand non-uniform. If the pointer operand is used by some
      // instruction other than a memory access, we're not going to check if
      // that other instruction may be scalarized here. Thus, we conservatively
      // assume the pointer operand may be non-uniform.
      if (!UsersAreMemAccesses || memoryInstructionMustBeScalarized(&I))
        PossibleNonUniformPtrs.insert(Ptr);

      // If the memory instruction will be vectorized and its pointer operand
      // is consecutive-like, the pointer operand should remain uniform.
      else if (hasConsecutiveLikePtrOperand(&I))
        ConsecutiveLikePtrs.insert(Ptr);
    }

  // Add to the Worklist all consecutive and consecutive-like pointers that
  // aren't also identified as possibly non-uniform.
  for (auto *V : ConsecutiveLikePtrs)
    if (!PossibleNonUniformPtrs.count(V)) {
      DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
      Worklist.insert(V);
    }

  // Expand the Worklist in topological order: whenever a new instruction
  // is added, its users should be either already inside the Worklist, or
  // out of scope. This ensures a uniform instruction will only be used
  // by uniform instructions or out-of-scope instructions.
  unsigned idx = 0;
  while (idx != Worklist.size()) {
    Instruction *I = Worklist[idx++];

    for (auto OV : I->operand_values()) {
      if (isOutOfScope(OV))
        continue;
      auto *OI = cast<Instruction>(OV);
      if (all_of(OI->users(), [&](User *U) -> bool {
            return isOutOfScope(U) || Worklist.count(cast<Instruction>(U));
          })) {
        Worklist.insert(OI);
        DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
      }
    }
  }

  // Returns true if Ptr is the pointer operand of a memory access instruction
  // I, and I is known to not require scalarization.
  auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
    return getPointerOperand(I) == Ptr && !memoryInstructionMustBeScalarized(I);
  };

  // For an instruction to be added into the Worklist above, all its users
  // inside the loop should also be in the Worklist.
  // However, this condition cannot be true for phi nodes that form a cyclic
  // dependence. We must process phi nodes separately. An induction variable
  // will remain uniform if all users of the induction variable and induction
  // variable update remain uniform. The code below handles both pointer and
  // non-pointer induction variables.
  for (auto &Induction : Inductions) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are uniform after
    // vectorization.
    auto UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, Ind);
    });
    if (!UniformInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // uniform after vectorization.
    auto UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, IndUpdate);
    });
    if (!UniformIndUpdate)
      continue;

    // The induction variable and its update instruction will remain uniform.
    Worklist.insert(Ind);
    Worklist.insert(IndUpdate);
    DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n");
    DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n");
  }

  Uniforms.insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationLegality::canVectorizeMemory() {
  LAI = &(*GetLAA)(*TheLoop);
  InterleaveInfo.setLAI(LAI);
  const OptimizationRemarkAnalysis *LAR = LAI->getReport();
  if (LAR) {
    OptimizationRemarkAnalysis VR(Hints->vectorizeAnalysisPassName(),
                                  "loop not vectorized: ", *LAR);
    ORE->emit(VR);
  }
  if (!LAI->canVectorizeMemory())
    return false;

  if (LAI->hasStoreToLoopInvariantAddress()) {
    ORE->emit(createMissedAnalysis("CantVectorizeStoreToLoopInvariantAddress")
              << "write to a loop invariant address could not be vectorized");
    DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
    return false;
  }

  Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
  PSE.addPredicate(LAI->getPSE().getUnionPredicate());

  return true;
}

bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
  Value *In0 = const_cast<Value *>(V);
  PHINode *PN = dyn_cast_or_null<PHINode>(In0);
  if (!PN)
    return false;

  return Inductions.count(PN);
}

bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) {
  return FirstOrderRecurrences.count(Phi);
}

bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
  return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
}

bool LoopVectorizationLegality::blockCanBePredicated(
    BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) {
  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  for (Instruction &I : *BB) {
    // Check that we don't have a constant expression that can trap as an
    // operand.
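    // (For example, a constant expression such as "sdiv (i32 1, i32 0)" can
    // trap when evaluated; if-converting the block would evaluate it
    // unconditionally. An illustrative sketch.)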
    for (Value *Operand : I.operands()) {
      if (auto *C = dyn_cast<Constant>(Operand))
        if (C->canTrap())
          return false;
    }
    // We might be able to hoist the load.
    if (I.mayReadFromMemory()) {
      auto *LI = dyn_cast<LoadInst>(&I);
      if (!LI)
        return false;
      if (!SafePtrs.count(LI->getPointerOperand())) {
        if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) ||
            isLegalMaskedGather(LI->getType())) {
          MaskedOp.insert(LI);
          continue;
        }
        // !llvm.mem.parallel_loop_access implies if-conversion safety.
        if (IsAnnotatedParallel)
          continue;
        return false;
      }
    }

    if (I.mayWriteToMemory()) {
      auto *SI = dyn_cast<StoreInst>(&I);
      // We only support predication of stores in basic blocks with one
      // predecessor.
      if (!SI)
        return false;

      // Build a masked store if it is legal for the target.
      if (isLegalMaskedStore(SI->getValueOperand()->getType(),
                             SI->getPointerOperand()) ||
          isLegalMaskedScatter(SI->getValueOperand()->getType())) {
        MaskedOp.insert(SI);
        continue;
      }

      bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0);
      bool isSinglePredecessor = SI->getParent()->getSinglePredecessor();

      if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr ||
          !isSinglePredecessor)
        return false;
    }
    if (I.mayThrow())
      return false;
  }

  return true;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {

  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      auto *LI = dyn_cast<LoadInst>(&I);
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!LI && !SI)
        continue;

      Value *Ptr = getPointerOperand(&I);
      int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      PointerType *PtrTy = cast<PointerType>(Ptr->getType());
      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());

      // An alignment of 0 means target ABI alignment.
      unsigned Align = LI ? LI->getAlignment() : SI->getAlignment();
      if (!Align)
        Align = DL.getABITypeAlignment(PtrTy->getElementType());

      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];  // (1)
//                                A[i] = b;  // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    const ValueToValueMap &Strides) {
  DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
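    //
    // For example (an illustrative sketch), in the stride-2 loop
    //
    //   for (i = 0; i < n; ++i)
    //     sum += A[2 * i]      // (1)
    //          + A[2 * i + 1]; // (2)
    //
    // the bottom-up walk initializes a group for (2) first and then inserts
    // (1) into that group at the slot one element before (2).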
5707 InterleaveGroup *Group = nullptr; 5708 if (isStrided(DesB.Stride)) { 5709 Group = getInterleaveGroup(B); 5710 if (!Group) { 5711 DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n'); 5712 Group = createInterleaveGroup(B, DesB.Stride, DesB.Align); 5713 } 5714 if (B->mayWriteToMemory()) 5715 StoreGroups.insert(Group); 5716 else 5717 LoadGroups.insert(Group); 5718 } 5719 5720 for (auto AI = std::next(BI); AI != E; ++AI) { 5721 Instruction *A = AI->first; 5722 StrideDescriptor DesA = AI->second; 5723 5724 // Our code motion strategy implies that we can't have dependences 5725 // between accesses in an interleaved group and other accesses located 5726 // between the first and last member of the group. Note that this also 5727 // means that a group can't have more than one member at a given offset. 5728 // The accesses in a group can have dependences with other accesses, but 5729 // we must ensure we don't extend the boundaries of the group such that 5730 // we encompass those dependent accesses. 5731 // 5732 // For example, assume we have the sequence of accesses shown below in a 5733 // stride-2 loop: 5734 // 5735 // (1, 2) is a group | A[i] = a; // (1) 5736 // | A[i-1] = b; // (2) | 5737 // A[i-3] = c; // (3) 5738 // A[i] = d; // (4) | (2, 4) is not a group 5739 // 5740 // Because accesses (2) and (3) are dependent, we can group (2) with (1) 5741 // but not with (4). If we did, the dependent access (3) would be within 5742 // the boundaries of the (2, 4) group. 5743 if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) { 5744 5745 // If a dependence exists and A is already in a group, we know that A 5746 // must be a store since A precedes B and WAR dependences are allowed. 5747 // Thus, A would be sunk below B. We release A's group to prevent this 5748 // illegal code motion. A will then be free to form another group with 5749 // instructions that precede it. 5750 if (isInterleaved(A)) { 5751 InterleaveGroup *StoreGroup = getInterleaveGroup(A); 5752 StoreGroups.remove(StoreGroup); 5753 releaseGroup(StoreGroup); 5754 } 5755 5756 // If a dependence exists and A is not already in a group (or it was 5757 // and we just released it), B might be hoisted above A (if B is a 5758 // load) or another store might be sunk below A (if B is a store). In 5759 // either case, we can't add additional instructions to B's group. B 5760 // will only form a group with instructions that it precedes. 5761 break; 5762 } 5763 5764 // At this point, we've checked for illegal code motion. If either A or B 5765 // isn't strided, there's nothing left to do. 5766 if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride)) 5767 continue; 5768 5769 // Ignore A if it's already in a group or isn't the same kind of memory 5770 // operation as B. 5771 if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory()) 5772 continue; 5773 5774 // Check rules 1 and 2. Ignore A if its stride or size is different from 5775 // that of B. 5776 if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size) 5777 continue; 5778 5779 // Calculate the distance from A to B. 5780 const SCEVConstant *DistToB = dyn_cast<SCEVConstant>( 5781 PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev)); 5782 if (!DistToB) 5783 continue; 5784 int64_t DistanceToB = DistToB->getAPInt().getSExtValue(); 5785 5786 // Check rule 3. Ignore A if its distance to B is not a multiple of the 5787 // size. 5788 if (DistanceToB % static_cast<int64_t>(DesB.Size)) 5789 continue; 5790 5791 // Ignore A if either A or B is in a predicated block. 
Although we 5792 // currently prevent group formation for predicated accesses, we may be 5793 // able to relax this limitation in the future once we handle more 5794 // complicated blocks. 5795 if (isPredicated(A->getParent()) || isPredicated(B->getParent())) 5796 continue; 5797 5798 // The index of A is the index of B plus A's distance to B in multiples 5799 // of the size. 5800 int IndexA = 5801 Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size); 5802 5803 // Try to insert A into B's group. 5804 if (Group->insertMember(A, IndexA, DesA.Align)) { 5805 DEBUG(dbgs() << "LV: Inserted:" << *A << '\n' 5806 << " into the interleave group with" << *B << '\n'); 5807 InterleaveGroupMap[A] = Group; 5808 5809 // Set the first load in program order as the insert position. 5810 if (A->mayReadFromMemory()) 5811 Group->setInsertPos(A); 5812 } 5813 } // Iteration over A accesses. 5814 } // Iteration over B accesses. 5815 5816 // Remove interleaved store groups with gaps. 5817 for (InterleaveGroup *Group : StoreGroups) 5818 if (Group->getNumMembers() != Group->getFactor()) 5819 releaseGroup(Group); 5820 5821 // If there is a non-reversed interleaved load group with gaps, we will need 5822 // to execute at least one scalar epilogue iteration. This will ensure that 5823 // we don't speculatively access memory out-of-bounds. Note that we only need 5824 // to look for a member at index factor - 1, since every group must have a 5825 // member at index zero. 5826 for (InterleaveGroup *Group : LoadGroups) 5827 if (!Group->getMember(Group->getFactor() - 1)) { 5828 if (Group->isReverse()) { 5829 releaseGroup(Group); 5830 } else { 5831 DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n"); 5832 RequiresScalarEpilogue = true; 5833 } 5834 } 5835 } 5836 5837 LoopVectorizationCostModel::VectorizationFactor 5838 LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) { 5839 // Width 1 means no vectorize 5840 VectorizationFactor Factor = {1U, 0U}; 5841 if (OptForSize && Legal->getRuntimePointerChecking()->Need) { 5842 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 5843 << "runtime pointer checks needed. Enable vectorization of this " 5844 "loop with '#pragma clang loop vectorize(enable)' when " 5845 "compiling with -Os/-Oz"); 5846 DEBUG(dbgs() 5847 << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); 5848 return Factor; 5849 } 5850 5851 if (!EnableCondStoresVectorization && Legal->getNumPredStores()) { 5852 ORE->emit(createMissedAnalysis("ConditionalStore") 5853 << "store that is conditionally executed prevents vectorization"); 5854 DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n"); 5855 return Factor; 5856 } 5857 5858 // Find the trip count. 5859 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5860 DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 5861 5862 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5863 unsigned SmallestType, WidestType; 5864 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5865 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 5866 unsigned MaxSafeDepDist = -1U; 5867 5868 // Get the maximum safe dependence distance in bits computed by LAA. If the 5869 // loop contains any interleaved accesses, we divide the dependence distance 5870 // by the maximum interleave factor of all interleaved groups. 
Note that 5871 // although the division ensures correctness, this is a fairly conservative 5872 // computation because the maximum distance computed by LAA may not involve 5873 // any of the interleaved accesses. 5874 if (Legal->getMaxSafeDepDistBytes() != -1U) 5875 MaxSafeDepDist = 5876 Legal->getMaxSafeDepDistBytes() * 8 / Legal->getMaxInterleaveFactor(); 5877 5878 WidestRegister = 5879 ((WidestRegister < MaxSafeDepDist) ? WidestRegister : MaxSafeDepDist); 5880 unsigned MaxVectorSize = WidestRegister / WidestType; 5881 5882 DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / " 5883 << WidestType << " bits.\n"); 5884 DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister 5885 << " bits.\n"); 5886 5887 if (MaxVectorSize == 0) { 5888 DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 5889 MaxVectorSize = 1; 5890 } 5891 5892 assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements" 5893 " into one vector!"); 5894 5895 unsigned VF = MaxVectorSize; 5896 if (MaximizeBandwidth && !OptForSize) { 5897 // Collect all viable vectorization factors. 5898 SmallVector<unsigned, 8> VFs; 5899 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 5900 for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2) 5901 VFs.push_back(VS); 5902 5903 // For each VF calculate its register usage. 5904 auto RUs = calculateRegisterUsage(VFs); 5905 5906 // Select the largest VF which doesn't require more registers than existing 5907 // ones. 5908 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true); 5909 for (int i = RUs.size() - 1; i >= 0; --i) { 5910 if (RUs[i].MaxLocalUsers <= TargetNumRegisters) { 5911 VF = VFs[i]; 5912 break; 5913 } 5914 } 5915 } 5916 5917 // If we optimize the program for size, avoid creating the tail loop. 5918 if (OptForSize) { 5919 // If we are unable to calculate the trip count then don't try to vectorize. 5920 if (TC < 2) { 5921 ORE->emit( 5922 createMissedAnalysis("UnknownLoopCountComplexCFG") 5923 << "unable to calculate the loop count due to complex control flow"); 5924 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); 5925 return Factor; 5926 } 5927 5928 // Find the maximum SIMD width that can fit within the trip count. 5929 VF = TC % MaxVectorSize; 5930 5931 if (VF == 0) 5932 VF = MaxVectorSize; 5933 else { 5934 // If the trip count that we found modulo the vectorization factor is not 5935 // zero then we require a tail. 5936 ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize") 5937 << "cannot optimize for size and vectorize at the " 5938 "same time. Enable vectorization of this loop " 5939 "with '#pragma clang loop vectorize(enable)' " 5940 "when compiling with -Os/-Oz"); 5941 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); 5942 return Factor; 5943 } 5944 } 5945 5946 int UserVF = Hints->getWidth(); 5947 if (UserVF != 0) { 5948 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 5949 DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 5950 5951 Factor.Width = UserVF; 5952 return Factor; 5953 } 5954 5955 float Cost = expectedCost(1).first; 5956 #ifndef NDEBUG 5957 const float ScalarCost = Cost; 5958 #endif /* NDEBUG */ 5959 unsigned Width = 1; 5960 DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 5961 5962 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5963 // Ignore scalar width, because the user explicitly wants vectorization. 
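// (Seeding the search at width 2 below means the scalar cost can no longer
// win the comparison; the loop over candidate widths then picks the
// cheapest power-of-two width up to VF.)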
5964 if (ForceVectorization && VF > 1) {
5965 Width = 2;
5966 Cost = expectedCost(Width).first / (float)Width;
5967 }
5968
5969 for (unsigned i = 2; i <= VF; i *= 2) {
5970 // Notice that the vector loop needs to be executed fewer times, so
5971 // we need to divide the cost of the vector loop by the width of
5972 // the vector elements.
5973 VectorizationCostTy C = expectedCost(i);
5974 float VectorCost = C.first / (float)i;
5975 DEBUG(dbgs() << "LV: Vector loop of width " << i
5976 << " costs: " << (int)VectorCost << ".\n");
5977 if (!C.second && !ForceVectorization) {
5978 DEBUG(
5979 dbgs() << "LV: Not considering vector loop of width " << i
5980 << " because it will not generate any vector instructions.\n");
5981 continue;
5982 }
5983 if (VectorCost < Cost) {
5984 Cost = VectorCost;
5985 Width = i;
5986 }
5987 }
5988
5989 DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
5990 << "LV: Vectorization seems to be not beneficial, "
5991 << "but was forced by a user.\n");
5992 DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5993 Factor.Width = Width;
5994 Factor.Cost = Width * Cost;
5995 return Factor;
5996 }
5997
5998 std::pair<unsigned, unsigned>
5999 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
6000 unsigned MinWidth = -1U;
6001 unsigned MaxWidth = 8;
6002 const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6003
6004 // For each block.
6005 for (BasicBlock *BB : TheLoop->blocks()) {
6006 // For each instruction in the loop.
6007 for (Instruction &I : *BB) {
6008 Type *T = I.getType();
6009
6010 // Skip ignored values.
6011 if (ValuesToIgnore.count(&I))
6012 continue;
6013
6014 // Only examine Loads, Stores and PHINodes.
6015 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6016 continue;
6017
6018 // Examine PHI nodes that are reduction variables. Update the type to
6019 // account for the recurrence type.
6020 if (auto *PN = dyn_cast<PHINode>(&I)) {
6021 if (!Legal->isReductionVariable(PN))
6022 continue;
6023 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
6024 T = RdxDesc.getRecurrenceType();
6025 }
6026
6027 // Examine the stored values.
6028 if (auto *ST = dyn_cast<StoreInst>(&I))
6029 T = ST->getValueOperand()->getType();
6030
6031 // Ignore loaded pointer types and stored pointer types that are not
6032 // consecutive. However, we do want to take consecutive stores/loads of
6033 // pointer vectors into account.
6034 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I))
6035 continue;
6036
6037 MinWidth = std::min(MinWidth,
6038 (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6039 MaxWidth = std::max(MaxWidth,
6040 (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6041 }
6042 }
6043
6044 return {MinWidth, MaxWidth};
6045 }
6046
6047 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
6048 unsigned VF,
6049 unsigned LoopCost) {
6050
6051 // -- The interleave heuristics --
6052 // We interleave the loop in order to expose ILP and reduce the loop overhead.
6053 // There are many micro-architectural considerations that we can't predict
6054 // at this level. For example, frontend pressure (on decode or fetch) due to
6055 // code size, or the number and capabilities of the execution ports.
6056 //
6057 // We use the following heuristics to select the interleave count:
6058 // 1. If the code has reductions, then we interleave to break the cross
6059 // iteration dependency.
6060 // 2.
If the loop is really small, then we interleave to reduce the loop
6061 // overhead.
6062 // 3. We don't interleave if we think that we will spill registers to memory
6063 // due to the increased register pressure.
6064
6065 // When we optimize for size, we don't interleave.
6066 if (OptForSize)
6067 return 1;
6068
6069 // The max safe dependence distance already limited the VF; don't interleave.
6070 if (Legal->getMaxSafeDepDistBytes() != -1U)
6071 return 1;
6072
6073 // Do not interleave loops with a relatively small trip count.
6074 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
6075 if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
6076 return 1;
6077
6078 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
6079 DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
6080 << " registers\n");
6081
6082 if (VF == 1) {
6083 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6084 TargetNumRegisters = ForceTargetNumScalarRegs;
6085 } else {
6086 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6087 TargetNumRegisters = ForceTargetNumVectorRegs;
6088 }
6089
6090 RegisterUsage R = calculateRegisterUsage({VF})[0];
6091 // We divide by these constants, so assume that we have at least one
6092 // instruction that uses at least one register.
6093 R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
6094 R.NumInstructions = std::max(R.NumInstructions, 1U);
6095
6096 // We calculate the interleave count using the following formula.
6097 // Subtract the number of loop invariants from the number of available
6098 // registers. These registers are used by all of the interleaved instances.
6099 // Next, divide the remaining registers by the number of registers that is
6100 // required by the loop, in order to estimate how many parallel instances
6101 // fit without causing spills. All of this is rounded down if necessary to be
6102 // a power of two. We want a power-of-two interleave count to simplify any
6103 // addressing operations or alignment considerations.
// (For example, with 16 registers, 2 loop-invariant values, and at most 3
// simultaneously live values: PowerOf2Floor((16 - 2) / 3) = 4.)
6104 unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
6105 R.MaxLocalUsers);
6106
6107 // Don't count the induction variable as interleaved.
6108 if (EnableIndVarRegisterHeur)
6109 IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
6110 std::max(1U, (R.MaxLocalUsers - 1)));
6111
6112 // Clamp the interleave count to a reasonable range.
6113 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
6114
6115 // Check if the user has overridden the max.
6116 if (VF == 1) {
6117 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6118 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6119 } else {
6120 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6121 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6122 }
6123
6124 // If we did not calculate the cost for VF (because the user selected the VF)
6125 // then we calculate the cost of VF here.
6126 if (LoopCost == 0)
6127 LoopCost = expectedCost(VF).first;
6128
6129 // Clamp the calculated IC to be between 1 and the max interleave count
6130 // that the target allows.
6131 if (IC > MaxInterleaveCount)
6132 IC = MaxInterleaveCount;
6133 else if (IC < 1)
6134 IC = 1;
6135
6136 // Interleave if we vectorized this loop and there is a reduction that could
6137 // benefit from interleaving.
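// For example, a loop accumulating 'sum += a[i]' is serialized by its single
// accumulator; interleaving by 2 keeps two independent partial sums in
// flight, which are combined after the loop.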
6138 if (VF > 1 && Legal->getReductionVars()->size()) {
6139 DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6140 return IC;
6141 }
6142
6143 // Note that if we've already vectorized the loop we will have done the
6144 // runtime check and so interleaving won't require further checks.
6145 bool InterleavingRequiresRuntimePointerCheck =
6146 (VF == 1 && Legal->getRuntimePointerChecking()->Need);
6147
6148 // We want to interleave small loops in order to reduce the loop overhead and
6149 // potentially expose ILP opportunities.
6150 DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
6151 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
6152 // We assume that the constant loop overhead costs about 1. Using the cost
6153 // model, we interleave until IC * LoopCost approaches SmallLoopCost, at
6154 // which point the overhead is roughly 5% of the loop's total cost.
6155 unsigned SmallIC =
6156 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6157
6158 // Interleave until store/load ports (estimated by max interleave count) are
6159 // saturated.
6160 unsigned NumStores = Legal->getNumStores();
6161 unsigned NumLoads = Legal->getNumLoads();
6162 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6163 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6164
6165 // If we have a scalar reduction (vector reductions are already dealt with
6166 // by this point), we can increase the critical path length if the loop
6167 // we're interleaving is inside another loop. Limit, by default, to 2, so
6168 // the critical path only gets increased by one reduction operation.
6169 if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) {
6170 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6171 SmallIC = std::min(SmallIC, F);
6172 StoresIC = std::min(StoresIC, F);
6173 LoadsIC = std::min(LoadsIC, F);
6174 }
6175
6176 if (EnableLoadStoreRuntimeInterleave &&
6177 std::max(StoresIC, LoadsIC) > SmallIC) {
6178 DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6179 return std::max(StoresIC, LoadsIC);
6180 }
6181
6182 DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6183 return SmallIC;
6184 }
6185
6186 // Interleave if this is a large loop (small loops are already dealt with by
6187 // this point) that could benefit from interleaving.
6188 bool HasReductions = (Legal->getReductionVars()->size() > 0);
6189 if (TTI.enableAggressiveInterleaving(HasReductions)) {
6190 DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6191 return IC;
6192 }
6193
6194 DEBUG(dbgs() << "LV: Not Interleaving.\n");
6195 return 1;
6196 }
6197
6198 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6199 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
6200 // This function calculates the register usage by measuring the highest number
6201 // of values that are alive at a single location. Obviously, this is a very
6202 // rough estimation. We scan the loop in topological order and
6203 // assign a number to each instruction. We use RPO to ensure that defs are
6204 // met before their users. We assume that each instruction that has in-loop
6205 // users starts an interval. We record every time that an in-loop value is
6206 // used, so we have a list of the first and last occurrences of each
6207 // instruction. Next, we transpose this data structure into a multimap that
6208 // holds the list of intervals that *end* at a specific location.
This multimap
6209 // allows us to perform a linear search. We scan the instructions linearly
6210 // and record each time that a new interval starts, by placing it in a set.
6211 // If we find this value in the multimap then we remove it from the set.
6212 // The max register usage is the maximum size of the set.
6213 // We also search for instructions that are defined outside the loop, but are
6214 // used inside the loop. We need this number separately from the max-interval
6215 // usage number because when we unroll, loop-invariant values do not take
6216 // more registers.
6217 LoopBlocksDFS DFS(TheLoop);
6218 DFS.perform(LI);
6219
6220 RegisterUsage RU;
6221 RU.NumInstructions = 0;
6222
6223 // Each 'key' in the map opens a new interval. The values
6224 // of the map are the index of the 'last seen' usage of the
6225 // instruction that is the key.
6226 typedef DenseMap<Instruction *, unsigned> IntervalMap;
6227 // Maps an index to its instruction.
6228 DenseMap<unsigned, Instruction *> IdxToInstr;
6229 // Marks the end of each interval.
6230 IntervalMap EndPoint;
6231 // Saves the instructions that are used in the loop.
6232 SmallSet<Instruction *, 8> Ends;
6233 // Saves the list of values that are used in the loop but are
6234 // defined outside the loop, such as arguments and constants.
6235 SmallPtrSet<Value *, 8> LoopInvariants;
6236
6237 unsigned Index = 0;
6238 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6239 RU.NumInstructions += BB->size();
6240 for (Instruction &I : *BB) {
6241 IdxToInstr[Index++] = &I;
6242
6243 // Save the end location of each USE.
6244 for (Value *U : I.operands()) {
6245 auto *Instr = dyn_cast<Instruction>(U);
6246
6247 // Ignore non-instruction values such as arguments, constants, etc.
6248 if (!Instr)
6249 continue;
6250
6251 // If this instruction is outside the loop then record it and continue.
6252 if (!TheLoop->contains(Instr)) {
6253 LoopInvariants.insert(Instr);
6254 continue;
6255 }
6256
6257 // Overwrite previous end points.
6258 EndPoint[Instr] = Index;
6259 Ends.insert(Instr);
6260 }
6261 }
6262 }
6263
6264 // Saves the list of intervals that end with the index in 'key'.
6265 typedef SmallVector<Instruction *, 2> InstrList;
6266 DenseMap<unsigned, InstrList> TransposeEnds;
6267
6268 // Transpose the EndPoints to a list of values that end at each index.
6269 for (auto &Interval : EndPoint)
6270 TransposeEnds[Interval.second].push_back(Interval.first);
6271
6272 SmallSet<Instruction *, 8> OpenIntervals;
6273
6274 // Get the size of the widest register.
6275 unsigned MaxSafeDepDist = -1U;
6276 if (Legal->getMaxSafeDepDistBytes() != -1U)
6277 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
6278 unsigned WidestRegister =
6279 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
6280 const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6281
6282 SmallVector<RegisterUsage, 8> RUs(VFs.size());
6283 SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);
6284
6285 DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6286
6287 // A lambda that gets the register usage for the given type and VF.
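// For example, assuming 128-bit vector registers, an i32 value at VF = 8
// occupies max(1, 8 * 32 / 128) = 2 registers, and any type narrower than a
// full register still counts as one.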
6288 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 6289 if (Ty->isTokenTy()) 6290 return 0U; 6291 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 6292 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 6293 }; 6294 6295 for (unsigned int i = 0; i < Index; ++i) { 6296 Instruction *I = IdxToInstr[i]; 6297 // Ignore instructions that are never used within the loop. 6298 if (!Ends.count(I)) 6299 continue; 6300 6301 // Remove all of the instructions that end at this location. 6302 InstrList &List = TransposeEnds[i]; 6303 for (Instruction *ToRemove : List) 6304 OpenIntervals.erase(ToRemove); 6305 6306 // Skip ignored values. 6307 if (ValuesToIgnore.count(I)) 6308 continue; 6309 6310 // For each VF find the maximum usage of registers. 6311 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6312 if (VFs[j] == 1) { 6313 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 6314 continue; 6315 } 6316 6317 // Count the number of live intervals. 6318 unsigned RegUsage = 0; 6319 for (auto Inst : OpenIntervals) { 6320 // Skip ignored values for VF > 1. 6321 if (VecValuesToIgnore.count(Inst)) 6322 continue; 6323 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 6324 } 6325 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 6326 } 6327 6328 DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6329 << OpenIntervals.size() << '\n'); 6330 6331 // Add the current instruction to the list of open intervals. 6332 OpenIntervals.insert(I); 6333 } 6334 6335 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6336 unsigned Invariant = 0; 6337 if (VFs[i] == 1) 6338 Invariant = LoopInvariants.size(); 6339 else { 6340 for (auto Inst : LoopInvariants) 6341 Invariant += GetRegUsage(Inst->getType(), VFs[i]); 6342 } 6343 6344 DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); 6345 DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); 6346 DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n'); 6347 DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n'); 6348 6349 RU.LoopInvariantRegs = Invariant; 6350 RU.MaxLocalUsers = MaxUsages[i]; 6351 RUs[i] = RU; 6352 } 6353 6354 return RUs; 6355 } 6356 6357 LoopVectorizationCostModel::VectorizationCostTy 6358 LoopVectorizationCostModel::expectedCost(unsigned VF) { 6359 VectorizationCostTy Cost; 6360 6361 // For each block. 6362 for (BasicBlock *BB : TheLoop->blocks()) { 6363 VectorizationCostTy BlockCost; 6364 6365 // For each instruction in the old loop. 6366 for (Instruction &I : *BB) { 6367 // Skip dbg intrinsics. 6368 if (isa<DbgInfoIntrinsic>(I)) 6369 continue; 6370 6371 // Skip ignored values. 6372 if (ValuesToIgnore.count(&I)) 6373 continue; 6374 6375 VectorizationCostTy C = getInstructionCost(&I, VF); 6376 6377 // Check if we should override the cost. 6378 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 6379 C.first = ForceTargetInstructionCost; 6380 6381 BlockCost.first += C.first; 6382 BlockCost.second |= C.second; 6383 DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF " 6384 << VF << " For instruction: " << I << '\n'); 6385 } 6386 6387 // We assume that if-converted blocks have a 50% chance of being executed. 6388 // When the code is scalar then some of the blocks are avoided due to CF. 6389 // When the code is vectorized we execute all code paths. 
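// E.g., an if-converted block whose instructions cost 10 contributes only 5
// to the scalar (VF == 1) estimate, since the scalar loop branches around it
// roughly half of the time, while vector code executes it unconditionally.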
6390 if (VF == 1 && Legal->blockNeedsPredication(BB)) 6391 BlockCost.first /= 2; 6392 6393 Cost.first += BlockCost.first; 6394 Cost.second |= BlockCost.second; 6395 } 6396 6397 return Cost; 6398 } 6399 6400 /// \brief Check whether the address computation for a non-consecutive memory 6401 /// access looks like an unlikely candidate for being merged into the indexing 6402 /// mode. 6403 /// 6404 /// We look for a GEP which has one index that is an induction variable and all 6405 /// other indices are loop invariant. If the stride of this access is also 6406 /// within a small bound we decide that this address computation can likely be 6407 /// merged into the addressing mode. 6408 /// In all other cases, we identify the address computation as complex. 6409 static bool isLikelyComplexAddressComputation(Value *Ptr, 6410 LoopVectorizationLegality *Legal, 6411 ScalarEvolution *SE, 6412 const Loop *TheLoop) { 6413 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6414 if (!Gep) 6415 return true; 6416 6417 // We are looking for a gep with all loop invariant indices except for one 6418 // which should be an induction variable. 6419 unsigned NumOperands = Gep->getNumOperands(); 6420 for (unsigned i = 1; i < NumOperands; ++i) { 6421 Value *Opd = Gep->getOperand(i); 6422 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6423 !Legal->isInductionVariable(Opd)) 6424 return true; 6425 } 6426 6427 // Now we know we have a GEP ptr, %inv, %ind, %inv. Make sure that the step 6428 // can likely be merged into the address computation. 6429 unsigned MaxMergeDistance = 64; 6430 6431 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Ptr)); 6432 if (!AddRec) 6433 return true; 6434 6435 // Check the step is constant. 6436 const SCEV *Step = AddRec->getStepRecurrence(*SE); 6437 // Calculate the pointer stride and check if it is consecutive. 6438 const auto *C = dyn_cast<SCEVConstant>(Step); 6439 if (!C) 6440 return true; 6441 6442 const APInt &APStepVal = C->getAPInt(); 6443 6444 // Huge step value - give up. 6445 if (APStepVal.getBitWidth() > 64) 6446 return true; 6447 6448 int64_t StepVal = APStepVal.getSExtValue(); 6449 6450 return StepVal > MaxMergeDistance; 6451 } 6452 6453 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6454 return Legal->hasStride(I->getOperand(0)) || 6455 Legal->hasStride(I->getOperand(1)); 6456 } 6457 6458 LoopVectorizationCostModel::VectorizationCostTy 6459 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 6460 // If we know that this instruction will remain uniform, check the cost of 6461 // the scalar version. 6462 if (Legal->isUniformAfterVectorization(I)) 6463 VF = 1; 6464 6465 Type *VectorTy; 6466 unsigned C = getInstructionCost(I, VF, VectorTy); 6467 6468 bool TypeNotScalarized = 6469 VF > 1 && !VectorTy->isVoidTy() && TTI.getNumberOfParts(VectorTy) < VF; 6470 return VectorizationCostTy(C, TypeNotScalarized); 6471 } 6472 6473 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6474 unsigned VF, 6475 Type *&VectorTy) { 6476 Type *RetTy = I->getType(); 6477 if (VF > 1 && MinBWs.count(I)) 6478 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 6479 VectorTy = ToVectorTy(RetTy, VF); 6480 auto SE = PSE.getSE(); 6481 6482 // TODO: We need to estimate the cost of intrinsic calls. 
6483 switch (I->getOpcode()) { 6484 case Instruction::GetElementPtr: 6485 // We mark this instruction as zero-cost because the cost of GEPs in 6486 // vectorized code depends on whether the corresponding memory instruction 6487 // is scalarized or not. Therefore, we handle GEPs with the memory 6488 // instruction cost. 6489 return 0; 6490 case Instruction::Br: { 6491 return TTI.getCFInstrCost(I->getOpcode()); 6492 } 6493 case Instruction::PHI: { 6494 auto *Phi = cast<PHINode>(I); 6495 6496 // First-order recurrences are replaced by vector shuffles inside the loop. 6497 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 6498 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 6499 VectorTy, VF - 1, VectorTy); 6500 6501 // TODO: IF-converted IFs become selects. 6502 return 0; 6503 } 6504 case Instruction::UDiv: 6505 case Instruction::SDiv: 6506 case Instruction::URem: 6507 case Instruction::SRem: 6508 // We assume that if-converted blocks have a 50% chance of being executed. 6509 // Predicated scalarized instructions are avoided due to the CF that 6510 // bypasses turned off lanes. If we are not predicating, fallthrough. 6511 if (VF > 1 && mayDivideByZero(*I) && 6512 Legal->blockNeedsPredication(I->getParent())) 6513 return VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy) / 2 + 6514 getScalarizationOverhead(I, VF, true, TTI); 6515 case Instruction::Add: 6516 case Instruction::FAdd: 6517 case Instruction::Sub: 6518 case Instruction::FSub: 6519 case Instruction::Mul: 6520 case Instruction::FMul: 6521 case Instruction::FDiv: 6522 case Instruction::FRem: 6523 case Instruction::Shl: 6524 case Instruction::LShr: 6525 case Instruction::AShr: 6526 case Instruction::And: 6527 case Instruction::Or: 6528 case Instruction::Xor: { 6529 // Since we will replace the stride by 1 the multiplication should go away. 6530 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 6531 return 0; 6532 // Certain instructions can be cheaper to vectorize if they have a constant 6533 // second vector operand. One example of this are shifts on x86. 6534 TargetTransformInfo::OperandValueKind Op1VK = 6535 TargetTransformInfo::OK_AnyValue; 6536 TargetTransformInfo::OperandValueKind Op2VK = 6537 TargetTransformInfo::OK_AnyValue; 6538 TargetTransformInfo::OperandValueProperties Op1VP = 6539 TargetTransformInfo::OP_None; 6540 TargetTransformInfo::OperandValueProperties Op2VP = 6541 TargetTransformInfo::OP_None; 6542 Value *Op2 = I->getOperand(1); 6543 6544 // Check for a splat or for a non uniform vector of constants. 
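// E.g., in 'shl i32 %x, 4' the second operand is a uniform constant that is
// also a power of two, which lets targets such as x86 select a cheaper
// immediate shift; <i32 1, i32 2, i32 4, i32 8> would instead be a
// non-uniform constant vector.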
6545 if (isa<ConstantInt>(Op2)) { 6546 ConstantInt *CInt = cast<ConstantInt>(Op2); 6547 if (CInt && CInt->getValue().isPowerOf2()) 6548 Op2VP = TargetTransformInfo::OP_PowerOf2; 6549 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 6550 } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) { 6551 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 6552 Constant *SplatValue = cast<Constant>(Op2)->getSplatValue(); 6553 if (SplatValue) { 6554 ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue); 6555 if (CInt && CInt->getValue().isPowerOf2()) 6556 Op2VP = TargetTransformInfo::OP_PowerOf2; 6557 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 6558 } 6559 } else if (Legal->isUniform(Op2)) { 6560 Op2VK = TargetTransformInfo::OK_UniformValue; 6561 } 6562 6563 return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, Op2VK, 6564 Op1VP, Op2VP); 6565 } 6566 case Instruction::Select: { 6567 SelectInst *SI = cast<SelectInst>(I); 6568 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 6569 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 6570 Type *CondTy = SI->getCondition()->getType(); 6571 if (!ScalarCond) 6572 CondTy = VectorType::get(CondTy, VF); 6573 6574 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy); 6575 } 6576 case Instruction::ICmp: 6577 case Instruction::FCmp: { 6578 Type *ValTy = I->getOperand(0)->getType(); 6579 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 6580 auto It = MinBWs.find(Op0AsInstruction); 6581 if (VF > 1 && It != MinBWs.end()) 6582 ValTy = IntegerType::get(ValTy->getContext(), It->second); 6583 VectorTy = ToVectorTy(ValTy, VF); 6584 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy); 6585 } 6586 case Instruction::Store: 6587 case Instruction::Load: { 6588 StoreInst *SI = dyn_cast<StoreInst>(I); 6589 LoadInst *LI = dyn_cast<LoadInst>(I); 6590 Type *ValTy = (SI ? SI->getValueOperand()->getType() : LI->getType()); 6591 VectorTy = ToVectorTy(ValTy, VF); 6592 6593 unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment(); 6594 unsigned AS = 6595 SI ? SI->getPointerAddressSpace() : LI->getPointerAddressSpace(); 6596 Value *Ptr = getPointerOperand(I); 6597 // We add the cost of address computation here instead of with the gep 6598 // instruction because only here we know whether the operation is 6599 // scalarized. 6600 if (VF == 1) 6601 return TTI.getAddressComputationCost(VectorTy) + 6602 TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 6603 6604 if (LI && Legal->isUniform(Ptr)) { 6605 // Scalar load + broadcast 6606 unsigned Cost = TTI.getAddressComputationCost(ValTy->getScalarType()); 6607 Cost += TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), 6608 Alignment, AS); 6609 return Cost + 6610 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, ValTy); 6611 } 6612 6613 // For an interleaved access, calculate the total cost of the whole 6614 // interleave group. 6615 if (Legal->isAccessInterleaved(I)) { 6616 auto Group = Legal->getInterleavedAccessGroup(I); 6617 assert(Group && "Fail to get an interleaved access group."); 6618 6619 // Only calculate the cost once at the insert position. 6620 if (Group->getInsertPos() != I) 6621 return 0; 6622 6623 unsigned InterleaveFactor = Group->getFactor(); 6624 Type *WideVecTy = 6625 VectorType::get(VectorTy->getVectorElementType(), 6626 VectorTy->getVectorNumElements() * InterleaveFactor); 6627 6628 // Holds the indices of existing members in an interleaved load group. 
6629 // An interleaved store group doesn't need this as it doesn't allow gaps. 6630 SmallVector<unsigned, 4> Indices; 6631 if (LI) { 6632 for (unsigned i = 0; i < InterleaveFactor; i++) 6633 if (Group->getMember(i)) 6634 Indices.push_back(i); 6635 } 6636 6637 // Calculate the cost of the whole interleaved group. 6638 unsigned Cost = TTI.getInterleavedMemoryOpCost( 6639 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, 6640 Group->getAlignment(), AS); 6641 6642 if (Group->isReverse()) 6643 Cost += 6644 Group->getNumMembers() * 6645 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6646 6647 // FIXME: The interleaved load group with a huge gap could be even more 6648 // expensive than scalar operations. Then we could ignore such group and 6649 // use scalar operations instead. 6650 return Cost; 6651 } 6652 6653 // Check if the memory instruction will be scalarized. 6654 if (Legal->memoryInstructionMustBeScalarized(I, VF)) { 6655 bool IsComplexComputation = 6656 isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop); 6657 unsigned Cost = 0; 6658 // The cost of extracting from the value vector and pointer vector. 6659 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6660 for (unsigned i = 0; i < VF; ++i) { 6661 // The cost of extracting the pointer operand. 6662 Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, PtrTy, i); 6663 // In case of STORE, the cost of ExtractElement from the vector. 6664 // In case of LOAD, the cost of InsertElement into the returned 6665 // vector. 6666 Cost += TTI.getVectorInstrCost(SI ? Instruction::ExtractElement 6667 : Instruction::InsertElement, 6668 VectorTy, i); 6669 } 6670 6671 // The cost of the scalar loads/stores. 6672 Cost += VF * TTI.getAddressComputationCost(PtrTy, IsComplexComputation); 6673 Cost += VF * 6674 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), 6675 Alignment, AS); 6676 return Cost; 6677 } 6678 6679 // Determine if the pointer operand of the access is either consecutive or 6680 // reverse consecutive. 6681 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 6682 bool Reverse = ConsecutiveStride < 0; 6683 6684 // Determine if either a gather or scatter operation is legal. 6685 bool UseGatherOrScatter = 6686 !ConsecutiveStride && Legal->isLegalGatherOrScatter(I); 6687 6688 unsigned Cost = TTI.getAddressComputationCost(VectorTy); 6689 if (UseGatherOrScatter) { 6690 assert(ConsecutiveStride == 0 && 6691 "Gather/Scatter are not used for consecutive stride"); 6692 return Cost + 6693 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 6694 Legal->isMaskRequired(I), Alignment); 6695 } 6696 // Wide load/stores. 6697 if (Legal->isMaskRequired(I)) 6698 Cost += 6699 TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 6700 else 6701 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 6702 6703 if (Reverse) 6704 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6705 return Cost; 6706 } 6707 case Instruction::ZExt: 6708 case Instruction::SExt: 6709 case Instruction::FPToUI: 6710 case Instruction::FPToSI: 6711 case Instruction::FPExt: 6712 case Instruction::PtrToInt: 6713 case Instruction::IntToPtr: 6714 case Instruction::SIToFP: 6715 case Instruction::UIToFP: 6716 case Instruction::Trunc: 6717 case Instruction::FPTrunc: 6718 case Instruction::BitCast: { 6719 // We optimize the truncation of induction variable. 6720 // The cost of these is the same as the scalar operation. 
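// E.g., 'trunc i64 %iv to i32', where %iv is an induction variable, can be
// implemented by inducting directly in i32, so it is costed as the scalar
// cast rather than as a vector truncate.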
6721 if (I->getOpcode() == Instruction::Trunc && 6722 Legal->isInductionVariable(I->getOperand(0))) 6723 return TTI.getCastInstrCost(I->getOpcode(), I->getType(), 6724 I->getOperand(0)->getType()); 6725 6726 Type *SrcScalarTy = I->getOperand(0)->getType(); 6727 Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF); 6728 if (VF > 1 && MinBWs.count(I)) { 6729 // This cast is going to be shrunk. This may remove the cast or it might 6730 // turn it into slightly different cast. For example, if MinBW == 16, 6731 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 6732 // 6733 // Calculate the modified src and dest types. 6734 Type *MinVecTy = VectorTy; 6735 if (I->getOpcode() == Instruction::Trunc) { 6736 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 6737 VectorTy = 6738 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6739 } else if (I->getOpcode() == Instruction::ZExt || 6740 I->getOpcode() == Instruction::SExt) { 6741 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 6742 VectorTy = 6743 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6744 } 6745 } 6746 6747 return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy); 6748 } 6749 case Instruction::Call: { 6750 bool NeedToScalarize; 6751 CallInst *CI = cast<CallInst>(I); 6752 unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize); 6753 if (getVectorIntrinsicIDForCall(CI, TLI)) 6754 return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI)); 6755 return CallCost; 6756 } 6757 default: 6758 // The cost of executing VF copies of the scalar instruction. This opcode 6759 // is unknown. Assume that it is the same as 'mul'. 6760 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) + 6761 getScalarizationOverhead(I, VF, false, TTI); 6762 } // end of switch. 6763 } 6764 6765 char LoopVectorize::ID = 0; 6766 static const char lv_name[] = "Loop Vectorization"; 6767 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 6768 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 6769 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 6770 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 6771 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 6772 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 6773 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 6774 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 6775 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 6776 INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass) 6777 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 6778 INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 6779 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 6780 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 6781 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 6782 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 6783 6784 namespace llvm { 6785 Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) { 6786 return new LoopVectorize(NoUnrolling, AlwaysVectorize); 6787 } 6788 } 6789 6790 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 6791 6792 // Check if the pointer operand of a load or store instruction is 6793 // consecutive. 6794 if (auto *Ptr = getPointerOperand(Inst)) 6795 return Legal->isConsecutivePtr(Ptr); 6796 return false; 6797 } 6798 6799 void LoopVectorizationCostModel::collectValuesToIgnore() { 6800 // Ignore ephemeral values. 
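// (Ephemeral values are values used, directly or indirectly, only by
// llvm.assume intrinsics; they generate no real code, so they should not
// bias the cost model.)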
6801 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
6802
6803 // Ignore type-promoting instructions we identified during reduction
6804 // detection.
6805 for (auto &Reduction : *Legal->getReductionVars()) {
6806 RecurrenceDescriptor &RedDes = Reduction.second;
6807 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6808 VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6809 }
6810
6811 // Insert values known to be scalar into VecValuesToIgnore.
6812 for (auto *BB : TheLoop->getBlocks())
6813 for (auto &I : *BB)
6814 if (Legal->isScalarAfterVectorization(&I))
6815 VecValuesToIgnore.insert(&I);
6816 }
6817
6818 void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
6819 bool IfPredicateInstr) {
6820 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
6821 // Holds vector parameters or scalars, in case of uniform values.
6822 SmallVector<VectorParts, 4> Params;
6823
6824 setDebugLocFromInst(Builder, Instr);
6825
6826 // Does this instruction return a value?
6827 bool IsVoidRetTy = Instr->getType()->isVoidTy();
6828
6829 // Initialize a new scalar map entry.
6830 ScalarParts Entry(UF);
6831
6832 VectorParts Cond;
6833 if (IfPredicateInstr)
6834 Cond = createBlockInMask(Instr->getParent());
6835
6836 // For each vector unroll 'part':
6837 for (unsigned Part = 0; Part < UF; ++Part) {
6838 Entry[Part].resize(1);
6839 // For each scalar that we create:
6840
6841 // Start an "if (pred) a[i] = ..." block.
6842 Value *Cmp = nullptr;
6843 if (IfPredicateInstr) {
6844 if (Cond[Part]->getType()->isVectorTy())
6845 Cond[Part] =
6846 Builder.CreateExtractElement(Cond[Part], Builder.getInt32(0));
6847 Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cond[Part],
6848 ConstantInt::get(Cond[Part]->getType(), 1));
6849 }
6850
6851 Instruction *Cloned = Instr->clone();
6852 if (!IsVoidRetTy)
6853 Cloned->setName(Instr->getName() + ".cloned");
6854
6855 // Replace the operands of the cloned instruction with their scalar
6856 // equivalents in the new loop.
6857 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
6858 auto *NewOp = getScalarValue(Instr->getOperand(op), Part, 0);
6859 Cloned->setOperand(op, NewOp);
6860 }
6861
6862 // Place the cloned scalar in the new loop.
6863 Builder.Insert(Cloned);
6864
6865 // Add the cloned scalar to the scalar map entry.
6866 Entry[Part][0] = Cloned;
6867
6868 // If we just cloned a new assumption, add it to the assumption cache.
6869 if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
6870 if (II->getIntrinsicID() == Intrinsic::assume)
6871 AC->registerAssumption(II);
6872
6873 // End if-block.
6874 if (IfPredicateInstr)
6875 PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp));
6876 }
6877 VectorLoopValueMap.initScalar(Instr, Entry);
6878 }
6879
6880 void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) {
6881 auto *SI = dyn_cast<StoreInst>(Instr);
6882 bool IfPredicateInstr = (SI && Legal->blockNeedsPredication(SI->getParent()));
6883
6884 return scalarizeInstruction(Instr, IfPredicateInstr);
6885 }
6886
6887 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
6888
6889 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
6890
6891 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
6892 Instruction::BinaryOps BinOp) {
6893 // When unrolling and the VF is 1, we only need to add a simple scalar.
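// E.g., with StartIdx = 2 and step %s, this computes 'Val + 2 * %s' (using
// the fast-math variants below for floating-point inductions).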
6894 Type *Ty = Val->getType(); 6895 assert(!Ty->isVectorTy() && "Val must be a scalar"); 6896 6897 if (Ty->isFloatingPointTy()) { 6898 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 6899 6900 // Floating point operations had to be 'fast' to enable the unrolling. 6901 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 6902 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 6903 } 6904 Constant *C = ConstantInt::get(Ty, StartIdx); 6905 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 6906 } 6907 6908 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 6909 SmallVector<Metadata *, 4> MDs; 6910 // Reserve first location for self reference to the LoopID metadata node. 6911 MDs.push_back(nullptr); 6912 bool IsUnrollMetadata = false; 6913 MDNode *LoopID = L->getLoopID(); 6914 if (LoopID) { 6915 // First find existing loop unrolling disable metadata. 6916 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 6917 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 6918 if (MD) { 6919 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 6920 IsUnrollMetadata = 6921 S && S->getString().startswith("llvm.loop.unroll.disable"); 6922 } 6923 MDs.push_back(LoopID->getOperand(i)); 6924 } 6925 } 6926 6927 if (!IsUnrollMetadata) { 6928 // Add runtime unroll disable metadata. 6929 LLVMContext &Context = L->getHeader()->getContext(); 6930 SmallVector<Metadata *, 1> DisableOperands; 6931 DisableOperands.push_back( 6932 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 6933 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 6934 MDs.push_back(DisableNode); 6935 MDNode *NewLoopID = MDNode::get(Context, MDs); 6936 // Set operand 0 to refer to the loop id itself. 6937 NewLoopID->replaceOperandWith(0, NewLoopID); 6938 L->setLoopID(NewLoopID); 6939 } 6940 } 6941 6942 bool LoopVectorizePass::processLoop(Loop *L) { 6943 assert(L->empty() && "Only process inner loops."); 6944 6945 #ifndef NDEBUG 6946 const std::string DebugLocStr = getDebugLocString(L); 6947 #endif /* NDEBUG */ 6948 6949 DEBUG(dbgs() << "\nLV: Checking a loop in \"" 6950 << L->getHeader()->getParent()->getName() << "\" from " 6951 << DebugLocStr << "\n"); 6952 6953 LoopVectorizeHints Hints(L, DisableUnrolling, *ORE); 6954 6955 DEBUG(dbgs() << "LV: Loop hints:" 6956 << " force=" 6957 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 6958 ? "disabled" 6959 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 6960 ? "enabled" 6961 : "?")) 6962 << " width=" << Hints.getWidth() 6963 << " unroll=" << Hints.getInterleave() << "\n"); 6964 6965 // Function containing loop 6966 Function *F = L->getHeader()->getParent(); 6967 6968 // Looking at the diagnostic output is the only way to determine if a loop 6969 // was vectorized (other than looking at the IR or machine code), so it 6970 // is important to generate an optimization remark for each loop. Most of 6971 // these messages are generated as OptimizationRemarkAnalysis. Remarks 6972 // generated as OptimizationRemark and OptimizationRemarkMissed are 6973 // less verbose reporting vectorized loops and unvectorized loops that may 6974 // benefit from vectorization, respectively. 6975 6976 if (!Hints.allowVectorization(F, L, AlwaysVectorize)) { 6977 DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 6978 return false; 6979 } 6980 6981 // Check the loop for a trip count threshold: 6982 // do not vectorize loops with a tiny trip count. 
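// E.g., a loop known to execute only a handful of iterations usually cannot
// amortize the cost of the vector setup code and runtime checks.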
6983 const unsigned TC = SE->getSmallConstantTripCount(L);
6984 if (TC > 0u && TC < TinyTripCountVectorThreshold) {
6985 DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
6986 << "This loop is not worth vectorizing.");
6987 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
6988 DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
6989 else {
6990 DEBUG(dbgs() << "\n");
6991 ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
6992 "NotBeneficial", L)
6993 << "vectorization is not beneficial "
6994 "and is not explicitly forced");
6995 return false;
6996 }
6997 }
6998
6999 PredicatedScalarEvolution PSE(*SE, *L);
7000
7001 // Check if it is legal to vectorize the loop.
7002 LoopVectorizationRequirements Requirements(*ORE);
7003 LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE,
7004 &Requirements, &Hints);
7005 if (!LVL.canVectorize()) {
7006 DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
7007 emitMissedWarning(F, L, Hints, ORE);
7008 return false;
7009 }
7010
7011 // Use the cost model.
7012 LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
7013 &Hints);
7014 CM.collectValuesToIgnore();
7015
7016 // Check the function attributes to find out if this function should be
7017 // optimized for size.
7018 bool OptForSize =
7019 Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
7020
7021 // Compute the weighted frequency of this loop being executed and see if it
7022 // is less than 20% of the function entry baseline frequency. Note that we
7023 // always have a canonical loop here because we think we *can* vectorize.
7024 // FIXME: This is hidden behind a flag due to pervasive problems with
7025 // exactly what block frequency models.
7026 if (LoopVectorizeWithBlockFrequency) {
7027 BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader());
7028 if (Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
7029 LoopEntryFreq < ColdEntryFreq)
7030 OptForSize = true;
7031 }
7032
7033 // Check the function attributes to see if implicit floats are allowed.
7034 // FIXME: This check doesn't seem like it can be correct -- what if the loop
7035 // is an integer loop and the vector instructions selected are purely integer
7036 // vector instructions?
7037 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
7038 DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
7039 " attribute is used.\n");
7040 ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
7041 "NoImplicitFloat", L)
7042 << "loop not vectorized due to NoImplicitFloat attribute");
7043 emitMissedWarning(F, L, Hints, ORE);
7044 return false;
7045 }
7046
7047 // Check if the target supports potentially unsafe FP vectorization.
7048 // FIXME: Add a check for the type of safety issue (denormal, signaling)
7049 // for the target we're vectorizing for, to make sure none of the
7050 // additional fp-math flags can help.
7051 if (Hints.isPotentiallyUnsafe() &&
7052 TTI->isFPVectorizationPotentiallyUnsafe()) {
7053 DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
7054 ORE->emit(
7055 createMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
7056 << "loop not vectorized due to unsafe FP support.");
7057 emitMissedWarning(F, L, Hints, ORE);
7058 return false;
7059 }
7060
7061 // Select the optimal vectorization factor.
7062 const LoopVectorizationCostModel::VectorizationFactor VF =
7063 CM.selectVectorizationFactor(OptForSize);
7064
7065 // Select the interleave count.
7066 unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);
7067
7068 // Get the user's interleave count.
7069 unsigned UserIC = Hints.getInterleave();
7070
7071 // Identify the diagnostic messages that should be produced.
7072 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
7073 bool VectorizeLoop = true, InterleaveLoop = true;
7074 if (Requirements.doesNotMeet(F, L, Hints)) {
7075 DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
7076 "requirements.\n");
7077 emitMissedWarning(F, L, Hints, ORE);
7078 return false;
7079 }
7080
7081 if (VF.Width == 1) {
7082 DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
7083 VecDiagMsg = std::make_pair(
7084 "VectorizationNotBeneficial",
7085 "the cost-model indicates that vectorization is not beneficial");
7086 VectorizeLoop = false;
7087 }
7088
7089 if (IC == 1 && UserIC <= 1) {
7090 // Tell the user interleaving is not beneficial.
7091 DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
7092 IntDiagMsg = std::make_pair(
7093 "InterleavingNotBeneficial",
7094 "the cost-model indicates that interleaving is not beneficial");
7095 InterleaveLoop = false;
7096 if (UserIC == 1) {
7097 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
7098 IntDiagMsg.second +=
7099 " and is explicitly disabled or interleave count is set to 1";
7100 }
7101 } else if (IC > 1 && UserIC == 1) {
7102 // Tell the user interleaving is beneficial, but it is explicitly disabled.
7103 DEBUG(dbgs()
7104 << "LV: Interleaving is beneficial but is explicitly disabled.");
7105 IntDiagMsg = std::make_pair(
7106 "InterleavingBeneficialButDisabled",
7107 "the cost-model indicates that interleaving is beneficial "
7108 "but is explicitly disabled or interleave count is set to 1");
7109 InterleaveLoop = false;
7110 }
7111
7112 // Override IC if the user provided an interleave count.
7113 IC = UserIC > 0 ? UserIC : IC;
7114
7115 // Emit diagnostic messages, if any.
7116 const char *VAPassName = Hints.vectorizeAnalysisPassName();
7117 if (!VectorizeLoop && !InterleaveLoop) {
7118 // Do not vectorize or interleave the loop.
7119 ORE->emit(OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
7120 L->getStartLoc(), L->getHeader())
7121 << VecDiagMsg.second);
7122 ORE->emit(OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
7123 L->getStartLoc(), L->getHeader())
7124 << IntDiagMsg.second);
7125 return false;
7126 } else if (!VectorizeLoop && InterleaveLoop) {
7127 DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
7128 ORE->emit(OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
7129 L->getStartLoc(), L->getHeader())
7130 << VecDiagMsg.second);
7131 } else if (VectorizeLoop && !InterleaveLoop) {
7132 DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
7133 << DebugLocStr << '\n');
7134 ORE->emit(OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
7135 L->getStartLoc(), L->getHeader())
7136 << IntDiagMsg.second);
7137 } else if (VectorizeLoop && InterleaveLoop) {
7138 DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
7139 << DebugLocStr << '\n');
7140 DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
7141 }
7142
7143 using namespace ore;
7144 if (!VectorizeLoop) {
7145 assert(IC > 1 && "interleave count should not be 1 or 0");
7146 // If we decided that it is not profitable to vectorize the loop, then
7147 // interleave it.
7148 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC);
7149 Unroller.vectorize(&LVL, CM.MinBWs);
7150
7151 ORE->emit(OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
7152 L->getHeader())
7153 << "interleaved loop (interleaved count: "
7154 << NV("InterleaveCount", IC) << ")");
7155 } else {
7156 // If we decided that it is *profitable* to vectorize the loop, then do it.
7157 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC);
7158 LB.vectorize(&LVL, CM.MinBWs);
7159 ++LoopsVectorized;
7160
7161 // Add metadata to disable runtime unrolling of the scalar loop when there
7162 // are no runtime checks about strides and memory. A scalar loop that is
7163 // rarely used is not worth unrolling.
7164 if (!LB.areSafetyChecksAdded())
7165 AddRuntimeUnrollDisableMetaData(L);
7166
7167 // Report the vectorization decision.
7168 ORE->emit(OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
7169 L->getHeader())
7170 << "vectorized loop (vectorization width: "
7171 << NV("VectorizationFactor", VF.Width)
7172 << ", interleaved count: " << NV("InterleaveCount", IC) << ")");
7173 }
7174
7175 // Mark the loop as already vectorized to avoid vectorizing again.
7176 Hints.setAlreadyVectorized();
7177
7178 DEBUG(verifyFunction(*L->getHeader()->getParent()));
7179 return true;
7180 }
7181
7182 bool LoopVectorizePass::runImpl(
7183 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
7184 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
7185 DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
7186 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
7187 OptimizationRemarkEmitter &ORE_) {
7188
7189 SE = &SE_;
7190 LI = &LI_;
7191 TTI = &TTI_;
7192 DT = &DT_;
7193 BFI = &BFI_;
7194 TLI = TLI_;
7195 AA = &AA_;
7196 AC = &AC_;
7197 GetLAA = &GetLAA_;
7198 DB = &DB_;
7199 ORE = &ORE_;
7200
7201 // Compute some weights outside of the loop over the loops. Compute this
7202 // using a BranchProbability to re-use its scaling math.
7203 const BranchProbability ColdProb(1, 5); // 20%
7204 ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb;
7205
7206 // Don't attempt if
7207 // 1. the target claims to have no vector registers, and
7208 // 2.
interleaving won't help ILP.
7209 //
7210 // The second condition is necessary because, even if the target has no
7211 // vector registers, loop vectorization may still enable scalar
7212 // interleaving.
7213 if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
7214 return false;
7215
7216 // Build up a worklist of inner loops to vectorize. This is necessary as
7217 // the act of vectorizing or partially unrolling a loop creates new loops
7218 // and can invalidate iterators across the loops.
7219 SmallVector<Loop *, 8> Worklist;
7220
7221 for (Loop *L : *LI)
7222 addAcyclicInnerLoop(*L, Worklist);
7223
7224 LoopsAnalyzed += Worklist.size();
7225
7226 // Now walk the identified inner loops, processing each loop nest in the
7227 // function.
7228 bool Changed = false;
7229 while (!Worklist.empty())
7230 Changed |= processLoop(Worklist.pop_back_val());
7231
7232 // Report whether any loop was modified.
7233 return Changed;
7234 }
7235
7236
7237 PreservedAnalyses LoopVectorizePass::run(Function &F,
7238 FunctionAnalysisManager &AM) {
7239 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
7240 auto &LI = AM.getResult<LoopAnalysis>(F);
7241 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
7242 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
7243 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
7244 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
7245 auto &AA = AM.getResult<AAManager>(F);
7246 auto &AC = AM.getResult<AssumptionAnalysis>(F);
7247 auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
7248 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
7249
7250 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
7251 std::function<const LoopAccessInfo &(Loop &)> GetLAA =
7252 [&](Loop &L) -> const LoopAccessInfo & {
7253 return LAM.getResult<LoopAccessAnalysis>(L);
7254 };
7255 bool Changed =
7256 runImpl(F, SE, LI, TTI, DT, BFI, TLI, DB, AA, AC, GetLAA, ORE);
7257 if (!Changed)
7258 return PreservedAnalyses::all();
7259 PreservedAnalyses PA;
7260 PA.preserve<LoopAnalysis>();
7261 PA.preserve<DominatorTreeAnalysis>();
7262 PA.preserve<BasicAA>();
7263 PA.preserve<GlobalsAA>();
7264 return PA;
7265 }
7266