//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
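//
// As a simple illustration (not actual compiler output), with a vectorization
// factor of 4 a loop such as
//
//   for (i = 0; i < n; ++i)
//     A[i] = B[i] + C[i];
//
// is widened so that each vector iteration loads B[i..i+3] and C[i..i+3],
// performs one vector add, stores the result to A[i..i+3], and advances the
// index by 4; an epilogue loop handles any remaining iterations.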
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <map>
#include <tuple>

using namespace llvm;
using namespace llvm::PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// We don't vectorize loops with a known constant trip count below this number.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Don't vectorize loops with a constant "
             "trip count that is smaller than this "
             "value."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in the loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(false), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

namespace {

// Forward declarations.
class LoopVectorizeHints;
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;

/// Returns true if the given loop body has a cycle, excluding the loop
/// itself.
static bool hasCyclesInLoopBody(const Loop &L) {
  if (!L.empty())
    return true;

  for (const auto &SCC :
       make_range(scc_iterator<Loop, LoopBodyTraits>::begin(L),
                  scc_iterator<Loop, LoopBodyTraits>::end(L))) {
    if (SCC.size() > 1) {
      DEBUG(dbgs() << "LVL: Detected a cycle in the loop body:\n");
      DEBUG(L.dump());
      return true;
    }
  }
  return false;
}

/// \brief This modifies LoopAccessReport to initialize the message with the
/// loop-vectorizer-specific part.
class VectorizationReport : public LoopAccessReport {
public:
  VectorizationReport(Instruction *I = nullptr)
      : LoopAccessReport("loop not vectorized: ", I) {}

  /// \brief This allows promotion of the loop-access analysis report into the
  /// loop-vectorizer report. It modifies the message to add the
  /// loop-vectorizer-specific part of the message.
  explicit VectorizationReport(const LoopAccessReport &R)
      : LoopAccessReport(Twine("loop not vectorized: ") + R.str(),
                         R.getInstr()) {}
};

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}

/// A helper function that returns the GEP instruction and knows to skip a
/// 'bitcast'. The 'bitcast' may be skipped if the source and the destination
/// pointee types of the 'bitcast' have the same size.
/// For example:
///   bitcast double** %var to i64* - can be skipped
///   bitcast double** %var to i8*  - cannot
static GetElementPtrInst *getGEPInstruction(Value *Ptr) {

  if (isa<GetElementPtrInst>(Ptr))
    return cast<GetElementPtrInst>(Ptr);

  if (isa<BitCastInst>(Ptr) &&
      isa<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0))) {
    Type *BitcastTy = Ptr->getType();
    Type *GEPTy = cast<BitCastInst>(Ptr)->getSrcTy();
    if (!isa<PointerType>(BitcastTy) || !isa<PointerType>(GEPTy))
      return nullptr;
    Type *Pointee1Ty = cast<PointerType>(BitcastTy)->getPointerElementType();
    Type *Pointee2Ty = cast<PointerType>(GEPTy)->getPointerElementType();
    const DataLayout &DL = cast<BitCastInst>(Ptr)->getModule()->getDataLayout();
    if (DL.getTypeSizeInBits(Pointee1Ty) == DL.getTypeSizeInBits(Pointee2Ty))
      return cast<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0));
  }
  return nullptr;
}

/// A helper function that returns the pointer operand of a load or store
/// instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found in the loop.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Induction(nullptr),
        OldInduction(nullptr), WidenMap(UnrollFactor), TripCount(nullptr),
        VectorTripCount(nullptr), Legal(nullptr), AddedSafetyChecks(false) {}

  // Perform the actual loop widening (vectorization).
  // MinimumBitWidths maps scalar integer values to the smallest bitwidth they
  // can be validly truncated to. The cost model has assumed this truncation
  // will happen when vectorizing.
  void vectorize(LoopVectorizationLegality *L,
                 const MapVector<Instruction *, uint64_t> &MinimumBitWidths) {
    MinBWs = &MinimumBitWidths;
    Legal = L;
    // Create a new empty loop. Unlink the old loop and connect the new one.
    createEmptyLoop();
    // Widen each instruction in the old loop to a new one in the new loop.
    // Use the Legality module to find the induction and reduction variables.
    vectorizeLoop();
  }

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  virtual ~InnerLoopVectorizer() {}

protected:
  /// A small list of PHINodes.
  typedef SmallVector<PHINode *, 4> PhiVector;
  /// When we unroll loops we have multiple vector values for each scalar.
  /// This data structure holds the unrolled and vectorized values that
  /// originated from one scalar instruction.
  typedef SmallVector<Value *, 2> VectorParts;

  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
      EdgeMaskCache;

  /// Create an empty loop, based on the loop ranges of the old loop.
  void createEmptyLoop();

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);
  /// Copy and widen the instructions from the old loop.
  virtual void vectorizeLoop();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// \brief The Loop exit block may have single value PHI nodes where the
  /// incoming value is 'Undef'. While vectorizing we only handled real values
  /// that were defined inside the loop. Here we fix the 'undef case'.
  /// See PR14725.
  void fixLCSSAPHIs();

  /// Predicate conditional instructions that require predication on their
  /// respective conditions.
  void predicateInstructions();

  /// Shrinks vector element sizes based on information in "MinBWs".
  void truncateToMinimalBitwidths();

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);
  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// A helper function to vectorize a single BB within the innermost loop.
  void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VectorParts &Entry, unsigned UF,
                           unsigned VF, PhiVector *PV);

  /// Insert the new loop into the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars. If \p IfPredicateInstr is true we need to 'hide' each
  /// scalarized instruction behind an if block predicated on the control
  /// dependence of the instruction.
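  /// As an illustrative sketch (the block names here are hypothetical, not
  /// actual output), predicating a scalarized store for one lane produces IR
  /// of the form:
  ///   %c = extractelement <4 x i1> %mask, i32 0
  ///   br i1 %c, label %pred.store.if, label %pred.store.continue
  /// with the scalar store emitted in the conditional block.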
  virtual void scalarizeInstruction(Instruction *Instr,
                                    bool IfPredicateInstr = false);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// This is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at \p StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it
  /// can be a truncate instruction).
  void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal);

  /// Create a vector induction phi node based on an existing scalar one. This
  /// currently only works for integer induction variables with a constant
  /// step. If \p TruncType is non-null, instead of widening the original IV,
  /// we widen a version of the IV truncated to \p TruncType.
  void createVectorIntInductionPHI(const InductionDescriptor &II,
                                   VectorParts &Entry, IntegerType *TruncType);

  /// Widen an integer induction variable \p IV. If \p Trunc is provided, the
  /// induction variable will first be truncated to the corresponding type. The
  /// widened values are placed in \p Entry.
  void widenIntInduction(PHINode *IV, VectorParts &Entry,
                         TruncInst *Trunc = nullptr);

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// When we go over instructions in the basic block we rely on previous
  /// values within the current basic block or on loop invariant values.
  /// When we widen (vectorize) values we place them in the map. If the values
  /// are not within the map, they have to be loop invariant, so we simply
  /// broadcast them into a vector.
  VectorParts &getVectorValue(Value *V);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Emit a bypass check to see if the trip count would overflow, or we
  /// wouldn't have enough iterations to execute one vector loop.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if the vector trip count is nonzero.
  void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// This is a helper class that holds the vectorizer state. It maps scalar
  /// instructions to vector instructions. When the code is 'unrolled' then
  /// a single scalar value is mapped to multiple vector parts. The parts
  /// are stored in the VectorPart type.
  struct ValueMap {
    /// C'tor. UnrollFactor controls the number of vectors ('parts') that
    /// are mapped.
    ValueMap(unsigned UnrollFactor) : UF(UnrollFactor) {}

    /// \return True if 'Key' is saved in the Value Map.
    bool has(Value *Key) const { return MapStorage.count(Key); }

    /// Initializes a new entry in the map. Sets all of the vector parts to the
    /// same value in 'Val'.
    /// \return A reference to a vector with splat values.
    VectorParts &splat(Value *Key, Value *Val) {
      VectorParts &Entry = MapStorage[Key];
      Entry.assign(UF, Val);
      return Entry;
    }

    ///\return A reference to the value that is stored at 'Key'.
    VectorParts &get(Value *Key) {
      VectorParts &Entry = MapStorage[Key];
      if (Entry.empty())
        Entry.resize(UF);
      assert(Entry.size() == UF);
      return Entry;
    }

  private:
    /// The unroll factor. Each entry in the map stores this number of vector
    /// elements.
    unsigned UF;

    /// Map storage. We use std::map and not DenseMap because insertions to a
    /// dense map invalidate its iterators.
    std::map<Value *, VectorParts> MapStorage;
  };

  /// The original loop.
  Loop *OrigLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;
  /// Loop Info.
  LoopInfo *LI;
  /// Dominator Tree.
  DominatorTree *DT;
  /// Alias Analysis.
  AliasAnalysis *AA;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Assumption Cache.
  AssumptionCache *AC;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

protected:
  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;
  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;
  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;
  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;
  /// The vector loop body.
  BasicBlock *LoopVectorBody;
  /// The scalar loop body.
  BasicBlock *LoopScalarBody;
  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction;
  /// The induction variable of the old basic block.
  PHINode *OldInduction;
  /// Maps scalars to widened vectors.
  ValueMap WidenMap;

  /// A map of induction variables from the original loop to their
  /// corresponding VF * UF scalarized values in the vectorized loop. The
  /// purpose of ScalarIVMap is similar to that of WidenMap. Whereas WidenMap
  /// maps original loop values to their vector versions in the new loop,
  /// ScalarIVMap maps induction variables from the original loop that are not
  /// vectorized to their scalar equivalents in the vector loop. Maintaining a
  /// separate map for scalarized induction variables allows us to avoid
  /// unnecessary scalar-to-vector-to-scalar conversions.
  DenseMap<Value *, SmallVector<Value *, 8>> ScalarIVMap;

  /// Store instructions that should be predicated, as a pair
  /// <StoreInst, Predicate>.
  SmallVector<std::pair<Instruction *, Value *>, 4> PredicatedInstructions;
  EdgeMaskCache MaskCache;
  /// Trip count of the original loop.
  Value *TripCount;
  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount;

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  const MapVector<Instruction *, uint64_t> *MinBWs;

  LoopVectorizationLegality *Legal;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor) {}

private:
  void scalarizeInstruction(Instruction *Instr,
                            bool IfPredicateInstr = false) override;
  void vectorizeMemoryInstruction(Instruction *Instr) override;
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

/// \brief Set the debug location in the builder using the debug location in the
/// instruction.
static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr))
    B.SetCurrentDebugLocation(Inst->getDebugLoc());
  else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///   for (unsigned i = 0; i < 1024; i+=4) {
///     a = A[i];     // Member of index 0
///     b = A[i+1];   // Member of index 1
///     d = A[i+3];   // Member of index 3
///     ...
///   }
///
/// An interleaved store group of factor 4:
///   for (unsigned i = 0; i < 1024; i+=4) {
///     ...
///     A[i]   = a;   // Member of index 0
///     A[i+1] = b;   // Member of index 1
///     A[i+2] = c;   // Member of index 2
///     A[i+3] = d;   // Member of index 3
///   }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is related to the leader and it could be
  /// negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The largest index is always less than the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if it contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey;
  int LargestKey;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g.
  //   %even = load i32        // Insert Position
  //   %add = add i32 %even    // Use of %even
  //   %odd = load i32
  //
  //   store i32 %even
  //   %odd = add i32          // Def of %odd
  //   store i32 %odd          // Insert Position
  Instruction *InsertPos;
};

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise the analysis is meaningless, as vectorizing interleaved
/// accesses would be unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(nullptr),
        RequiresScalarEpilogue(false) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Return the maximum interleave factor of all interleaved groups.
  unsigned getMaxInterleaveFactor() const {
    unsigned MaxFactor = 1;
    for (auto &Entry : InterleaveGroupMap)
      MaxFactor = std::max(MaxFactor, Entry.second->getFactor());
    return MaxFactor;
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr doesn't belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

  /// \brief Initialize the LoopAccessInfo used for dependence checking.
  void setLAI(const LoopAccessInfo *Info) { LAI = Info; }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;
  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// Holds dependences among the memory accesses in the loop. It maps a source
  /// access to a set of dependent sink accesses.
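  /// For example (illustrative), if the loop carries dependences A -> B and
  /// A -> C, this map holds the single entry Dependences[A] = {B, C}.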
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    StrideDescriptor() = default;

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;
    const SCEV *Scev = nullptr; // The scalar expression of this access.
    uint64_t Size = 0;          // The size of the memory object.
    unsigned Align = 0;         // The alignment of this access.
  };

  /// \brief A type for holding instructions and their stride descriptors.
  typedef std::pair<Instruction *, StrideDescriptor> StrideEntry;

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);

  /// \brief Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride) {
    unsigned Factor = std::abs(Stride);
    return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
  }

  /// \brief Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }

  /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return false if reordering is
  /// not necessary or is prevented because \p A and \p B may be dependent.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {

    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B. Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
  }

  /// \brief Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for it.
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;
  /// Vectorization interleave factor.
  Hint Interleave;
  /// Vectorization forced.
  Hint Force;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
                     OptimizationRemarkEmitter &ORE)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        PotentiallyUnsafe(false), TheLoop(L), ORE(ORE) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized() {
    Width.Value = Interleave.Value = 1;
    Hint Hints[] = {Width, Interleave};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      ORE.emitOptimizationRemarkAnalysis(vectorizeAnalysisPassName(), L,
                                         emitRemark());
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      ORE.emitOptimizationRemarkAnalysis(vectorizeAnalysisPassName(), L,
                                         emitRemark());
      return false;
    }

    if (getWidth() == 1 && getInterleave() == 1) {
      // FIXME: Add a separate metadata to indicate when the loop has already
      // been vectorized instead of setting width and count to 1.
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      ORE.emitOptimizationRemarkAnalysis(
          vectorizeAnalysisPassName(), L,
          "loop not vectorized: vectorization and interleaving are explicitly "
          "disabled, or vectorize width and interleave count are both set to "
          "1");
      return false;
    }

    return true;
  }

  /// Dumps all the hint information.
  std::string emitRemark() const {
    VectorizationReport R;
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      R << "vectorization is explicitly disabled";
    else {
      R << "use -Rpass-analysis=loop-vectorize for more info";
      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
        R << " (Force=true";
        if (Width.Value != 0)
          R << ", Vector Width=" << Width.Value;
        if (Interleave.Value != 0)
          R << ", Interleave Count=" << Interleave.Value;
        R << ")";
      }
    }

    return R.str();
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }

  /// \brief If hints are provided that force vectorization, use the AlwaysPrint
  /// pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const {
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return DiagnosticInfoOptimizationRemarkAnalysis::AlwaysPrint;
  }

  bool allowReordering() const {
    // When hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations that is given by the
    // scalar loop. This is not enabled by default because it can be unsafe
    // or inefficient. For example, reordering floating-point operations will
    // change the way round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either a MDString or a MDNode with the first
      // operand a MDString.
      if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        if (!MD || MD->getNumOperands() == 0)
          continue;
        S = dyn_cast<MDString>(MD->getOperand(0));
        for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
          Args.push_back(MD->getOperand(i));
      } else {
        S = dyn_cast<MDString>(LoopID->getOperand(i));
        assert(Args.size() == 0 && "too many arguments for MDString");
      }

      if (!S)
        continue;

      // Check if the hint starts with the loop metadata prefix.
      StringRef Name = S->getString();
      if (Args.size() == 1)
        setHint(Name, Args[0]);
    }
  }

  /// Checks a string hint with one operand and sets the value if valid.
  void setHint(StringRef Name, Metadata *Arg) {
    if (!Name.startswith(Prefix()))
      return;
    Name = Name.substr(Prefix().size(), StringRef::npos);

    const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
    if (!C)
      return;
    unsigned Val = C->getZExtValue();

    Hint *Hints[] = {&Width, &Interleave, &Force};
    for (auto H : Hints) {
      if (Name == H->Name) {
        if (H->validate(Val))
          H->Value = Val;
        else
          DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
        break;
      }
    }
  }

  /// Create a new hint from a name / value pair.
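  /// For example (illustrative), createHintMetadata("llvm.loop.vectorize.width", 4)
  /// produces a node equivalent to the IR metadata:
  ///   !{!"llvm.loop.vectorize.width", i32 4}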
  MDNode *createHintMetadata(StringRef Name, unsigned V) const {
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    Metadata *MDs[] = {MDString::get(Context, Name),
                       ConstantAsMetadata::get(
                           ConstantInt::get(Type::getInt32Ty(Context), V))};
    return MDNode::get(Context, MDs);
  }

  /// Matches metadata with hint name.
  bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
    MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
    if (!Name)
      return false;

    for (auto H : HintTypes)
      if (Name->getString().endswith(H.Name))
        return true;
    return false;
  }

  /// Sets current hints into loop metadata, keeping other values intact.
  void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
    if (HintTypes.size() == 0)
      return;

    // Reserve the first element for LoopID (see below).
    SmallVector<Metadata *, 4> MDs(1);
    // If the loop already has metadata, then ignore the existing operands.
    MDNode *LoopID = TheLoop->getLoopID();
    if (LoopID) {
      for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
        MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
        // If the node is in the update list, ignore the old value.
        if (!matchesHintMetadataName(Node, HintTypes))
          MDs.push_back(Node);
      }
    }

    // Now, add the missing hints.
    for (auto H : HintTypes)
      MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));

    // Replace the current metadata node with the new one.
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);

    TheLoop->setLoopID(NewLoopID);
  }

  /// The loop these hints belong to.
  const Loop *TheLoop;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter &ORE;
};

static void emitAnalysisDiag(const Loop *TheLoop,
                             const LoopVectorizeHints &Hints,
                             OptimizationRemarkEmitter &ORE,
                             const LoopAccessReport &Message) {
  const char *Name = Hints.vectorizeAnalysisPassName();
  LoopAccessReport::emitAnalysis(Message, TheLoop, Name, ORE);
}

static void emitMissedWarning(Function *F, Loop *L,
                              const LoopVectorizeHints &LH,
                              OptimizationRemarkEmitter *ORE) {
  ORE->emitOptimizationRemarkMissed(LV_NAME, L, LH.emitRemark());

  if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
    if (LH.getWidth() != 1)
      emitLoopVectorizeWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop vectorization");
    else if (LH.getInterleave() != 1)
      emitLoopInterleaveWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop interleaving");
  }
}

/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
/// to what vectorization factor.
/// This class does not look at the profitability of vectorization, only the
/// legality. This class has two main kinds of checks:
/// * Memory checks - The code in canVectorizeMemory checks if vectorization
///   will change the order of memory accesses in a way that will change the
///   correctness of the program.
/// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory
///   checks for a number of different conditions, such as the availability of
///   a single induction variable, and that all types are supported and
///   vectorizable, etc. This code reflects the capabilities of
///   InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying the
/// induction and reduction variables.
class LoopVectorizationLegality {
public:
  LoopVectorizationLegality(
      Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
      TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F,
      const TargetTransformInfo *TTI,
      std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI,
      OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R,
      LoopVectorizeHints *H)
      : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT),
        GetLAA(GetLAA), LAI(nullptr), ORE(ORE), InterleaveInfo(PSE, L, DT, LI),
        Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
        Requirements(R), Hints(H) {}

  /// ReductionList contains the reduction descriptors for all
  /// of the reductions that were found in the loop.
  typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;

  /// InductionList saves induction variables and maps them to the
  /// induction descriptor.
  typedef MapVector<PHINode *, InductionDescriptor> InductionList;

  /// RecurrenceSet contains the phi nodes that are recurrences other than
  /// inductions and reductions.
  typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet;

  /// Returns true if it is legal to vectorize this loop.
  /// This does not mean that it is profitable to vectorize this
  /// loop, only that it is legal to do so.
  bool canVectorize();

  /// Returns the Induction variable.
  PHINode *getInduction() { return Induction; }

  /// Returns the reduction variables found in the loop.
  ReductionList *getReductionVars() { return &Reductions; }

  /// Returns the induction variables found in the loop.
  InductionList *getInductionVars() { return &Inductions; }

  /// Return the first-order recurrences found in the loop.
  RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }

  /// Returns the widest induction type.
  Type *getWidestInductionType() { return WidestIndTy; }

  /// Returns True if V is an induction variable in this loop.
  bool isInductionVariable(const Value *V);

  /// Returns True if PN is a reduction variable in this loop.
  bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }

  /// Returns True if Phi is a first-order recurrence in this loop.
  bool isFirstOrderRecurrence(const PHINode *Phi);

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  bool blockNeedsPredication(BasicBlock *BB);

  /// Check if this pointer is consecutive when vectorizing. This happens
  /// when the last index of the GEP is the induction variable, or when the
  /// pointer itself is an induction variable.
  /// This check allows us to vectorize A[idx] into a wide load/store.
  /// Returns:
  /// 0 - Stride is unknown or non-consecutive.
  /// 1 - Address is consecutive.
  /// -1 - Address is consecutive, and decreasing.
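  /// For example (illustrative), in a loop with induction variable i, the
  /// access A[i] yields 1, A[N - i] yields -1, and A[2 * i] yields 0.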
1453 int isConsecutivePtr(Value *Ptr); 1454 1455 /// Returns true if the value V is uniform within the loop. 1456 bool isUniform(Value *V); 1457 1458 /// Returns true if \p I is known to be uniform after vectorization. 1459 bool isUniformAfterVectorization(Instruction *I) { return Uniforms.count(I); } 1460 1461 /// Returns true if \p I is known to be scalar after vectorization. 1462 bool isScalarAfterVectorization(Instruction *I) { return Scalars.count(I); } 1463 1464 /// Returns the information that we collected about runtime memory check. 1465 const RuntimePointerChecking *getRuntimePointerChecking() const { 1466 return LAI->getRuntimePointerChecking(); 1467 } 1468 1469 const LoopAccessInfo *getLAI() const { return LAI; } 1470 1471 /// \brief Check if \p Instr belongs to any interleaved access group. 1472 bool isAccessInterleaved(Instruction *Instr) { 1473 return InterleaveInfo.isInterleaved(Instr); 1474 } 1475 1476 /// \brief Return the maximum interleave factor of all interleaved groups. 1477 unsigned getMaxInterleaveFactor() const { 1478 return InterleaveInfo.getMaxInterleaveFactor(); 1479 } 1480 1481 /// \brief Get the interleaved access group that \p Instr belongs to. 1482 const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) { 1483 return InterleaveInfo.getInterleaveGroup(Instr); 1484 } 1485 1486 /// \brief Returns true if an interleaved group requires a scalar iteration 1487 /// to handle accesses with gaps. 1488 bool requiresScalarEpilogue() const { 1489 return InterleaveInfo.requiresScalarEpilogue(); 1490 } 1491 1492 unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); } 1493 1494 bool hasStride(Value *V) { return LAI->hasStride(V); } 1495 1496 /// Returns true if the target machine supports masked store operation 1497 /// for the given \p DataType and kind of access to \p Ptr. 1498 bool isLegalMaskedStore(Type *DataType, Value *Ptr) { 1499 return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType); 1500 } 1501 /// Returns true if the target machine supports masked load operation 1502 /// for the given \p DataType and kind of access to \p Ptr. 1503 bool isLegalMaskedLoad(Type *DataType, Value *Ptr) { 1504 return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType); 1505 } 1506 /// Returns true if the target machine supports masked scatter operation 1507 /// for the given \p DataType. 1508 bool isLegalMaskedScatter(Type *DataType) { 1509 return TTI->isLegalMaskedScatter(DataType); 1510 } 1511 /// Returns true if the target machine supports masked gather operation 1512 /// for the given \p DataType. 1513 bool isLegalMaskedGather(Type *DataType) { 1514 return TTI->isLegalMaskedGather(DataType); 1515 } 1516 /// Returns true if the target machine can represent \p V as a masked gather 1517 /// or scatter operation. 1518 bool isLegalGatherOrScatter(Value *V) { 1519 auto *LI = dyn_cast<LoadInst>(V); 1520 auto *SI = dyn_cast<StoreInst>(V); 1521 if (!LI && !SI) 1522 return false; 1523 auto *Ptr = getPointerOperand(V); 1524 auto *Ty = cast<PointerType>(Ptr->getType())->getElementType(); 1525 return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty)); 1526 } 1527 1528 /// Returns true if vector representation of the instruction \p I 1529 /// requires mask. 
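/// For example (an illustrative sketch, not from the original source), in an
/// if-converted loop such as
///   for (i = 0; i < n; ++i)
///     if (Cond[i]) A[i] = X;
/// the store to A[i] executes only for lanes where Cond[i] holds, so its
/// widened form must be a masked store whose mask is the vectorized value of
/// Cond[i].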
1530 bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); } 1531 unsigned getNumStores() const { return LAI->getNumStores(); } 1532 unsigned getNumLoads() const { return LAI->getNumLoads(); } 1533 unsigned getNumPredStores() const { return NumPredStores; } 1534 1535 private: 1536 /// Check if a single basic block loop is vectorizable. 1537 /// At this point we know that this is a loop with a constant trip count 1538 /// and we only need to check individual instructions. 1539 bool canVectorizeInstrs(); 1540 1541 /// When we vectorize loops we may change the order in which 1542 /// we read and write from memory. This method checks if it is 1543 /// legal to vectorize the code, considering only memory constraints. 1544 /// Returns true if the loop is vectorizable. 1545 bool canVectorizeMemory(); 1546 1547 /// Return true if we can vectorize this loop using the IF-conversion 1548 /// transformation. 1549 bool canVectorizeWithIfConvert(); 1550 1551 /// Collect the instructions that are uniform after vectorization. An 1552 /// instruction is uniform if we represent it with a single scalar value in 1553 /// the vectorized loop corresponding to each vector iteration. Examples of 1554 /// uniform instructions include pointer operands of consecutive or 1555 /// interleaved memory accesses. Note that although uniformity implies an 1556 /// instruction will be scalar, the reverse is not true. In general, a 1557 /// scalarized instruction will be represented by VF scalar values in the 1558 /// vectorized loop, each corresponding to an iteration of the original 1559 /// scalar loop. 1560 void collectLoopUniforms(); 1561 1562 /// Collect the instructions that are scalar after vectorization. An 1563 /// instruction is scalar if it is known to be uniform or will be scalarized 1564 /// during vectorization. Non-uniform scalarized instructions will be 1565 /// represented by VF values in the vectorized loop, each corresponding to an 1566 /// iteration of the original scalar loop. 1567 void collectLoopScalars(); 1568 1569 /// Return true if all of the instructions in the block can be speculatively 1570 /// executed. \p SafePtrs is a list of addresses that are known to be legal 1571 /// and we know that we can read from them without a segfault. 1572 bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs); 1573 1574 /// Updates the vectorization state by adding \p Phi to the inductions list. 1575 /// This can set \p Phi as the main induction of the loop if \p Phi is a 1576 /// better choice for the main induction than the existing one. 1577 void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID, 1578 SmallPtrSetImpl<Value *> &AllowedExit); 1579 1580 /// Report an analysis message to assist the user in diagnosing loops that are 1581 /// not vectorized. These are handled as LoopAccessReport rather than 1582 /// VectorizationReport because the << operator of VectorizationReport returns 1583 /// LoopAccessReport. 1584 void emitAnalysis(const LoopAccessReport &Message) const { 1585 emitAnalysisDiag(TheLoop, *Hints, *ORE, Message); 1586 } 1587 1588 /// \brief If an access has a symbolic stride, this maps the pointer value to 1589 /// the stride symbol. 1590 const ValueToValueMap *getSymbolicStrides() { 1591 // FIXME: Currently, the set of symbolic strides is sometimes queried before 1592 // it's collected. This happens from canVectorizeWithIfConvert, when the 1593 // pointer is checked to reference consecutive elements suitable for a 1594 // masked access.
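// For illustration (a hypothetical example, not from the original source), a
// symbolic stride arises in code such as
//   for (i = 0; i < n; ++i) A[i * Stride] = B[i];
// where Stride is loop-invariant but unknown at compile time. The returned
// map would associate the pointer of A[i * Stride] with the symbol Stride,
// allowing a runtime check for Stride == 1 to be emitted so the access can
// be treated as consecutive.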
1595 return LAI ? &LAI->getSymbolicStrides() : nullptr; 1596 } 1597 1598 unsigned NumPredStores; 1599 1600 /// The loop that we evaluate. 1601 Loop *TheLoop; 1602 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. 1603 /// Applies dynamic knowledge to simplify SCEV expressions in the context 1604 /// of existing SCEV assumptions. The analysis will also add a minimal set 1605 /// of new predicates if this is required to enable vectorization and 1606 /// unrolling. 1607 PredicatedScalarEvolution &PSE; 1608 /// Target Library Info. 1609 TargetLibraryInfo *TLI; 1610 /// Target Transform Info 1611 const TargetTransformInfo *TTI; 1612 /// Dominator Tree. 1613 DominatorTree *DT; 1614 // LoopAccess analysis. 1615 std::function<const LoopAccessInfo &(Loop &)> *GetLAA; 1616 // And the loop-accesses info corresponding to this loop. This pointer is 1617 // null until canVectorizeMemory sets it up. 1618 const LoopAccessInfo *LAI; 1619 /// Interface to emit optimization remarks. 1620 OptimizationRemarkEmitter *ORE; 1621 1622 /// The interleave access information contains groups of interleaved accesses 1623 /// with the same stride and close to each other. 1624 InterleavedAccessInfo InterleaveInfo; 1625 1626 // --- vectorization state --- // 1627 1628 /// Holds the integer induction variable. This is the counter of the 1629 /// loop. 1630 PHINode *Induction; 1631 /// Holds the reduction variables. 1632 ReductionList Reductions; 1633 /// Holds all of the induction variables that we found in the loop. 1634 /// Notice that inductions don't need to start at zero and that induction 1635 /// variables can be pointers. 1636 InductionList Inductions; 1637 /// Holds the phi nodes that are first-order recurrences. 1638 RecurrenceSet FirstOrderRecurrences; 1639 /// Holds the widest induction type encountered. 1640 Type *WidestIndTy; 1641 1642 /// Allowed outside users. This holds the induction and reduction 1643 /// vars which can be accessed from outside the loop. 1644 SmallPtrSet<Value *, 4> AllowedExit; 1645 1646 /// Holds the instructions known to be uniform after vectorization. 1647 SmallPtrSet<Instruction *, 4> Uniforms; 1648 1649 /// Holds the instructions known to be scalar after vectorization. 1650 SmallPtrSet<Instruction *, 4> Scalars; 1651 1652 /// Can we assume the absence of NaNs. 1653 bool HasFunNoNaNAttr; 1654 1655 /// Vectorization requirements that will go through late-evaluation. 1656 LoopVectorizationRequirements *Requirements; 1657 1658 /// Used to emit an analysis of any legality issues. 1659 LoopVectorizeHints *Hints; 1660 1661 /// While vectorizing these instructions we have to generate a 1662 /// call to the appropriate masked intrinsic 1663 SmallPtrSet<const Instruction *, 8> MaskedOp; 1664 }; 1665 1666 /// LoopVectorizationCostModel - estimates the expected speedups due to 1667 /// vectorization. 1668 /// In many cases vectorization is not profitable. This can happen because of 1669 /// a number of reasons. In this class we mainly attempt to predict the 1670 /// expected speedup/slowdowns due to the supported instruction set. We use the 1671 /// TargetTransformInfo to query the different backends for the cost of 1672 /// different operations. 
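/// As a simplified illustration (the numbers are hypothetical, not from the
/// original source): if the scalar loop body costs 8 units per iteration and
/// the widened body for VF = 4 costs 12 units per vector iteration, then the
/// effective cost at VF = 4 is 12 / 4 = 3 units per original iteration, and
/// vectorizing by a factor of 4 would be deemed profitable.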
1673 class LoopVectorizationCostModel { 1674 public: 1675 LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE, 1676 LoopInfo *LI, LoopVectorizationLegality *Legal, 1677 const TargetTransformInfo &TTI, 1678 const TargetLibraryInfo *TLI, DemandedBits *DB, 1679 AssumptionCache *AC, 1680 OptimizationRemarkEmitter *ORE, const Function *F, 1681 const LoopVectorizeHints *Hints) 1682 : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB), 1683 AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {} 1684 1685 /// Information about vectorization costs. 1686 struct VectorizationFactor { 1687 unsigned Width; // Vector width with best cost 1688 unsigned Cost; // Cost of the loop with that width 1689 }; 1690 /// \return The most profitable vectorization factor and the cost of that VF. 1691 /// This method checks every power of two up to VF. If UserVF is not zero, 1692 /// then this vectorization factor will be selected if vectorization is 1693 /// possible. 1694 VectorizationFactor selectVectorizationFactor(bool OptForSize); 1695 1696 /// \return The size (in bits) of the smallest and widest types in the code 1697 /// that needs to be vectorized. We ignore values that remain scalar such as 1698 /// 64 bit loop indices. 1699 std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); 1700 1701 /// \return The desired interleave count. 1702 /// If interleave count has been specified by metadata it will be returned. 1703 /// Otherwise, the interleave count is computed and returned. VF and LoopCost 1704 /// are the selected vectorization factor and the cost of the selected VF. 1705 unsigned selectInterleaveCount(bool OptForSize, unsigned VF, 1706 unsigned LoopCost); 1707 1708 /// \return The most profitable interleave (unroll) factor. 1709 /// This method finds the best unroll-factor based on register pressure and 1710 /// other parameters. VF and LoopCost are the selected vectorization factor 1711 /// and the cost of the selected VF. 1712 unsigned computeInterleaveCount(bool OptForSize, unsigned VF, 1713 unsigned LoopCost); 1714 1715 /// \brief A struct that represents some properties of the register usage 1716 /// of a loop. 1717 struct RegisterUsage { 1718 /// Holds the number of loop invariant values that are used in the loop. 1719 unsigned LoopInvariantRegs; 1720 /// Holds the maximum number of concurrent live intervals in the loop. 1721 unsigned MaxLocalUsers; 1722 /// Holds the number of instructions in the loop. 1723 unsigned NumInstructions; 1724 }; 1725 1726 /// \return Information about the register usage of the loop for the 1727 /// given vectorization factors. 1728 SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs); 1729 1730 /// Collect values we want to ignore in the cost model. 1731 void collectValuesToIgnore(); 1732 1733 private: 1734 /// The vectorization cost is a combination of the cost itself and a boolean 1735 /// indicating whether any of the contributing operations will actually 1736 /// operate on vector values after type legalization in the backend. If this 1737 /// latter value is false, then all operations will be scalarized 1738 /// (i.e. no vectorization has 1739 /// actually taken place). 1740 1741 typedef std::pair<unsigned, bool> VectorizationCostTy; 1742 1743 /// Returns the expected execution cost. The unit of the cost does 1744 /// not matter because we use the 'cost' units to compare different 1745 /// vector widths. The cost that is returned is *not* normalized by 1746 /// the factor width.
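/// For example (hypothetical values for illustration only), expectedCost(4)
/// might return {48, true}, meaning the loop body is estimated at 48 cost
/// units for VF = 4 and at least one operation is expected to remain a true
/// vector operation after type legalization; a second element of false would
/// mean every operation is expected to be scalarized.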
1747 VectorizationCostTy expectedCost(unsigned VF); 1748 1749 /// Returns the execution time cost of an instruction for a given vector 1750 /// width. A vector width of one means scalar. 1751 VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF); 1752 1753 /// The cost-computation logic from getInstructionCost which provides 1754 /// the vector type as an output parameter. 1755 unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy); 1756 1757 /// Returns whether the instruction is a load or store and will be emitted 1758 /// as a vector operation. 1759 bool isConsecutiveLoadOrStore(Instruction *I); 1760 1761 /// Report an analysis message to assist the user in diagnosing loops that are 1762 /// not vectorized. These are handled as LoopAccessReport rather than 1763 /// VectorizationReport because the << operator of VectorizationReport returns 1764 /// LoopAccessReport. 1765 void emitAnalysis(const LoopAccessReport &Message) const { 1766 emitAnalysisDiag(TheLoop, *Hints, *ORE, Message); 1767 } 1768 1769 public: 1770 /// Map of scalar integer values to the smallest bitwidth they can be legally 1771 /// represented as. The vector equivalents of these values should be truncated 1772 /// to this type. 1773 MapVector<Instruction *, uint64_t> MinBWs; 1774 1775 /// The loop that we evaluate. 1776 Loop *TheLoop; 1777 /// Predicated scalar evolution analysis. 1778 PredicatedScalarEvolution &PSE; 1779 /// Loop Info analysis. 1780 LoopInfo *LI; 1781 /// Vectorization legality. 1782 LoopVectorizationLegality *Legal; 1783 /// Vector target information. 1784 const TargetTransformInfo &TTI; 1785 /// Target Library Info. 1786 const TargetLibraryInfo *TLI; 1787 /// Demanded bits analysis. 1788 DemandedBits *DB; 1789 /// Assumption cache. 1790 AssumptionCache *AC; 1791 /// Interface to emit optimization remarks. 1792 OptimizationRemarkEmitter *ORE; 1793 1794 const Function *TheFunction; 1795 /// Loop Vectorize Hint. 1796 const LoopVectorizeHints *Hints; 1797 /// Values to ignore in the cost model. 1798 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1799 /// Values to ignore in the cost model when VF > 1. 1800 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1801 }; 1802 1803 /// \brief This holds vectorization requirements that must be verified late in 1804 /// the process. The requirements are set by the legality analysis and the 1805 /// cost model. Once vectorization has been determined to be possible and 1806 /// profitable, the requirements can be verified by looking for metadata or 1807 /// compiler options. For example, some loops require FP commutativity which 1808 /// is only allowed if vectorization is explicitly specified or if the 1809 /// fast-math compiler option has been provided. 1810 /// Late evaluation of these requirements allows helpful diagnostics to be 1811 /// composed that tell the user what needs to be done to vectorize the loop, 1812 /// for example, by specifying #pragma clang loop vectorize or -ffast-math. 1813 /// Late evaluation should be used only when diagnostics can be generated that 1814 /// can be followed by a non-expert user. 1815 class LoopVectorizationRequirements { 1816 public: 1817 LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE) 1818 : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr), ORE(ORE) {} 1819 1820 void addUnsafeAlgebraInst(Instruction *I) { 1821 // First unsafe algebra instruction.
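// For illustration (not from the original source), a floating-point
// reduction such as
//   float S = 0.f; for (i = 0; i < n; ++i) S += A[i];
// can only be vectorized by reassociating the additions, which is unsafe
// under strict FP semantics. Recording the first such instruction lets the
// late diagnostics in doesNotMeet point at it when reordering is not
// allowed.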
1822 if (!UnsafeAlgebraInst) 1823 UnsafeAlgebraInst = I; 1824 } 1825 1826 void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; } 1827 1828 bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) { 1829 const char *Name = Hints.vectorizeAnalysisPassName(); 1830 bool Failed = false; 1831 if (UnsafeAlgebraInst && !Hints.allowReordering()) { 1832 ORE.emitOptimizationRemarkAnalysisFPCommute( 1833 Name, UnsafeAlgebraInst->getDebugLoc(), 1834 UnsafeAlgebraInst->getParent(), 1835 VectorizationReport() << "cannot prove it is safe to reorder " 1836 "floating-point operations"); 1837 Failed = true; 1838 } 1839 1840 // Test if runtime memcheck thresholds are exceeded. 1841 bool PragmaThresholdReached = 1842 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 1843 bool ThresholdReached = 1844 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 1845 if ((ThresholdReached && !Hints.allowReordering()) || 1846 PragmaThresholdReached) { 1847 ORE.emitOptimizationRemarkAnalysisAliasing( 1848 Name, L, 1849 VectorizationReport() 1850 << "cannot prove it is safe to reorder memory operations"); 1851 DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 1852 Failed = true; 1853 } 1854 1855 return Failed; 1856 } 1857 1858 private: 1859 unsigned NumRuntimePointerChecks; 1860 Instruction *UnsafeAlgebraInst; 1861 1862 /// Interface to emit optimization remarks. 1863 OptimizationRemarkEmitter &ORE; 1864 }; 1865 1866 static void addAcyclicInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) { 1867 if (L.empty()) { 1868 if (!hasCyclesInLoopBody(L)) 1869 V.push_back(&L); 1870 return; 1871 } 1872 for (Loop *InnerL : L) 1873 addAcyclicInnerLoop(*InnerL, V); 1874 } 1875 1876 /// The LoopVectorize Pass. 1877 struct LoopVectorize : public FunctionPass { 1878 /// Pass identification, replacement for typeid 1879 static char ID; 1880 1881 explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true) 1882 : FunctionPass(ID) { 1883 Impl.DisableUnrolling = NoUnrolling; 1884 Impl.AlwaysVectorize = AlwaysVectorize; 1885 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1886 } 1887 1888 LoopVectorizePass Impl; 1889 1890 bool runOnFunction(Function &F) override { 1891 if (skipFunction(F)) 1892 return false; 1893 1894 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1895 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1896 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1897 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1898 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1899 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1900 auto *TLI = TLIP ? 
&TLIP->getTLI() : nullptr; 1901 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1902 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1903 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1904 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1905 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1906 1907 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1908 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1909 1910 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1911 GetLAA, *ORE); 1912 } 1913 1914 void getAnalysisUsage(AnalysisUsage &AU) const override { 1915 AU.addRequired<AssumptionCacheTracker>(); 1916 AU.addRequiredID(LoopSimplifyID); 1917 AU.addRequiredID(LCSSAID); 1918 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1919 AU.addRequired<DominatorTreeWrapperPass>(); 1920 AU.addRequired<LoopInfoWrapperPass>(); 1921 AU.addRequired<ScalarEvolutionWrapperPass>(); 1922 AU.addRequired<TargetTransformInfoWrapperPass>(); 1923 AU.addRequired<AAResultsWrapperPass>(); 1924 AU.addRequired<LoopAccessLegacyAnalysis>(); 1925 AU.addRequired<DemandedBitsWrapperPass>(); 1926 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1927 AU.addPreserved<LoopInfoWrapperPass>(); 1928 AU.addPreserved<DominatorTreeWrapperPass>(); 1929 AU.addPreserved<BasicAAWrapperPass>(); 1930 AU.addPreserved<GlobalsAAWrapperPass>(); 1931 } 1932 }; 1933 1934 } // end anonymous namespace 1935 1936 //===----------------------------------------------------------------------===// 1937 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 1938 // LoopVectorizationCostModel. 1939 //===----------------------------------------------------------------------===// 1940 1941 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 1942 // We need to place the broadcast of invariant variables outside the loop. 1943 Instruction *Instr = dyn_cast<Instruction>(V); 1944 bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody); 1945 bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr; 1946 1947 // Place the code for broadcasting invariant variables in the new preheader. 1948 IRBuilder<>::InsertPointGuard Guard(Builder); 1949 if (Invariant) 1950 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1951 1952 // Broadcast the scalar into all locations in the vector. 
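// As an illustrative sketch (assuming VF = 4 and an i32 scalar %v; the value
// names are hypothetical), the splat below is emitted as an insertelement
// followed by a shufflevector with an all-zero mask:
//   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %v, i32 0
//   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
//                        <4 x i32> undef, <4 x i32> zeroinitializer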
1953 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 1954 1955 return Shuf; 1956 } 1957 1958 void InnerLoopVectorizer::createVectorIntInductionPHI( 1959 const InductionDescriptor &II, VectorParts &Entry, IntegerType *TruncType) { 1960 Value *Start = II.getStartValue(); 1961 ConstantInt *Step = II.getConstIntStepValue(); 1962 assert(Step && "Can not widen an IV with a non-constant step"); 1963 1964 // Construct the initial value of the vector IV in the vector loop preheader 1965 auto CurrIP = Builder.saveIP(); 1966 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1967 if (TruncType) { 1968 Step = ConstantInt::getSigned(TruncType, Step->getSExtValue()); 1969 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 1970 } 1971 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 1972 Value *SteppedStart = getStepVector(SplatStart, 0, Step); 1973 Builder.restoreIP(CurrIP); 1974 1975 Value *SplatVF = 1976 ConstantVector::getSplat(VF, ConstantInt::getSigned(Start->getType(), 1977 VF * Step->getSExtValue())); 1978 // We may need to add the step a number of times, depending on the unroll 1979 // factor. The last of those goes into the PHI. 1980 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 1981 &*LoopVectorBody->getFirstInsertionPt()); 1982 Instruction *LastInduction = VecInd; 1983 for (unsigned Part = 0; Part < UF; ++Part) { 1984 Entry[Part] = LastInduction; 1985 LastInduction = cast<Instruction>( 1986 Builder.CreateAdd(LastInduction, SplatVF, "step.add")); 1987 } 1988 1989 // Move the last step to the end of the latch block. This ensures consistent 1990 // placement of all induction updates. 1991 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 1992 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 1993 auto *ICmp = cast<Instruction>(Br->getCondition()); 1994 LastInduction->moveBefore(ICmp); 1995 LastInduction->setName("vec.ind.next"); 1996 1997 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 1998 VecInd->addIncoming(LastInduction, LoopVectorLatch); 1999 } 2000 2001 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2002 if (Legal->isScalarAfterVectorization(IV)) 2003 return true; 2004 auto isScalarInst = [&](User *U) -> bool { 2005 auto *I = cast<Instruction>(U); 2006 return (OrigLoop->contains(I) && Legal->isScalarAfterVectorization(I)); 2007 }; 2008 return any_of(IV->users(), isScalarInst); 2009 } 2010 2011 void InnerLoopVectorizer::widenIntInduction(PHINode *IV, VectorParts &Entry, 2012 TruncInst *Trunc) { 2013 2014 auto II = Legal->getInductionVars()->find(IV); 2015 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 2016 2017 auto ID = II->second; 2018 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2019 2020 // If a truncate instruction was provided, get the smaller type. 2021 auto *TruncType = Trunc ? cast<IntegerType>(Trunc->getType()) : nullptr; 2022 2023 // The scalar value to broadcast. This will be derived from the canonical 2024 // induction variable. 2025 Value *ScalarIV = nullptr; 2026 2027 // The step of the induction. 2028 Value *Step = nullptr; 2029 2030 // The value from the original loop to which we are mapping the new induction 2031 // variable. 2032 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2033 2034 // True if we have vectorized the induction variable. 2035 auto VectorizedIV = false; 2036 2037 // Determine if we want a scalar version of the induction variable. 
This is 2038 // true if the induction variable itself is not widened, or if it has at 2039 // least one user in the loop that is not widened. 2040 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal); 2041 2042 // If the induction variable has a constant integer step value, go ahead and 2043 // get it now. 2044 if (ID.getConstIntStepValue()) 2045 Step = ID.getConstIntStepValue(); 2046 2047 // Try to create a new independent vector induction variable. If we can't 2048 // create the phi node, we will splat the scalar induction variable in each 2049 // loop iteration. 2050 if (VF > 1 && IV->getType() == Induction->getType() && Step && 2051 !Legal->isScalarAfterVectorization(EntryVal)) { 2052 createVectorIntInductionPHI(ID, Entry, TruncType); 2053 VectorizedIV = true; 2054 } 2055 2056 // If we haven't yet vectorized the induction variable, or if we will create 2057 // a scalar one, we need to define the scalar induction variable and step 2058 // values. If we were given a truncation type, truncate the canonical 2059 // induction variable and constant step. Otherwise, derive these values from 2060 // the induction descriptor. 2061 if (!VectorizedIV || NeedsScalarIV) { 2062 if (TruncType) { 2063 assert(Step && "Truncation requires constant integer step"); 2064 auto StepInt = cast<ConstantInt>(Step)->getSExtValue(); 2065 ScalarIV = Builder.CreateCast(Instruction::Trunc, Induction, TruncType); 2066 Step = ConstantInt::getSigned(TruncType, StepInt); 2067 } else { 2068 ScalarIV = Induction; 2069 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2070 if (IV != OldInduction) { 2071 ScalarIV = Builder.CreateSExtOrTrunc(ScalarIV, IV->getType()); 2072 ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL); 2073 ScalarIV->setName("offset.idx"); 2074 } 2075 if (!Step) { 2076 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2077 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 2078 &*Builder.GetInsertPoint()); 2079 } 2080 } 2081 } 2082 2083 // If we haven't yet vectorized the induction variable, splat the scalar 2084 // induction variable, and build the necessary step vectors. 2085 if (!VectorizedIV) { 2086 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2087 for (unsigned Part = 0; Part < UF; ++Part) 2088 Entry[Part] = getStepVector(Broadcasted, VF * Part, Step); 2089 } 2090 2091 // If an induction variable is only used for counting loop iterations or 2092 // calculating addresses, it doesn't need to be widened. Create scalar steps 2093 // that can be used by instructions we will later scalarize. Note that the 2094 // addition of the scalar steps will not increase the number of instructions 2095 // in the loop in the common case prior to InstCombine. We will be trading 2096 // one vector extract for each scalar step. 2097 if (NeedsScalarIV) 2098 buildScalarSteps(ScalarIV, Step, EntryVal); 2099 } 2100 2101 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2102 Instruction::BinaryOps BinOp) { 2103 // Create and check the types. 2104 assert(Val->getType()->isVectorTy() && "Must be a vector"); 2105 int VLen = Val->getType()->getVectorNumElements(); 2106 2107 Type *STy = Val->getType()->getScalarType(); 2108 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2109 "Induction Step must be an integer or FP"); 2110 assert(Step->getType() == STy && "Step has wrong type"); 2111 2112 SmallVector<Constant *, 8> Indices; 2113 2114 if (STy->isIntegerTy()) { 2115 // Create a vector of consecutive numbers from zero to VF. 
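// For example (illustrative values: VLen = 4, StartIdx = 0, Step = 2), the
// constant vector built below is <0, 1, 2, 3>; after the splat and multiply
// it becomes <0, 2, 4, 6>, which is then added to Val to form the final
// step vector.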
2116 for (int i = 0; i < VLen; ++i) 2117 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 2118 2119 // Add the consecutive indices to the vector value. 2120 Constant *Cv = ConstantVector::get(Indices); 2121 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 2122 Step = Builder.CreateVectorSplat(VLen, Step); 2123 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2124 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2125 // which can be found from the original scalar operations. 2126 Step = Builder.CreateMul(Cv, Step); 2127 return Builder.CreateAdd(Val, Step, "induction"); 2128 } 2129 2130 // Floating point induction. 2131 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2132 "Binary Opcode should be specified for FP induction"); 2133 // Create a vector of consecutive numbers from zero to VF. 2134 for (int i = 0; i < VLen; ++i) 2135 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 2136 2137 // Add the consecutive indices to the vector value. 2138 Constant *Cv = ConstantVector::get(Indices); 2139 2140 Step = Builder.CreateVectorSplat(VLen, Step); 2141 2142 // Floating point operations had to be 'fast' to enable the induction. 2143 FastMathFlags Flags; 2144 Flags.setUnsafeAlgebra(); 2145 2146 Value *MulOp = Builder.CreateFMul(Cv, Step); 2147 if (isa<Instruction>(MulOp)) 2148 // Have to check, MulOp may be a constant 2149 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 2150 2151 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2152 if (isa<Instruction>(BOp)) 2153 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2154 return BOp; 2155 } 2156 2157 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2158 Value *EntryVal) { 2159 2160 // We shouldn't have to build scalar steps if we aren't vectorizing. 2161 assert(VF > 1 && "VF should be greater than one"); 2162 2163 // Get the value type and ensure it and the step have the same integer type. 2164 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2165 assert(ScalarIVTy->isIntegerTy() && ScalarIVTy == Step->getType() && 2166 "Val and Step should have the same integer type"); 2167 2168 // Compute the scalar steps and save the results in ScalarIVMap. 2169 for (unsigned Part = 0; Part < UF; ++Part) 2170 for (unsigned I = 0; I < VF; ++I) { 2171 auto *StartIdx = ConstantInt::get(ScalarIVTy, VF * Part + I); 2172 auto *Mul = Builder.CreateMul(StartIdx, Step); 2173 auto *Add = Builder.CreateAdd(ScalarIV, Mul); 2174 ScalarIVMap[EntryVal].push_back(Add); 2175 } 2176 } 2177 2178 int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) { 2179 assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr"); 2180 auto *SE = PSE.getSE(); 2181 // Make sure that the pointer does not point to structs. 2182 if (Ptr->getType()->getPointerElementType()->isAggregateType()) 2183 return 0; 2184 2185 // If this value is a pointer induction variable, we know it is consecutive. 2186 PHINode *Phi = dyn_cast_or_null<PHINode>(Ptr); 2187 if (Phi && Inductions.count(Phi)) { 2188 InductionDescriptor II = Inductions[Phi]; 2189 return II.getConsecutiveDirection(); 2190 } 2191 2192 GetElementPtrInst *Gep = getGEPInstruction(Ptr); 2193 if (!Gep) 2194 return 0; 2195 2196 unsigned NumOperands = Gep->getNumOperands(); 2197 Value *GpPtr = Gep->getPointerOperand(); 2198 // If this GEP value is a consecutive pointer induction variable and all of 2199 // the indices are constant, then we know it is consecutive. 
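// For illustration (a hypothetical example, not from the original source),
// this covers accesses such as
//   for (p = A; p != A + n; ++p) *p = ...;
// where the GEP's base pointer is itself a pointer induction variable and
// the remaining indices, if any, are loop-invariant.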
2200 Phi = dyn_cast<PHINode>(GpPtr); 2201 if (Phi && Inductions.count(Phi)) { 2202 2203 // Make sure that the pointer does not point to structs. 2204 PointerType *GepPtrType = cast<PointerType>(GpPtr->getType()); 2205 if (GepPtrType->getElementType()->isAggregateType()) 2206 return 0; 2207 2208 // Make sure that all of the index operands are loop invariant. 2209 for (unsigned i = 1; i < NumOperands; ++i) 2210 if (!SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop)) 2211 return 0; 2212 2213 InductionDescriptor II = Inductions[Phi]; 2214 return II.getConsecutiveDirection(); 2215 } 2216 2217 unsigned InductionOperand = getGEPInductionOperand(Gep); 2218 2219 // Check that all of the gep indices are uniform except for our induction 2220 // operand. 2221 for (unsigned i = 0; i != NumOperands; ++i) 2222 if (i != InductionOperand && 2223 !SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop)) 2224 return 0; 2225 2226 // We can emit wide load/stores only if the last non-zero index is the 2227 // induction variable. 2228 const SCEV *Last = nullptr; 2229 if (!getSymbolicStrides() || !getSymbolicStrides()->count(Gep)) 2230 Last = PSE.getSCEV(Gep->getOperand(InductionOperand)); 2231 else { 2232 // Because of the multiplication by a stride we can have a s/zext cast. 2233 // We are going to replace this stride by 1 so the cast is safe to ignore. 2234 // 2235 // %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] 2236 // %0 = trunc i64 %indvars.iv to i32 2237 // %mul = mul i32 %0, %Stride1 2238 // %idxprom = zext i32 %mul to i64 << Safe cast. 2239 // %arrayidx = getelementptr inbounds i32* %B, i64 %idxprom 2240 // 2241 Last = replaceSymbolicStrideSCEV(PSE, *getSymbolicStrides(), 2242 Gep->getOperand(InductionOperand), Gep); 2243 if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(Last)) 2244 Last = 2245 (C->getSCEVType() == scSignExtend || C->getSCEVType() == scZeroExtend) 2246 ? C->getOperand() 2247 : Last; 2248 } 2249 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Last)) { 2250 const SCEV *Step = AR->getStepRecurrence(*SE); 2251 2252 // The memory is consecutive because the last index is consecutive 2253 // and all other indices are loop invariant. 2254 if (Step->isOne()) 2255 return 1; 2256 if (Step->isAllOnesValue()) 2257 return -1; 2258 } 2259 2260 return 0; 2261 } 2262 2263 bool LoopVectorizationLegality::isUniform(Value *V) { 2264 return LAI->isUniform(V); 2265 } 2266 2267 InnerLoopVectorizer::VectorParts & 2268 InnerLoopVectorizer::getVectorValue(Value *V) { 2269 assert(V != Induction && "The new induction variable should not be used."); 2270 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2271 2272 // If we have a stride that is replaced by one, do it here. 2273 if (Legal->hasStride(V)) 2274 V = ConstantInt::get(V->getType(), 1); 2275 2276 // If we have this scalar in the map, return it. 2277 if (WidenMap.has(V)) 2278 return WidenMap.get(V); 2279 2280 // If this scalar is unknown, assume that it is a constant or that it is 2281 // loop invariant. Broadcast V and save the value for future uses. 
2282 Value *B = getBroadcastInstrs(V); 2283 return WidenMap.splat(V, B); 2284 } 2285 2286 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2287 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2288 SmallVector<Constant *, 8> ShuffleMask; 2289 for (unsigned i = 0; i < VF; ++i) 2290 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 2291 2292 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 2293 ConstantVector::get(ShuffleMask), 2294 "reverse"); 2295 } 2296 2297 // Get a mask to interleave \p NumVec vectors into a wide vector. 2298 // I.e. <0, VF, VF*2, ..., VF*(NumVec-1), 1, VF+1, VF*2+1, ...> 2299 // E.g. For 2 interleaved vectors, if VF is 4, the mask is: 2300 // <0, 4, 1, 5, 2, 6, 3, 7> 2301 static Constant *getInterleavedMask(IRBuilder<> &Builder, unsigned VF, 2302 unsigned NumVec) { 2303 SmallVector<Constant *, 16> Mask; 2304 for (unsigned i = 0; i < VF; i++) 2305 for (unsigned j = 0; j < NumVec; j++) 2306 Mask.push_back(Builder.getInt32(j * VF + i)); 2307 2308 return ConstantVector::get(Mask); 2309 } 2310 2311 // Get the strided mask starting from index \p Start. 2312 // I.e. <Start, Start + Stride, ..., Start + Stride*(VF-1)> 2313 static Constant *getStridedMask(IRBuilder<> &Builder, unsigned Start, 2314 unsigned Stride, unsigned VF) { 2315 SmallVector<Constant *, 16> Mask; 2316 for (unsigned i = 0; i < VF; i++) 2317 Mask.push_back(Builder.getInt32(Start + i * Stride)); 2318 2319 return ConstantVector::get(Mask); 2320 } 2321 2322 // Get a mask of two parts: the first part consists of sequential integers 2323 // starting from 0; the second part consists of UNDEFs. 2324 // I.e. <0, 1, 2, ..., NumInt - 1, undef, ..., undef> 2325 static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned NumInt, 2326 unsigned NumUndef) { 2327 SmallVector<Constant *, 16> Mask; 2328 for (unsigned i = 0; i < NumInt; i++) 2329 Mask.push_back(Builder.getInt32(i)); 2330 2331 Constant *Undef = UndefValue::get(Builder.getInt32Ty()); 2332 for (unsigned i = 0; i < NumUndef; i++) 2333 Mask.push_back(Undef); 2334 2335 return ConstantVector::get(Mask); 2336 } 2337 2338 // Concatenate two vectors with the same element type. The 2nd vector should 2339 // not have more elements than the 1st vector. If the 2nd vector has fewer 2340 // elements, extend it with UNDEFs. 2341 static Value *ConcatenateTwoVectors(IRBuilder<> &Builder, Value *V1, 2342 Value *V2) { 2343 VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType()); 2344 VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType()); 2345 assert(VecTy1 && VecTy2 && 2346 VecTy1->getScalarType() == VecTy2->getScalarType() && 2347 "Expect two vectors with the same element type"); 2348 2349 unsigned NumElts1 = VecTy1->getNumElements(); 2350 unsigned NumElts2 = VecTy2->getNumElements(); 2351 assert(NumElts1 >= NumElts2 && "The first vector must not have fewer elements"); 2352 2353 if (NumElts1 > NumElts2) { 2354 // Extend with UNDEFs. 2355 Constant *ExtMask = 2356 getSequentialMask(Builder, NumElts2, NumElts1 - NumElts2); 2357 V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask); 2358 } 2359 2360 Constant *Mask = getSequentialMask(Builder, NumElts1 + NumElts2, 0); 2361 return Builder.CreateShuffleVector(V1, V2, Mask); 2362 } 2363 2364 // Concatenate vectors in the given list. All vectors have the same type.
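// For example (illustrative), concatenating four <4 x i32> vectors A, B, C,
// and D proceeds pairwise: the first round produces the <8 x i32> vectors
// AB and CD, and the second round produces the final <16 x i32> vector ABCD.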
2365 static Value *ConcatenateVectors(IRBuilder<> &Builder, 2366 ArrayRef<Value *> InputList) { 2367 unsigned NumVec = InputList.size(); 2368 assert(NumVec > 1 && "Should be at least two vectors"); 2369 2370 SmallVector<Value *, 8> ResList; 2371 ResList.append(InputList.begin(), InputList.end()); 2372 do { 2373 SmallVector<Value *, 8> TmpList; 2374 for (unsigned i = 0; i < NumVec - 1; i += 2) { 2375 Value *V0 = ResList[i], *V1 = ResList[i + 1]; 2376 assert((V0->getType() == V1->getType() || i == NumVec - 2) && 2377 "Only the last vector may have a different type"); 2378 2379 TmpList.push_back(ConcatenateTwoVectors(Builder, V0, V1)); 2380 } 2381 2382 // Push the last vector if the total number of vectors is odd. 2383 if (NumVec % 2 != 0) 2384 TmpList.push_back(ResList[NumVec - 1]); 2385 2386 ResList = TmpList; 2387 NumVec = ResList.size(); 2388 } while (NumVec > 1); 2389 2390 return ResList[0]; 2391 } 2392 2393 // Try to vectorize the interleave group that \p Instr belongs to. 2394 // 2395 // E.g. Translate the following interleaved load group (factor = 3): 2396 // for (i = 0; i < N; i+=3) { 2397 // R = Pic[i]; // Member of index 0 2398 // G = Pic[i+1]; // Member of index 1 2399 // B = Pic[i+2]; // Member of index 2 2400 // ... // do something to R, G, B 2401 // } 2402 // To: 2403 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2404 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 2405 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 2406 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 2407 // 2408 // Or translate the following interleaved store group (factor = 3): 2409 // for (i = 0; i < N; i+=3) { 2410 // ... do something to R, G, B 2411 // Pic[i] = R; // Member of index 0 2412 // Pic[i+1] = G; // Member of index 1 2413 // Pic[i+2] = B; // Member of index 2 2414 // } 2415 // To: 2416 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2417 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 2418 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2419 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2420 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2421 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) { 2422 const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr); 2423 assert(Group && "Failed to get an interleaved access group."); 2424 2425 // Skip if the current instruction is not the insert position. 2426 if (Instr != Group->getInsertPos()) 2427 return; 2428 2429 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2430 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2431 Value *Ptr = getPointerOperand(Instr); 2432 2433 // Prepare for the vector type of the interleaved load/store. 2434 Type *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 2435 unsigned InterleaveFactor = Group->getFactor(); 2436 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF); 2437 Type *PtrTy = VecTy->getPointerTo(Ptr->getType()->getPointerAddressSpace()); 2438 2439 // Prepare for the new pointers. 2440 setDebugLocFromInst(Builder, Ptr); 2441 VectorParts &PtrParts = getVectorValue(Ptr); 2442 SmallVector<Value *, 2> NewPtrs; 2443 unsigned Index = Group->getIndex(Instr); 2444 for (unsigned Part = 0; Part < UF; Part++) { 2445 // Extract the pointer for the current instruction from the pointer vector. A 2446 // reverse access uses the pointer in the last lane. 2447 Value *NewPtr = Builder.CreateExtractElement( 2448 PtrParts[Part], 2449 Group->isReverse() ?
Builder.getInt32(VF - 1) : Builder.getInt32(0)); 2450 2451 // Note that the current instruction could be at any member index. We need 2452 // to adjust the address to that of the member at index 0. 2453 // 2454 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2455 // b = A[i]; // Member of index 0 2456 // The current pointer points to A[i+1]; adjust it to A[i]. 2457 // 2458 // E.g. A[i+1] = a; // Member of index 1 2459 // A[i] = b; // Member of index 0 2460 // A[i+2] = c; // Member of index 2 (Current instruction) 2461 // The current pointer points to A[i+2]; adjust it to A[i]. 2462 NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index)); 2463 2464 // Cast to the vector pointer type. 2465 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy)); 2466 } 2467 2468 setDebugLocFromInst(Builder, Instr); 2469 Value *UndefVec = UndefValue::get(VecTy); 2470 2471 // Vectorize the interleaved load group. 2472 if (LI) { 2473 for (unsigned Part = 0; Part < UF; Part++) { 2474 Instruction *NewLoadInstr = Builder.CreateAlignedLoad( 2475 NewPtrs[Part], Group->getAlignment(), "wide.vec"); 2476 2477 for (unsigned i = 0; i < InterleaveFactor; i++) { 2478 Instruction *Member = Group->getMember(i); 2479 2480 // Skip the gaps in the group. 2481 if (!Member) 2482 continue; 2483 2484 Constant *StrideMask = getStridedMask(Builder, i, InterleaveFactor, VF); 2485 Value *StridedVec = Builder.CreateShuffleVector( 2486 NewLoadInstr, UndefVec, StrideMask, "strided.vec"); 2487 2488 // If this member has a different type, cast the result to that type. 2489 if (Member->getType() != ScalarTy) { 2490 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2491 StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy); 2492 } 2493 2494 VectorParts &Entry = WidenMap.get(Member); 2495 Entry[Part] = 2496 Group->isReverse() ? reverseVector(StridedVec) : StridedVec; 2497 } 2498 2499 addMetadata(NewLoadInstr, Instr); 2500 } 2501 return; 2502 } 2503 2504 // The sub vector type for the current instruction. 2505 VectorType *SubVT = VectorType::get(ScalarTy, VF); 2506 2507 // Vectorize the interleaved store group. 2508 for (unsigned Part = 0; Part < UF; Part++) { 2509 // Collect the stored vector from each member. 2510 SmallVector<Value *, 4> StoredVecs; 2511 for (unsigned i = 0; i < InterleaveFactor; i++) { 2512 // An interleaved store group doesn't allow gaps, so each index has a member. 2513 Instruction *Member = Group->getMember(i); 2514 assert(Member && "Failed to get a member from an interleaved store group"); 2515 2516 Value *StoredVec = 2517 getVectorValue(cast<StoreInst>(Member)->getValueOperand())[Part]; 2518 if (Group->isReverse()) 2519 StoredVec = reverseVector(StoredVec); 2520 2521 // If this member has a different type, cast it to a unified type. 2522 if (StoredVec->getType() != SubVT) 2523 StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT); 2524 2525 StoredVecs.push_back(StoredVec); 2526 } 2527 2528 // Concatenate all vectors into a wide vector. 2529 Value *WideVec = ConcatenateVectors(Builder, StoredVecs); 2530 2531 // Interleave the elements in the wide vector. 2532 Constant *IMask = getInterleavedMask(Builder, VF, InterleaveFactor); 2533 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask, 2534 "interleaved.vec"); 2535 2536 Instruction *NewStoreInstr = 2537 Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment()); 2538 addMetadata(NewStoreInstr, Instr); 2539 } 2540 } 2541 2542 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) { 2543 // Attempt to issue a wide load.
2544 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2545 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2546 2547 assert((LI || SI) && "Invalid Load/Store instruction"); 2548 2549 // Try to vectorize the interleave group if this access is interleaved. 2550 if (Legal->isAccessInterleaved(Instr)) 2551 return vectorizeInterleaveGroup(Instr); 2552 2553 Type *ScalarDataTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 2554 Type *DataTy = VectorType::get(ScalarDataTy, VF); 2555 Value *Ptr = getPointerOperand(Instr); 2556 unsigned Alignment = LI ? LI->getAlignment() : SI->getAlignment(); 2557 // An alignment of 0 means target ABI alignment. We need to use the scalar's 2558 // target ABI alignment in such a case. 2559 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2560 if (!Alignment) 2561 Alignment = DL.getABITypeAlignment(ScalarDataTy); 2562 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2563 uint64_t ScalarAllocatedSize = DL.getTypeAllocSize(ScalarDataTy); 2564 uint64_t VectorElementSize = DL.getTypeStoreSize(DataTy) / VF; 2565 2566 if (SI && Legal->blockNeedsPredication(SI->getParent()) && 2567 !Legal->isMaskRequired(SI)) 2568 return scalarizeInstruction(Instr, true); 2569 2570 if (ScalarAllocatedSize != VectorElementSize) 2571 return scalarizeInstruction(Instr); 2572 2573 // If the pointer is loop invariant, scalarize the load. 2574 if (LI && Legal->isUniform(Ptr)) 2575 return scalarizeInstruction(Instr); 2576 2577 // If the pointer is non-consecutive and gather/scatter is not supported, 2578 // scalarize the instruction. 2579 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 2580 bool Reverse = ConsecutiveStride < 0; 2581 bool CreateGatherScatter = 2582 !ConsecutiveStride && ((LI && Legal->isLegalMaskedGather(ScalarDataTy)) || 2583 (SI && Legal->isLegalMaskedScatter(ScalarDataTy))); 2584 2585 if (!ConsecutiveStride && !CreateGatherScatter) 2586 return scalarizeInstruction(Instr); 2587 2588 Constant *Zero = Builder.getInt32(0); 2589 VectorParts &Entry = WidenMap.get(Instr); 2590 VectorParts VectorGep; 2591 2592 // Handle consecutive loads/stores. 2593 GetElementPtrInst *Gep = getGEPInstruction(Ptr); 2594 if (ConsecutiveStride) { 2595 if (Gep && Legal->isInductionVariable(Gep->getPointerOperand())) { 2596 setDebugLocFromInst(Builder, Gep); 2597 Value *PtrOperand = Gep->getPointerOperand(); 2598 Value *FirstBasePtr = getVectorValue(PtrOperand)[0]; 2599 FirstBasePtr = Builder.CreateExtractElement(FirstBasePtr, Zero); 2600 2601 // Create the new GEP with the new induction variable. 2602 GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone()); 2603 Gep2->setOperand(0, FirstBasePtr); 2604 Gep2->setName("gep.indvar.base"); 2605 Ptr = Builder.Insert(Gep2); 2606 } else if (Gep) { 2607 setDebugLocFromInst(Builder, Gep); 2608 assert(PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getPointerOperand()), 2609 OrigLoop) && 2610 "Base ptr must be invariant"); 2611 // The last index does not have to be the induction. It can be 2612 // consecutive and be a function of the index. For example, A[I+1]. 2613 unsigned NumOperands = Gep->getNumOperands(); 2614 unsigned InductionOperand = getGEPInductionOperand(Gep); 2615 // Create the new GEP with the new induction variable.
2616 GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone()); 2617 2618 for (unsigned i = 0; i < NumOperands; ++i) { 2619 Value *GepOperand = Gep->getOperand(i); 2620 Instruction *GepOperandInst = dyn_cast<Instruction>(GepOperand); 2621 2622 // Update the last index or a loop-invariant instruction anchored in the loop. 2623 if (i == InductionOperand || 2624 (GepOperandInst && OrigLoop->contains(GepOperandInst))) { 2625 assert((i == InductionOperand || 2626 PSE.getSE()->isLoopInvariant(PSE.getSCEV(GepOperandInst), 2627 OrigLoop)) && 2628 "Must be last index or loop invariant"); 2629 2630 VectorParts &GEPParts = getVectorValue(GepOperand); 2631 2632 // If GepOperand is an induction variable, and there's a scalarized 2633 // version of it available, use it. Otherwise, we will need to create 2634 // an extractelement instruction. 2635 Value *Index = ScalarIVMap.count(GepOperand) 2636 ? ScalarIVMap[GepOperand][0] 2637 : Builder.CreateExtractElement(GEPParts[0], Zero); 2638 2639 Gep2->setOperand(i, Index); 2640 Gep2->setName("gep.indvar.idx"); 2641 } 2642 } 2643 Ptr = Builder.Insert(Gep2); 2644 } else { // No GEP 2645 // Use the induction element ptr. 2646 assert(isa<PHINode>(Ptr) && "Invalid induction ptr"); 2647 setDebugLocFromInst(Builder, Ptr); 2648 VectorParts &PtrVal = getVectorValue(Ptr); 2649 Ptr = Builder.CreateExtractElement(PtrVal[0], Zero); 2650 } 2651 } else { 2652 // At this point we need a vector version of the GEP for a gather or scatter. 2653 assert(CreateGatherScatter && "The instruction should be scalarized"); 2654 if (Gep) { 2655 // Vectorize the GEP across UF parts. We want to get a vector value for the base 2656 // and each index that's defined inside the loop, even if it is 2657 // loop-invariant but wasn't hoisted out. Otherwise we want to keep them 2658 // scalar. 2659 SmallVector<VectorParts, 4> OpsV; 2660 for (Value *Op : Gep->operands()) { 2661 Instruction *SrcInst = dyn_cast<Instruction>(Op); 2662 if (SrcInst && OrigLoop->contains(SrcInst)) 2663 OpsV.push_back(getVectorValue(Op)); 2664 else 2665 OpsV.push_back(VectorParts(UF, Op)); 2666 } 2667 for (unsigned Part = 0; Part < UF; ++Part) { 2668 SmallVector<Value *, 4> Ops; 2669 Value *GEPBasePtr = OpsV[0][Part]; 2670 for (unsigned i = 1; i < Gep->getNumOperands(); i++) 2671 Ops.push_back(OpsV[i][Part]); 2672 Value *NewGep = Builder.CreateGEP(GEPBasePtr, Ops, "VectorGep"); 2673 cast<GetElementPtrInst>(NewGep)->setIsInBounds(Gep->isInBounds()); 2674 assert(NewGep->getType()->isVectorTy() && "Expected vector GEP"); 2675 2676 NewGep = 2677 Builder.CreateBitCast(NewGep, VectorType::get(Ptr->getType(), VF)); 2678 VectorGep.push_back(NewGep); 2679 } 2680 } else 2681 VectorGep = getVectorValue(Ptr); 2682 } 2683 2684 VectorParts Mask = createBlockInMask(Instr->getParent()); 2685 // Handle Stores: 2686 if (SI) { 2687 assert(!Legal->isUniform(SI->getPointerOperand()) && 2688 "We do not allow storing to uniform addresses"); 2689 setDebugLocFromInst(Builder, SI); 2690 // We don't want to update the value in the map as it might be used in 2691 // another expression. So don't use a reference type for "StoredVal". 2692 VectorParts StoredVal = getVectorValue(SI->getValueOperand()); 2693 2694 for (unsigned Part = 0; Part < UF; ++Part) { 2695 Instruction *NewSI = nullptr; 2696 if (CreateGatherScatter) { 2697 Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr; 2698 NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part], 2699 Alignment, MaskPart); 2700 } else { 2701 // Calculate the pointer for the specific unroll-part.
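// For example (illustrative, assuming VF = 4 and UF = 2), part 0 uses the
// pointer Ptr + 0 and part 1 uses Ptr + 4, so together the parts cover
// eight consecutive elements; for a reverse access the offsets are adjusted
// below so each wide store starts at the last element it covers.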
2702 Value *PartPtr = 2703 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 2704 2705 if (Reverse) { 2706 // If we store to reverse consecutive memory locations, then we need 2707 // to reverse the order of elements in the stored value. 2708 StoredVal[Part] = reverseVector(StoredVal[Part]); 2709 // If the address is consecutive but reversed, then the 2710 // wide store needs to start at the last vector element. 2711 PartPtr = 2712 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 2713 PartPtr = 2714 Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 2715 Mask[Part] = reverseVector(Mask[Part]); 2716 } 2717 2718 Value *VecPtr = 2719 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2720 2721 if (Legal->isMaskRequired(SI)) 2722 NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment, 2723 Mask[Part]); 2724 else 2725 NewSI = 2726 Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment); 2727 } 2728 addMetadata(NewSI, SI); 2729 } 2730 return; 2731 } 2732 2733 // Handle loads. 2734 assert(LI && "Must have a load instruction"); 2735 setDebugLocFromInst(Builder, LI); 2736 for (unsigned Part = 0; Part < UF; ++Part) { 2737 Instruction *NewLI; 2738 if (CreateGatherScatter) { 2739 Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr; 2740 NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, MaskPart, 2741 0, "wide.masked.gather"); 2742 Entry[Part] = NewLI; 2743 } else { 2744 // Calculate the pointer for the specific unroll-part. 2745 Value *PartPtr = 2746 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 2747 2748 if (Reverse) { 2749 // If the address is consecutive but reversed, then the 2750 // wide load needs to start at the last vector element. 2751 PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 2752 PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 2753 Mask[Part] = reverseVector(Mask[Part]); 2754 } 2755 2756 Value *VecPtr = 2757 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2758 if (Legal->isMaskRequired(LI)) 2759 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 2760 UndefValue::get(DataTy), 2761 "wide.masked.load"); 2762 else 2763 NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load"); 2764 Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI; 2765 } 2766 addMetadata(NewLI, LI); 2767 } 2768 } 2769 2770 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2771 bool IfPredicateInstr) { 2772 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2773 DEBUG(dbgs() << "LV: Scalarizing" 2774 << (IfPredicateInstr ? " and predicating:" : ":") << *Instr 2775 << '\n'); 2776 // Holds vector parameters or scalars, in case of uniform vals. 2777 SmallVector<VectorParts, 4> Params; 2778 2779 setDebugLocFromInst(Builder, Instr); 2780 2781 // Find all of the vectorized parameters. 2782 for (Value *SrcOp : Instr->operands()) { 2783 // If we are accessing the old induction variable, use the new one. 2784 if (SrcOp == OldInduction) { 2785 Params.push_back(getVectorValue(SrcOp)); 2786 continue; 2787 } 2788 2789 // Try using previously calculated values. 2790 auto *SrcInst = dyn_cast<Instruction>(SrcOp); 2791 2792 // If the src is an instruction that appeared earlier in the basic block, 2793 // then it should already be vectorized. 
2794 if (SrcInst && OrigLoop->contains(SrcInst)) { 2795 assert(WidenMap.has(SrcInst) && "Source operand is unavailable"); 2796 // The parameter is a vector value from earlier. 2797 Params.push_back(WidenMap.get(SrcInst)); 2798 } else { 2799 // The parameter is a scalar from outside the loop. Maybe even a constant. 2800 VectorParts Scalars; 2801 Scalars.append(UF, SrcOp); 2802 Params.push_back(Scalars); 2803 } 2804 } 2805 2806 assert(Params.size() == Instr->getNumOperands() && 2807 "Invalid number of operands"); 2808 2809 // Does this instruction return a value? 2810 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2811 2812 Value *UndefVec = 2813 IsVoidRetTy ? nullptr 2814 : UndefValue::get(VectorType::get(Instr->getType(), VF)); 2815 // Create a new entry in the WidenMap and initialize it to Undef or Null. 2816 VectorParts &VecResults = WidenMap.splat(Instr, UndefVec); 2817 2818 VectorParts Cond; 2819 if (IfPredicateInstr) { 2820 assert(Instr->getParent()->getSinglePredecessor() && 2821 "Only support single predecessor blocks"); 2822 Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(), 2823 Instr->getParent()); 2824 } 2825 2826 // For each vector unroll 'part': 2827 for (unsigned Part = 0; Part < UF; ++Part) { 2828 // For each scalar that we create: 2829 for (unsigned Width = 0; Width < VF; ++Width) { 2830 2831 // Start if-block. 2832 Value *Cmp = nullptr; 2833 if (IfPredicateInstr) { 2834 Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Width)); 2835 Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp, 2836 ConstantInt::get(Cmp->getType(), 1)); 2837 } 2838 2839 Instruction *Cloned = Instr->clone(); 2840 if (!IsVoidRetTy) 2841 Cloned->setName(Instr->getName() + ".cloned"); 2842 // Replace the operands of the cloned instructions with extracted scalars. 2843 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 2844 2845 // If the operand is an induction variable, and there's a scalarized 2846 // version of it available, use it. Otherwise, we will need to create 2847 // an extractelement instruction if vectorizing. 2848 auto *NewOp = Params[op][Part]; 2849 auto *ScalarOp = Instr->getOperand(op); 2850 if (ScalarIVMap.count(ScalarOp)) 2851 NewOp = ScalarIVMap[ScalarOp][VF * Part + Width]; 2852 else if (NewOp->getType()->isVectorTy()) 2853 NewOp = Builder.CreateExtractElement(NewOp, Builder.getInt32(Width)); 2854 Cloned->setOperand(op, NewOp); 2855 } 2856 addNewMetadata(Cloned, Instr); 2857 2858 // Place the cloned scalar in the new loop. 2859 Builder.Insert(Cloned); 2860 2861 // If we just cloned a new assumption, add it to the assumption cache. 2862 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 2863 if (II->getIntrinsicID() == Intrinsic::assume) 2864 AC->registerAssumption(II); 2865 2866 // If the original scalar returns a value, we need to place it in a vector 2867 // so that future users will be able to use it. 2868 if (!IsVoidRetTy) 2869 VecResults[Part] = Builder.CreateInsertElement(VecResults[Part], Cloned, 2870 Builder.getInt32(Width)); 2871 // End if-block. 2872 if (IfPredicateInstr) 2873 PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp)); 2874 } 2875 } 2876 } 2877 2878 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 2879 Value *End, Value *Step, 2880 Instruction *DL) { 2881 BasicBlock *Header = L->getHeader(); 2882 BasicBlock *Latch = L->getLoopLatch(); 2883 // As we're just creating this loop, it's possible no latch exists 2884 // yet. If so, use the header as this will be a single block loop.
2885 if (!Latch) 2886 Latch = Header; 2887 2888 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 2889 setDebugLocFromInst(Builder, getDebugLocFromInstOrOperands(OldInduction)); 2890 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 2891 2892 Builder.SetInsertPoint(Latch->getTerminator()); 2893 2894 // Create i+1 and fill the PHINode. 2895 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 2896 Induction->addIncoming(Start, L->getLoopPreheader()); 2897 Induction->addIncoming(Next, Latch); 2898 // Create the compare. 2899 Value *ICmp = Builder.CreateICmpEQ(Next, End); 2900 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 2901 2902 // Now we have two terminators. Remove the old one from the block. 2903 Latch->getTerminator()->eraseFromParent(); 2904 2905 return Induction; 2906 } 2907 2908 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 2909 if (TripCount) 2910 return TripCount; 2911 2912 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2913 // Find the loop boundaries. 2914 ScalarEvolution *SE = PSE.getSE(); 2915 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2916 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 2917 "Invalid loop count"); 2918 2919 Type *IdxTy = Legal->getWidestInductionType(); 2920 2921 // The exit count might have the type of i64 while the phi is i32. This can 2922 // happen if we have an induction variable that is sign extended before the 2923 // compare. The only way that we get a backedge taken count is that the 2924 // induction variable was signed and as such will not overflow. In such a case 2925 // truncation is legal. 2926 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() > 2927 IdxTy->getPrimitiveSizeInBits()) 2928 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2929 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2930 2931 // Get the total trip count from the count by adding 1. 2932 const SCEV *ExitCount = SE->getAddExpr( 2933 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2934 2935 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2936 2937 // Expand the trip count and place the new instructions in the preheader. 2938 // Notice that the pre-header does not change, only the loop body. 2939 SCEVExpander Exp(*SE, DL, "induction"); 2940 2941 // Count holds the overall loop count (N). 2942 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2943 L->getLoopPreheader()->getTerminator()); 2944 2945 if (TripCount->getType()->isPointerTy()) 2946 TripCount = 2947 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2948 L->getLoopPreheader()->getTerminator()); 2949 2950 return TripCount; 2951 } 2952 2953 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 2954 if (VectorTripCount) 2955 return VectorTripCount; 2956 2957 Value *TC = getOrCreateTripCount(L); 2958 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2959 2960 // Now we need to generate the expression for the part of the loop that the 2961 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2962 // iterations are not required for correctness, or N - Step, otherwise. Step 2963 // is equal to the vectorization factor (number of SIMD elements) times the 2964 // unroll factor (number of SIMD instructions). 
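  // For example (illustrative values): with VF = 4 and UF = 2, Step = 8. A
  // trip count of N = 21 gives n.mod.vf = 21 % 8 = 5, so the vector loop runs
  // n.vec = 16 iterations and the scalar remainder loop handles the other 5.
  // If N = 24 and a scalar epilogue is required, the remainder is bumped from
  // 0 to 8 so that the epilogue runs at least once (again n.vec = 16).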
2965   Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
2966   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2967
2968   // If there is a non-reversed interleaved group that may speculatively access
2969   // memory out-of-bounds, we need to ensure that there will be at least one
2970   // iteration of the scalar epilogue loop. Thus, if the step evenly divides
2971   // the trip count, we set the remainder to be equal to the step. If the step
2972   // does not evenly divide the trip count, no adjustment is necessary since
2973   // there will already be scalar iterations. Note that the minimum iterations
2974   // check ensures that N >= Step.
2975   if (VF > 1 && Legal->requiresScalarEpilogue()) {
2976     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2977     R = Builder.CreateSelect(IsZero, Step, R);
2978   }
2979
2980   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2981
2982   return VectorTripCount;
2983 }
2984
2985 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2986                                                          BasicBlock *Bypass) {
2987   Value *Count = getOrCreateTripCount(L);
2988   BasicBlock *BB = L->getLoopPreheader();
2989   IRBuilder<> Builder(BB->getTerminator());
2990
2991   // Generate code to check that the loop's trip count, which we computed by
2992   // adding one to the backedge-taken count, will not overflow.
2993   Value *CheckMinIters = Builder.CreateICmpULT(
2994       Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");
2995
2996   BasicBlock *NewBB =
2997       BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked");
2998   // Update dominator tree immediately if the generated block is a
2999   // LoopBypassBlock because SCEV expansions to generate loop bypass
3000   // checks may query it before the current function is finished.
3001   DT->addNewBlock(NewBB, BB);
3002   if (L->getParentLoop())
3003     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3004   ReplaceInstWithInst(BB->getTerminator(),
3005                       BranchInst::Create(Bypass, NewBB, CheckMinIters));
3006   LoopBypassBlocks.push_back(BB);
3007 }
3008
3009 void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L,
3010                                                      BasicBlock *Bypass) {
3011   Value *TC = getOrCreateVectorTripCount(L);
3012   BasicBlock *BB = L->getLoopPreheader();
3013   IRBuilder<> Builder(BB->getTerminator());
3014
3015   // Now, compare the new count to zero. If it is zero skip the vector loop and
3016   // jump to the scalar loop.
3017   Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
3018                                     "cmp.zero");
3019
3020   // Split the block to create the vector preheader. The zero-trip-count check
3021   // above lets us bypass the vector loop entirely in that case.
3022   BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3023   // Update dominator tree immediately if the generated block is a
3024   // LoopBypassBlock because SCEV expansions to generate loop bypass
3025   // checks may query it before the current function is finished.
3026   DT->addNewBlock(NewBB, BB);
3027   if (L->getParentLoop())
3028     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3029   ReplaceInstWithInst(BB->getTerminator(),
3030                       BranchInst::Create(Bypass, NewBB, Cmp));
3031   LoopBypassBlocks.push_back(BB);
3032 }
3033
3034 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3035   BasicBlock *BB = L->getLoopPreheader();
3036
3037   // Generate the code to check the SCEV assumptions that we made.
3038   // We want the new basic block to start at the first instruction in a
3039   // sequence of instructions that form a check.
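  // For example (an illustrative sketch, assuming the union predicate holds a
  // single "%stride == 1" assumption), the expanded check and branch may look
  // like:
  //
  //   vector.scevcheck:
  //     %check = icmp ne i64 %stride, 1
  //     br i1 %check, label %scalar.ph, label %vector.ph
  //
  // i.e. if the assumption does not hold at runtime, we branch to the scalar
  // loop (the bypass block).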
3040 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(), 3041 "scev.check"); 3042 Value *SCEVCheck = 3043 Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator()); 3044 3045 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck)) 3046 if (C->isZero()) 3047 return; 3048 3049 // Create a new block containing the stride check. 3050 BB->setName("vector.scevcheck"); 3051 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 3052 // Update dominator tree immediately if the generated block is a 3053 // LoopBypassBlock because SCEV expansions to generate loop bypass 3054 // checks may query it before the current function is finished. 3055 DT->addNewBlock(NewBB, BB); 3056 if (L->getParentLoop()) 3057 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 3058 ReplaceInstWithInst(BB->getTerminator(), 3059 BranchInst::Create(Bypass, NewBB, SCEVCheck)); 3060 LoopBypassBlocks.push_back(BB); 3061 AddedSafetyChecks = true; 3062 } 3063 3064 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) { 3065 BasicBlock *BB = L->getLoopPreheader(); 3066 3067 // Generate the code that checks in runtime if arrays overlap. We put the 3068 // checks into a separate block to make the more common case of few elements 3069 // faster. 3070 Instruction *FirstCheckInst; 3071 Instruction *MemRuntimeCheck; 3072 std::tie(FirstCheckInst, MemRuntimeCheck) = 3073 Legal->getLAI()->addRuntimeChecks(BB->getTerminator()); 3074 if (!MemRuntimeCheck) 3075 return; 3076 3077 // Create a new block containing the memory check. 3078 BB->setName("vector.memcheck"); 3079 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 3080 // Update dominator tree immediately if the generated block is a 3081 // LoopBypassBlock because SCEV expansions to generate loop bypass 3082 // checks may query it before the current function is finished. 3083 DT->addNewBlock(NewBB, BB); 3084 if (L->getParentLoop()) 3085 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 3086 ReplaceInstWithInst(BB->getTerminator(), 3087 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck)); 3088 LoopBypassBlocks.push_back(BB); 3089 AddedSafetyChecks = true; 3090 3091 // We currently don't use LoopVersioning for the actual loop cloning but we 3092 // still use it to add the noalias metadata. 3093 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 3094 PSE.getSE()); 3095 LVer->prepareNoAliasMetadata(); 3096 } 3097 3098 void InnerLoopVectorizer::createEmptyLoop() { 3099 /* 3100 In this function we generate a new loop. The new loop will contain 3101 the vectorized instructions while the old loop will continue to run the 3102 scalar remainder. 3103 3104 [ ] <-- loop iteration number check. 3105 / | 3106 / v 3107 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3108 | / | 3109 | / v 3110 || [ ] <-- vector pre header. 3111 |/ | 3112 | v 3113 | [ ] \ 3114 | [ ]_| <-- vector loop. 3115 | | 3116 | v 3117 | -[ ] <--- middle-block. 3118 | / | 3119 | / v 3120 -|- >[ ] <--- new preheader. 3121 | | 3122 | v 3123 | [ ] \ 3124 | [ ]_| <-- old scalar loop to handle remainder. 3125 \ | 3126 \ v 3127 >[ ] <-- exit block. 3128 ... 
3129 */ 3130 3131 BasicBlock *OldBasicBlock = OrigLoop->getHeader(); 3132 BasicBlock *VectorPH = OrigLoop->getLoopPreheader(); 3133 BasicBlock *ExitBlock = OrigLoop->getExitBlock(); 3134 assert(VectorPH && "Invalid loop structure"); 3135 assert(ExitBlock && "Must have an exit block"); 3136 3137 // Some loops have a single integer induction variable, while other loops 3138 // don't. One example is c++ iterators that often have multiple pointer 3139 // induction variables. In the code below we also support a case where we 3140 // don't have a single induction variable. 3141 // 3142 // We try to obtain an induction variable from the original loop as hard 3143 // as possible. However if we don't find one that: 3144 // - is an integer 3145 // - counts from zero, stepping by one 3146 // - is the size of the widest induction variable type 3147 // then we create a new one. 3148 OldInduction = Legal->getInduction(); 3149 Type *IdxTy = Legal->getWidestInductionType(); 3150 3151 // Split the single block loop into the two loop structure described above. 3152 BasicBlock *VecBody = 3153 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body"); 3154 BasicBlock *MiddleBlock = 3155 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block"); 3156 BasicBlock *ScalarPH = 3157 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph"); 3158 3159 // Create and register the new vector loop. 3160 Loop *Lp = new Loop(); 3161 Loop *ParentLoop = OrigLoop->getParentLoop(); 3162 3163 // Insert the new loop into the loop nest and register the new basic blocks 3164 // before calling any utilities such as SCEV that require valid LoopInfo. 3165 if (ParentLoop) { 3166 ParentLoop->addChildLoop(Lp); 3167 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI); 3168 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI); 3169 } else { 3170 LI->addTopLevelLoop(Lp); 3171 } 3172 Lp->addBasicBlockToLoop(VecBody, *LI); 3173 3174 // Find the loop boundaries. 3175 Value *Count = getOrCreateTripCount(Lp); 3176 3177 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3178 3179 // We need to test whether the backedge-taken count is uint##_max. Adding one 3180 // to it will cause overflow and an incorrect loop trip count in the vector 3181 // body. In case of overflow we want to directly jump to the scalar remainder 3182 // loop. 3183 emitMinimumIterationCountCheck(Lp, ScalarPH); 3184 // Now, compare the new count to zero. If it is zero skip the vector loop and 3185 // jump to the scalar loop. 3186 emitVectorLoopEnteredCheck(Lp, ScalarPH); 3187 // Generate the code to check any assumptions that we've made for SCEV 3188 // expressions. 3189 emitSCEVChecks(Lp, ScalarPH); 3190 3191 // Generate the code that checks in runtime if arrays overlap. We put the 3192 // checks into a separate block to make the more common case of few elements 3193 // faster. 3194 emitMemRuntimeChecks(Lp, ScalarPH); 3195 3196 // Generate the induction variable. 3197 // The loop step is equal to the vectorization factor (num of SIMD elements) 3198 // times the unroll factor (num of SIMD instructions). 3199 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3200 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 3201 Induction = 3202 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3203 getDebugLocFromInstOrOperands(OldInduction)); 3204 3205 // We are going to resume the execution of the scalar loop. 3206 // Go over all of the induction variables that we found and fix the 3207 // PHIs that are left in the scalar version of the loop. 
3208 // The starting values of PHI nodes depend on the counter of the last 3209 // iteration in the vectorized loop. 3210 // If we come from a bypass edge then we need to start from the original 3211 // start value. 3212 3213 // This variable saves the new starting index for the scalar loop. It is used 3214 // to test if there are any tail iterations left once the vector loop has 3215 // completed. 3216 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars(); 3217 for (auto &InductionEntry : *List) { 3218 PHINode *OrigPhi = InductionEntry.first; 3219 InductionDescriptor II = InductionEntry.second; 3220 3221 // Create phi nodes to merge from the backedge-taken check block. 3222 PHINode *BCResumeVal = PHINode::Create( 3223 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator()); 3224 Value *EndValue; 3225 if (OrigPhi == OldInduction) { 3226 // We know what the end value is. 3227 EndValue = CountRoundDown; 3228 } else { 3229 IRBuilder<> B(LoopBypassBlocks.back()->getTerminator()); 3230 Type *StepType = II.getStep()->getType(); 3231 Instruction::CastOps CastOp = 3232 CastInst::getCastOpcode(CountRoundDown, true, StepType, true); 3233 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd"); 3234 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 3235 EndValue = II.transform(B, CRD, PSE.getSE(), DL); 3236 EndValue->setName("ind.end"); 3237 } 3238 3239 // The new PHI merges the original incoming value, in case of a bypass, 3240 // or the value at the end of the vectorized loop. 3241 BCResumeVal->addIncoming(EndValue, MiddleBlock); 3242 3243 // Fix up external users of the induction variable. 3244 fixupIVUsers(OrigPhi, II, CountRoundDown, EndValue, MiddleBlock); 3245 3246 // Fix the scalar body counter (PHI node). 3247 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH); 3248 3249 // The old induction's phi node in the scalar body needs the truncated 3250 // value. 3251 for (BasicBlock *BB : LoopBypassBlocks) 3252 BCResumeVal->addIncoming(II.getStartValue(), BB); 3253 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal); 3254 } 3255 3256 // Add a check in the middle block to see if we have completed 3257 // all of the iterations in the first vector loop. 3258 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3259 Value *CmpN = 3260 CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 3261 CountRoundDown, "cmp.n", MiddleBlock->getTerminator()); 3262 ReplaceInstWithInst(MiddleBlock->getTerminator(), 3263 BranchInst::Create(ExitBlock, ScalarPH, CmpN)); 3264 3265 // Get ready to start creating new instructions into the vectorized body. 3266 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt()); 3267 3268 // Save the state. 3269 LoopVectorPreHeader = Lp->getLoopPreheader(); 3270 LoopScalarPreHeader = ScalarPH; 3271 LoopMiddleBlock = MiddleBlock; 3272 LoopExitBlock = ExitBlock; 3273 LoopVectorBody = VecBody; 3274 LoopScalarBody = OldBasicBlock; 3275 3276 // Keep all loop hints from the original loop on the vector loop (we'll 3277 // replace the vectorizer-specific hints below). 3278 if (MDNode *LID = OrigLoop->getLoopID()) 3279 Lp->setLoopID(LID); 3280 3281 LoopVectorizeHints Hints(Lp, true, *ORE); 3282 Hints.setAlreadyVectorized(); 3283 } 3284 3285 // Fix up external users of the induction variable. At this point, we are 3286 // in LCSSA form, with all external PHIs that use the IV having one input value, 3287 // coming from the remainder loop. 
// We need those PHIs to also have a correct
3288 // value for the IV when arriving directly from the middle block.
3289 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3290                                        const InductionDescriptor &II,
3291                                        Value *CountRoundDown, Value *EndValue,
3292                                        BasicBlock *MiddleBlock) {
3293   // There are two kinds of external IV usages - those that use the value
3294   // computed in the last iteration (the PHI) and those that use the penultimate
3295   // value (the value that feeds into the phi from the loop latch).
3296   // We allow both, but they, obviously, have different values.
3297
3298   assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3299
3300   DenseMap<Value *, Value *> MissingVals;
3301
3302   // An external user of the last iteration's value should see the value that
3303   // the remainder loop uses to initialize its own IV.
3304   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3305   for (User *U : PostInc->users()) {
3306     Instruction *UI = cast<Instruction>(U);
3307     if (!OrigLoop->contains(UI)) {
3308       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3309       MissingVals[UI] = EndValue;
3310     }
3311   }
3312
3313   // An external user of the penultimate value needs to see EndValue - Step.
3314   // The simplest way to get this is to recompute it from the constituent SCEVs,
3315   // that is Start + (Step * (CRD - 1)).
3316   for (User *U : OrigPhi->users()) {
3317     auto *UI = cast<Instruction>(U);
3318     if (!OrigLoop->contains(UI)) {
3319       const DataLayout &DL =
3320           OrigLoop->getHeader()->getModule()->getDataLayout();
3321       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3322
3323       IRBuilder<> B(MiddleBlock->getTerminator());
3324       Value *CountMinusOne = B.CreateSub(
3325           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3326       Value *CMO = B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType(),
3327                                        "cast.cmo");
3328       Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
3329       Escape->setName("ind.escape");
3330       MissingVals[UI] = Escape;
3331     }
3332   }
3333
3334   for (auto &I : MissingVals) {
3335     PHINode *PHI = cast<PHINode>(I.first);
3336     // One corner case we have to handle is two IVs "chasing" each other,
3337     // that is %IV2 = phi [...], [ %IV1, %latch ]
3338     // In this case, if IV1 has an external use, we need to avoid adding both
3339     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3340     // don't already have an incoming value for the middle block.
3341     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3342       PHI->addIncoming(I.second, MiddleBlock);
3343   }
3344 }
3345
3346 namespace {
3347 struct CSEDenseMapInfo {
3348   static bool canHandle(Instruction *I) {
3349     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3350            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3351   }
3352   static inline Instruction *getEmptyKey() {
3353     return DenseMapInfo<Instruction *>::getEmptyKey();
3354   }
3355   static inline Instruction *getTombstoneKey() {
3356     return DenseMapInfo<Instruction *>::getTombstoneKey();
3357   }
3358   static unsigned getHashValue(Instruction *I) {
3359     assert(canHandle(I) && "Unknown instruction!");
3360     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3361                                                            I->value_op_end()));
3362   }
3363   static bool isEqual(Instruction *LHS, Instruction *RHS) {
3364     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3365         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3366       return LHS == RHS;
3367     return LHS->isIdenticalTo(RHS);
3368   }
3369 };
3370 }
3371
3372 /// \brief Perform CSE of induction variable instructions.
3373 static void cse(BasicBlock *BB) {
3374   // Perform simple CSE.
3375   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3376   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3377     Instruction *In = &*I++;
3378
3379     if (!CSEDenseMapInfo::canHandle(In))
3380       continue;
3381
3382     // Check if we can replace this instruction with any of the
3383     // visited instructions.
3384     if (Instruction *V = CSEMap.lookup(In)) {
3385       In->replaceAllUsesWith(V);
3386       In->eraseFromParent();
3387       continue;
3388     }
3389
3390     CSEMap[In] = In;
3391   }
3392 }
3393
3394 /// \brief Adds a 'fast' flag to floating point operations.
3395 static Value *addFastMathFlag(Value *V) {
3396   if (isa<FPMathOperator>(V)) {
3397     FastMathFlags Flags;
3398     Flags.setUnsafeAlgebra();
3399     cast<Instruction>(V)->setFastMathFlags(Flags);
3400   }
3401   return V;
3402 }
3403
3404 /// \brief Estimate the overhead of scalarizing a value based on its type.
3405 /// Insert and Extract are set if the result needs to be inserted and/or
3406 /// extracted from vectors.
3407 /// If the instruction is also to be predicated, add the cost of a PHI
3408 /// node to the insertion cost.
3409 static unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract,
3410                                          bool Predicated,
3411                                          const TargetTransformInfo &TTI) {
3412   if (Ty->isVoidTy())
3413     return 0;
3414
3415   assert(Ty->isVectorTy() && "Can only scalarize vectors");
3416   unsigned Cost = 0;
3417
3418   for (unsigned I = 0, E = Ty->getVectorNumElements(); I < E; ++I) {
3419     if (Extract)
3420       Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, I);
3421     if (Insert) {
3422       Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, I);
3423       if (Predicated)
3424         Cost += TTI.getCFInstrCost(Instruction::PHI);
3425     }
3426   }
3427
3428   // We assume that if-converted blocks have a 50% chance of being executed.
3429   // Predicated scalarized instructions are avoided due to the control flow that
3430   // bypasses the turned-off lanes. The extracts and inserts will be sunk/hoisted
3431   // to the predicated basic-block and are subjected to the same assumption.
3432   if (Predicated)
3433     Cost /= 2;
3434
3435   return Cost;
3436 }
3437
3438 /// \brief Estimate the overhead of scalarizing an Instruction based on the
3439 /// types of its operands and return value.
3440 static unsigned getScalarizationOverhead(SmallVectorImpl<Type *> &OpTys, 3441 Type *RetTy, bool Predicated, 3442 const TargetTransformInfo &TTI) { 3443 unsigned ScalarizationCost = 3444 getScalarizationOverhead(RetTy, true, false, Predicated, TTI); 3445 3446 for (Type *Ty : OpTys) 3447 ScalarizationCost += 3448 getScalarizationOverhead(Ty, false, true, Predicated, TTI); 3449 3450 return ScalarizationCost; 3451 } 3452 3453 /// \brief Estimate the overhead of scalarizing an instruction. This is a 3454 /// convenience wrapper for the type-based getScalarizationOverhead API. 3455 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF, 3456 bool Predicated, 3457 const TargetTransformInfo &TTI) { 3458 if (VF == 1) 3459 return 0; 3460 3461 Type *RetTy = ToVectorTy(I->getType(), VF); 3462 3463 SmallVector<Type *, 4> OpTys; 3464 unsigned OperandsNum = I->getNumOperands(); 3465 for (unsigned OpInd = 0; OpInd < OperandsNum; ++OpInd) 3466 OpTys.push_back(ToVectorTy(I->getOperand(OpInd)->getType(), VF)); 3467 3468 return getScalarizationOverhead(OpTys, RetTy, Predicated, TTI); 3469 } 3470 3471 // Estimate cost of a call instruction CI if it were vectorized with factor VF. 3472 // Return the cost of the instruction, including scalarization overhead if it's 3473 // needed. The flag NeedToScalarize shows if the call needs to be scalarized - 3474 // i.e. either vector version isn't available, or is too expensive. 3475 static unsigned getVectorCallCost(CallInst *CI, unsigned VF, 3476 const TargetTransformInfo &TTI, 3477 const TargetLibraryInfo *TLI, 3478 bool &NeedToScalarize) { 3479 Function *F = CI->getCalledFunction(); 3480 StringRef FnName = CI->getCalledFunction()->getName(); 3481 Type *ScalarRetTy = CI->getType(); 3482 SmallVector<Type *, 4> Tys, ScalarTys; 3483 for (auto &ArgOp : CI->arg_operands()) 3484 ScalarTys.push_back(ArgOp->getType()); 3485 3486 // Estimate cost of scalarized vector call. The source operands are assumed 3487 // to be vectors, so we need to extract individual elements from there, 3488 // execute VF scalar calls, and then gather the result into the vector return 3489 // value. 3490 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3491 if (VF == 1) 3492 return ScalarCallCost; 3493 3494 // Compute corresponding vector type for return value and arguments. 3495 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3496 for (Type *ScalarTy : ScalarTys) 3497 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3498 3499 // Compute costs of unpacking argument values for the scalar calls and 3500 // packing the return values to a vector. 3501 unsigned ScalarizationCost = getScalarizationOverhead(Tys, RetTy, false, TTI); 3502 3503 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3504 3505 // If we can't emit a vector call for this function, then the currently found 3506 // cost is the cost we need to return. 3507 NeedToScalarize = true; 3508 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3509 return Cost; 3510 3511 // If the corresponding vector cost is cheaper, return its cost. 3512 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3513 if (VectorCallCost < Cost) { 3514 NeedToScalarize = false; 3515 return VectorCallCost; 3516 } 3517 return Cost; 3518 } 3519 3520 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3521 // factor VF. Return the cost of the instruction, including scalarization 3522 // overhead if it's needed. 
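// For example (illustrative): a call to fabsf with VF = 4 maps to the
// llvm.fabs intrinsic, so the cost queried from TTI is that of llvm.fabs on
// <4 x float>, with the call's fast-math flags (if any) passed through.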
3523 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3524 const TargetTransformInfo &TTI, 3525 const TargetLibraryInfo *TLI) { 3526 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3527 assert(ID && "Expected intrinsic call!"); 3528 3529 Type *RetTy = ToVectorTy(CI->getType(), VF); 3530 SmallVector<Type *, 4> Tys; 3531 for (Value *ArgOperand : CI->arg_operands()) 3532 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 3533 3534 FastMathFlags FMF; 3535 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3536 FMF = FPMO->getFastMathFlags(); 3537 3538 return TTI.getIntrinsicInstrCost(ID, RetTy, Tys, FMF); 3539 } 3540 3541 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3542 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3543 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3544 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3545 } 3546 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3547 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3548 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3549 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3550 } 3551 3552 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3553 // For every instruction `I` in MinBWs, truncate the operands, create a 3554 // truncated version of `I` and reextend its result. InstCombine runs 3555 // later and will remove any ext/trunc pairs. 3556 // 3557 SmallPtrSet<Value *, 4> Erased; 3558 for (const auto &KV : *MinBWs) { 3559 VectorParts &Parts = WidenMap.get(KV.first); 3560 for (Value *&I : Parts) { 3561 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3562 continue; 3563 Type *OriginalTy = I->getType(); 3564 Type *ScalarTruncatedTy = 3565 IntegerType::get(OriginalTy->getContext(), KV.second); 3566 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3567 OriginalTy->getVectorNumElements()); 3568 if (TruncatedTy == OriginalTy) 3569 continue; 3570 3571 IRBuilder<> B(cast<Instruction>(I)); 3572 auto ShrinkOperand = [&](Value *V) -> Value * { 3573 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3574 if (ZI->getSrcTy() == TruncatedTy) 3575 return ZI->getOperand(0); 3576 return B.CreateZExtOrTrunc(V, TruncatedTy); 3577 }; 3578 3579 // The actual instruction modification depends on the instruction type, 3580 // unfortunately. 
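      // For example (illustrative, assuming MinBWs says the result of an i32
      // add needs only 8 bits):
      //
      //   %sum = add <4 x i32> %a, %b
      //
      // becomes
      //
      //   %a.tr  = trunc <4 x i32> %a to <4 x i8>
      //   %b.tr  = trunc <4 x i32> %b to <4 x i8>
      //   %sum.t = add <4 x i8> %a.tr, %b.tr
      //   %sum.e = zext <4 x i8> %sum.t to <4 x i32>
      //
      // with InstCombine expected to clean up redundant ext/trunc pairs.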
3581       Value *NewI = nullptr;
3582       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3583         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3584                              ShrinkOperand(BO->getOperand(1)));
3585         cast<BinaryOperator>(NewI)->copyIRFlags(I);
3586       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3587         NewI =
3588             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3589                          ShrinkOperand(CI->getOperand(1)));
3590       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3591         NewI = B.CreateSelect(SI->getCondition(),
3592                               ShrinkOperand(SI->getTrueValue()),
3593                               ShrinkOperand(SI->getFalseValue()));
3594       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3595         switch (CI->getOpcode()) {
3596         default:
3597           llvm_unreachable("Unhandled cast!");
3598         case Instruction::Trunc:
3599           NewI = ShrinkOperand(CI->getOperand(0));
3600           break;
3601         case Instruction::SExt:
3602           NewI = B.CreateSExtOrTrunc(
3603               CI->getOperand(0),
3604               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3605           break;
3606         case Instruction::ZExt:
3607           NewI = B.CreateZExtOrTrunc(
3608               CI->getOperand(0),
3609               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3610           break;
3611         }
3612       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3613         auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
3614         auto *O0 = B.CreateZExtOrTrunc(
3615             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3616         auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
3617         auto *O1 = B.CreateZExtOrTrunc(
3618             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3619
3620         NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
3621       } else if (isa<LoadInst>(I)) {
3622         // Don't do anything with the operands, just extend the result.
3623         continue;
3624       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3625         auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
3626         auto *O0 = B.CreateZExtOrTrunc(
3627             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3628         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3629         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3630       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3631         auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
3632         auto *O0 = B.CreateZExtOrTrunc(
3633             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3634         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3635       } else {
3636         llvm_unreachable("Unhandled instruction type!");
3637       }
3638
3639       // Lastly, extend the result.
3640       NewI->takeName(cast<Instruction>(I));
3641       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3642       I->replaceAllUsesWith(Res);
3643       cast<Instruction>(I)->eraseFromParent();
3644       Erased.insert(I);
3645       I = Res;
3646     }
3647   }
3648
3649   // We'll have created a bunch of ZExts that are now parentless. Clean up.
3650   for (const auto &KV : *MinBWs) {
3651     VectorParts &Parts = WidenMap.get(KV.first);
3652     for (Value *&I : Parts) {
3653       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3654       if (Inst && Inst->use_empty()) {
3655         Value *NewI = Inst->getOperand(0);
3656         Inst->eraseFromParent();
3657         I = NewI;
3658       }
3659     }
3660   }
3661 }
3662
3663 void InnerLoopVectorizer::vectorizeLoop() {
3664   //===------------------------------------------------===//
3665   //
3666   // Notice: any optimization or new instruction that goes
3667   // into the code below should also be implemented in
3668   // the cost-model.
3669   //
3670   //===------------------------------------------------===//
3671   Constant *Zero = Builder.getInt32(0);
3672
3673   // In order to support recurrences we need to be able to vectorize Phi nodes.
3674   // Phi nodes have cycles, so we need to vectorize them in two stages. First,
3675   // we create a new vector PHI node with no incoming edges. We use this value
3676   // when we vectorize all of the instructions that use the PHI. Next, after
3677   // all of the instructions in the block are complete we add the new incoming
3678   // edges to the PHI. At this point all of the instructions in the basic block
3679   // are vectorized, so we can use them to construct the PHI.
3680   PhiVector PHIsToFix;
3681
3682   // Scan the loop in a topological order to ensure that defs are vectorized
3683   // before users.
3684   LoopBlocksDFS DFS(OrigLoop);
3685   DFS.perform(LI);
3686
3687   // Vectorize all of the blocks in the original loop.
3688   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
3689     vectorizeBlockInLoop(BB, &PHIsToFix);
3690
3691   // Insert truncates and extends for any truncated instructions as hints to
3692   // InstCombine.
3693   if (VF > 1)
3694     truncateToMinimalBitwidths();
3695
3696   // At this point every instruction in the original loop is widened to a
3697   // vector form. Now we need to fix the recurrences in PHIsToFix. These PHI
3698   // nodes are currently empty because we did not want to introduce cycles.
3699   // This is the second stage of vectorizing recurrences.
3700   for (PHINode *Phi : PHIsToFix) {
3701     assert(Phi && "Unable to recover vectorized PHI");
3702
3703     // Handle first-order recurrences that need to be fixed.
3704     if (Legal->isFirstOrderRecurrence(Phi)) {
3705       fixFirstOrderRecurrence(Phi);
3706       continue;
3707     }
3708
3709     // If the phi node is not a first-order recurrence, it must be a reduction.
3710     // Get its reduction variable descriptor.
3711     assert(Legal->isReductionVariable(Phi) &&
3712            "Unable to find the reduction variable");
3713     RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
3714
3715     RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3716     TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3717     Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3718     RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3719         RdxDesc.getMinMaxRecurrenceKind();
3720     setDebugLocFromInst(Builder, ReductionStartValue);
3721
3722     // We need to generate a reduction vector from the incoming scalar.
3723     // To do so, we need to generate the 'identity' vector and override
3724     // one of the elements with the incoming scalar reduction. We need
3725     // to do it in the vector-loop preheader.
3726     Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());
3727
3728     // This is the vector-clone of the value that leaves the loop.
3729     VectorParts &VectorExit = getVectorValue(LoopExitInst);
3730     Type *VecTy = VectorExit[0]->getType();
3731
3732     // Find the reduction identity variable. Zero for addition, or, xor,
3733     // one for multiplication, -1 for And.
3734     Value *Identity;
3735     Value *VectorStart;
3736     if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3737         RK == RecurrenceDescriptor::RK_FloatMinMax) {
3738       // MinMax reductions have the start value as their identity.
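      // For example (illustrative): for VF = 4, an integer smax reduction
      // starting from %init uses a splat, <%init, %init, %init, %init>, as
      // both the identity and the vector start value; an add reduction instead
      // uses a zero identity with %init inserted into lane 0 (see below).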
3739       if (VF == 1) {
3740         VectorStart = Identity = ReductionStartValue;
3741       } else {
3742         VectorStart = Identity =
3743             Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3744       }
3745     } else {
3746       // Handle other reduction kinds:
3747       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3748           RK, VecTy->getScalarType());
3749       if (VF == 1) {
3750         Identity = Iden;
3751         // This vector is the Identity vector where the first element is the
3752         // incoming scalar reduction.
3753         VectorStart = ReductionStartValue;
3754       } else {
3755         Identity = ConstantVector::getSplat(VF, Iden);
3756
3757         // This vector is the Identity vector where the first element is the
3758         // incoming scalar reduction.
3759         VectorStart =
3760             Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3761       }
3762     }
3763
3764     // Fix the vector-loop phi.
3765
3766     // Reductions do not have to start at zero. They can start with
3767     // any loop invariant values.
3768     VectorParts &VecRdxPhi = WidenMap.get(Phi);
3769     BasicBlock *Latch = OrigLoop->getLoopLatch();
3770     Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3771     VectorParts &Val = getVectorValue(LoopVal);
3772     for (unsigned part = 0; part < UF; ++part) {
3773       // Make sure to add the reduction start value only to the
3774       // first unroll part.
3775       Value *StartVal = (part == 0) ? VectorStart : Identity;
3776       cast<PHINode>(VecRdxPhi[part])
3777           ->addIncoming(StartVal, LoopVectorPreHeader);
3778       cast<PHINode>(VecRdxPhi[part])
3779           ->addIncoming(Val[part], LoopVectorBody);
3780     }
3781
3782     // Before each round, move the insertion point right between
3783     // the PHIs and the values we are going to write.
3784     // This allows us to write both PHINodes and the extractelement
3785     // instructions.
3786     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3787
3788     VectorParts RdxParts = getVectorValue(LoopExitInst);
3789     setDebugLocFromInst(Builder, LoopExitInst);
3790
3791     // If the vector reduction can be performed in a smaller type, we truncate
3792     // then extend the loop exit value to enable InstCombine to evaluate the
3793     // entire expression in the smaller type.
3794     if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
3795       Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3796       Builder.SetInsertPoint(LoopVectorBody->getTerminator());
3797       for (unsigned part = 0; part < UF; ++part) {
3798         Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
3799         Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3800                                           : Builder.CreateZExt(Trunc, VecTy);
3801         for (Value::user_iterator UI = RdxParts[part]->user_begin();
3802              UI != RdxParts[part]->user_end();)
3803           if (*UI != Trunc) {
3804             (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd);
3805             RdxParts[part] = Extnd;
3806           } else {
3807             ++UI;
3808           }
3809       }
3810       Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3811       for (unsigned part = 0; part < UF; ++part)
3812         RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
3813     }
3814
3815     // Reduce all of the unrolled parts into a single vector.
3816     Value *ReducedPartRdx = RdxParts[0];
3817     unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
3818     setDebugLocFromInst(Builder, ReducedPartRdx);
3819     for (unsigned part = 1; part < UF; ++part) {
3820       if (Op != Instruction::ICmp && Op != Instruction::FCmp)
3821         // Floating point operations had to be 'fast' to enable the reduction.
3822 ReducedPartRdx = addFastMathFlag( 3823 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part], 3824 ReducedPartRdx, "bin.rdx")); 3825 else 3826 ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp( 3827 Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]); 3828 } 3829 3830 if (VF > 1) { 3831 // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles 3832 // and vector ops, reducing the set of values being computed by half each 3833 // round. 3834 assert(isPowerOf2_32(VF) && 3835 "Reduction emission only supported for pow2 vectors!"); 3836 Value *TmpVec = ReducedPartRdx; 3837 SmallVector<Constant *, 32> ShuffleMask(VF, nullptr); 3838 for (unsigned i = VF; i != 1; i >>= 1) { 3839 // Move the upper half of the vector to the lower half. 3840 for (unsigned j = 0; j != i / 2; ++j) 3841 ShuffleMask[j] = Builder.getInt32(i / 2 + j); 3842 3843 // Fill the rest of the mask with undef. 3844 std::fill(&ShuffleMask[i / 2], ShuffleMask.end(), 3845 UndefValue::get(Builder.getInt32Ty())); 3846 3847 Value *Shuf = Builder.CreateShuffleVector( 3848 TmpVec, UndefValue::get(TmpVec->getType()), 3849 ConstantVector::get(ShuffleMask), "rdx.shuf"); 3850 3851 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3852 // Floating point operations had to be 'fast' to enable the reduction. 3853 TmpVec = addFastMathFlag(Builder.CreateBinOp( 3854 (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx")); 3855 else 3856 TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind, 3857 TmpVec, Shuf); 3858 } 3859 3860 // The result is in the first element of the vector. 3861 ReducedPartRdx = 3862 Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 3863 3864 // If the reduction can be performed in a smaller type, we need to extend 3865 // the reduction to the wider type before we branch to the original loop. 3866 if (Phi->getType() != RdxDesc.getRecurrenceType()) 3867 ReducedPartRdx = 3868 RdxDesc.isSigned() 3869 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 3870 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 3871 } 3872 3873 // Create a phi node that merges control-flow from the backedge-taken check 3874 // block and the middle block. 3875 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 3876 LoopScalarPreHeader->getTerminator()); 3877 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 3878 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 3879 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3880 3881 // Now, we need to fix the users of the reduction variable 3882 // inside and outside of the scalar remainder loop. 3883 // We know that the loop is in LCSSA form. We need to update the 3884 // PHI nodes in the exit blocks. 3885 for (BasicBlock::iterator LEI = LoopExitBlock->begin(), 3886 LEE = LoopExitBlock->end(); 3887 LEI != LEE; ++LEI) { 3888 PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI); 3889 if (!LCSSAPhi) 3890 break; 3891 3892 // All PHINodes need to have a single entry edge, or two if 3893 // we already fixed them. 3894 assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 3895 3896 // We found our reduction value exit-PHI. Update it with the 3897 // incoming bypass edge. 3898 if (LCSSAPhi->getIncomingValue(0) == LoopExitInst) { 3899 // Add an edge coming from the bypass. 3900 LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3901 break; 3902 } 3903 } // end of the LCSSA phi scan. 
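    // For example (illustrative names): the reduction's exit-block phi, which
    // previously read
    //
    //   %sum.lcssa = phi i32 [ %sum, %for.body ]
    //
    // now also merges the vector result:
    //
    //   %sum.lcssa = phi i32 [ %sum, %for.body ], [ %rdx, %middle.block ]
    //
    // where %rdx stands for the ReducedPartRdx value computed above.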
3904
3905     // Fix the scalar loop reduction variable with the incoming reduction sum
3906     // from the vector body and from the backedge value.
3907     int IncomingEdgeBlockIdx =
3908         Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
3909     assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
3910     // Pick the other block.
3911     int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
3912     Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
3913     Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
3914   } // end of for each Phi in PHIsToFix.
3915
3916   fixLCSSAPHIs();
3917
3918   // Make sure DomTree is updated.
3919   updateAnalysis();
3920
3921   predicateInstructions();
3922
3923   // Remove redundant induction instructions.
3924   cse(LoopVectorBody);
3925 }
3926
3927 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3928
3929   // This is the second phase of vectorizing first-order recurrences. An
3930   // overview of the transformation is described below. Suppose we have the
3931   // following loop.
3932   //
3933   //   for (int i = 0; i < n; ++i)
3934   //     b[i] = a[i] - a[i - 1];
3935   //
3936   // There is a first-order recurrence on "a". For this loop, the shorthand
3937   // scalar IR looks like:
3938   //
3939   //   scalar.ph:
3940   //     s_init = a[-1]
3941   //     br scalar.body
3942   //
3943   //   scalar.body:
3944   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3945   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3946   //     s2 = a[i]
3947   //     b[i] = s2 - s1
3948   //     br cond, scalar.body, ...
3949   //
3950   // In this example, s1 is a recurrence because its value depends on the
3951   // previous iteration. In the first phase of vectorization, we created a
3952   // temporary value for s1. We now complete the vectorization and produce the
3953   // shorthand vector IR shown below (for VF = 4, UF = 1).
3954   //
3955   //   vector.ph:
3956   //     v_init = vector(..., ..., ..., a[-1])
3957   //     br vector.body
3958   //
3959   //   vector.body
3960   //     i = phi [0, vector.ph], [i+4, vector.body]
3961   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3962   //     v2 = a[i, i+1, i+2, i+3];
3963   //     v3 = vector(v1(3), v2(0, 1, 2))
3964   //     b[i, i+1, i+2, i+3] = v2 - v3
3965   //     br cond, vector.body, middle.block
3966   //
3967   //   middle.block:
3968   //     x = v2(3)
3969   //     br scalar.ph
3970   //
3971   //   scalar.ph:
3972   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3973   //     br scalar.body
3974   //
3975   // After the vector loop finishes executing, we extract the next value of
3976   // the recurrence (x) to use as the initial value in the scalar loop.
3977
3978   // Get the original loop preheader and single loop latch.
3979   auto *Preheader = OrigLoop->getLoopPreheader();
3980   auto *Latch = OrigLoop->getLoopLatch();
3981
3982   // Get the initial and previous values of the scalar recurrence.
3983   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3984   auto *Previous = Phi->getIncomingValueForBlock(Latch);
3985
3986   // Create a vector from the initial value.
3987   auto *VectorInit = ScalarInit;
3988   if (VF > 1) {
3989     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3990     VectorInit = Builder.CreateInsertElement(
3991         UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
3992         Builder.getInt32(VF - 1), "vector.recur.init");
3993   }
3994
3995   // We constructed a temporary phi node in the first phase of vectorization.
3996   // This phi node will eventually be deleted.
3997   auto &PhiParts = getVectorValue(Phi);
3998   Builder.SetInsertPoint(cast<Instruction>(PhiParts[0]));
3999
4000   // Create a phi node for the new recurrence. The current value will either be
4001   // the initial value inserted into a vector or loop-varying vector value.
4002   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4003   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4004
4005   // Get the vectorized previous value. We ensured the previous value was an
4006   // instruction when detecting the recurrence.
4007   auto &PreviousParts = getVectorValue(Previous);
4008
4009   // Set the insertion point to be after this instruction. We ensured the
4010   // previous value dominated all uses of the phi when detecting the
4011   // recurrence.
4012   Builder.SetInsertPoint(
4013       &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1])));
4014
4015   // We will construct a vector for the recurrence by combining the values for
4016   // the current and previous iterations. This is the required shuffle mask.
4017   SmallVector<Constant *, 8> ShuffleMask(VF);
4018   ShuffleMask[0] = Builder.getInt32(VF - 1);
4019   for (unsigned I = 1; I < VF; ++I)
4020     ShuffleMask[I] = Builder.getInt32(I + VF - 1);
4021
4022   // The vector from which to take the initial value for the current iteration
4023   // (actual or unrolled). Initially, this is the vector phi node.
4024   Value *Incoming = VecPhi;
4025
4026   // Shuffle the current and previous vector and update the vector parts.
4027   for (unsigned Part = 0; Part < UF; ++Part) {
4028     auto *Shuffle =
4029         VF > 1
4030             ? Builder.CreateShuffleVector(Incoming, PreviousParts[Part],
4031                                           ConstantVector::get(ShuffleMask))
4032             : Incoming;
4033     PhiParts[Part]->replaceAllUsesWith(Shuffle);
4034     cast<Instruction>(PhiParts[Part])->eraseFromParent();
4035     PhiParts[Part] = Shuffle;
4036     Incoming = PreviousParts[Part];
4037   }
4038
4039   // Fix the latch value of the new recurrence in the vector loop.
4040   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4041
4042   // Extract the last vector element in the middle block. This will be the
4043   // initial value for the recurrence when jumping to the scalar loop.
4044   auto *Extract = Incoming;
4045   if (VF > 1) {
4046     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4047     Extract = Builder.CreateExtractElement(Extract, Builder.getInt32(VF - 1),
4048                                            "vector.recur.extract");
4049   }
4050
4051   // Fix the initial value of the original recurrence in the scalar loop.
4052   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4053   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4054   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4055     auto *Incoming = BB == LoopMiddleBlock ? Extract : ScalarInit;
4056     Start->addIncoming(Incoming, BB);
4057   }
4058
4059   Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
4060   Phi->setName("scalar.recur");
4061
4062   // Finally, fix users of the recurrence outside the loop. The users will need
4063   // either the last value of the scalar recurrence or the last value of the
4064   // vector recurrence we extracted in the middle block. Since the loop is in
4065   // LCSSA form, we just need to find the phi node for the original scalar
4066   // recurrence in the exit block, and then add an edge for the middle block.
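  // For example (illustrative): an exit-block phi such as
  //
  //   %s.lcssa = phi i32 [ %scalar.recur, %for.body ]
  //
  // gains a second incoming value, [ %vector.recur.extract, %middle.block ],
  // in the loop below.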
4067 for (auto &I : *LoopExitBlock) { 4068 auto *LCSSAPhi = dyn_cast<PHINode>(&I); 4069 if (!LCSSAPhi) 4070 break; 4071 if (LCSSAPhi->getIncomingValue(0) == Phi) { 4072 LCSSAPhi->addIncoming(Extract, LoopMiddleBlock); 4073 break; 4074 } 4075 } 4076 } 4077 4078 void InnerLoopVectorizer::fixLCSSAPHIs() { 4079 for (Instruction &LEI : *LoopExitBlock) { 4080 auto *LCSSAPhi = dyn_cast<PHINode>(&LEI); 4081 if (!LCSSAPhi) 4082 break; 4083 if (LCSSAPhi->getNumIncomingValues() == 1) 4084 LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()), 4085 LoopMiddleBlock); 4086 } 4087 } 4088 4089 void InnerLoopVectorizer::predicateInstructions() { 4090 4091 // For each instruction I marked for predication on value C, split I into its 4092 // own basic block to form an if-then construct over C. 4093 // Since I may be fed by extractelement and/or be feeding an insertelement 4094 // generated during scalarization we try to move such instructions into the 4095 // predicated basic block as well. For the insertelement this also means that 4096 // the PHI will be created for the resulting vector rather than for the 4097 // scalar instruction. 4098 // So for some predicated instruction, e.g. the conditional sdiv in: 4099 // 4100 // for.body: 4101 // ... 4102 // %add = add nsw i32 %mul, %0 4103 // %cmp5 = icmp sgt i32 %2, 7 4104 // br i1 %cmp5, label %if.then, label %if.end 4105 // 4106 // if.then: 4107 // %div = sdiv i32 %0, %1 4108 // br label %if.end 4109 // 4110 // if.end: 4111 // %x.0 = phi i32 [ %div, %if.then ], [ %add, %for.body ] 4112 // 4113 // the sdiv at this point is scalarized and if-converted using a select. 4114 // The inactive elements in the vector are not used, but the predicated 4115 // instruction is still executed for all vector elements, essentially: 4116 // 4117 // vector.body: 4118 // ... 4119 // %17 = add nsw <2 x i32> %16, %wide.load 4120 // %29 = extractelement <2 x i32> %wide.load, i32 0 4121 // %30 = extractelement <2 x i32> %wide.load51, i32 0 4122 // %31 = sdiv i32 %29, %30 4123 // %32 = insertelement <2 x i32> undef, i32 %31, i32 0 4124 // %35 = extractelement <2 x i32> %wide.load, i32 1 4125 // %36 = extractelement <2 x i32> %wide.load51, i32 1 4126 // %37 = sdiv i32 %35, %36 4127 // %38 = insertelement <2 x i32> %32, i32 %37, i32 1 4128 // %predphi = select <2 x i1> %26, <2 x i32> %38, <2 x i32> %17 4129 // 4130 // Predication will now re-introduce the original control flow to avoid false 4131 // side-effects by the sdiv instructions on the inactive elements, yielding 4132 // (after cleanup): 4133 // 4134 // vector.body: 4135 // ... 
4136 // %5 = add nsw <2 x i32> %4, %wide.load 4137 // %8 = icmp sgt <2 x i32> %wide.load52, <i32 7, i32 7> 4138 // %9 = extractelement <2 x i1> %8, i32 0 4139 // br i1 %9, label %pred.sdiv.if, label %pred.sdiv.continue 4140 // 4141 // pred.sdiv.if: 4142 // %10 = extractelement <2 x i32> %wide.load, i32 0 4143 // %11 = extractelement <2 x i32> %wide.load51, i32 0 4144 // %12 = sdiv i32 %10, %11 4145 // %13 = insertelement <2 x i32> undef, i32 %12, i32 0 4146 // br label %pred.sdiv.continue 4147 // 4148 // pred.sdiv.continue: 4149 // %14 = phi <2 x i32> [ undef, %vector.body ], [ %13, %pred.sdiv.if ] 4150 // %15 = extractelement <2 x i1> %8, i32 1 4151 // br i1 %15, label %pred.sdiv.if54, label %pred.sdiv.continue55 4152 // 4153 // pred.sdiv.if54: 4154 // %16 = extractelement <2 x i32> %wide.load, i32 1 4155 // %17 = extractelement <2 x i32> %wide.load51, i32 1 4156 // %18 = sdiv i32 %16, %17 4157 // %19 = insertelement <2 x i32> %14, i32 %18, i32 1 4158 // br label %pred.sdiv.continue55 4159 // 4160 // pred.sdiv.continue55: 4161 // %20 = phi <2 x i32> [ %14, %pred.sdiv.continue ], [ %19, %pred.sdiv.if54 ] 4162 // %predphi = select <2 x i1> %8, <2 x i32> %20, <2 x i32> %5 4163 4164 for (auto KV : PredicatedInstructions) { 4165 BasicBlock::iterator I(KV.first); 4166 BasicBlock *Head = I->getParent(); 4167 auto *BB = SplitBlock(Head, &*std::next(I), DT, LI); 4168 auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false, 4169 /*BranchWeights=*/nullptr, DT, LI); 4170 I->moveBefore(T); 4171 // Try to move any extractelement we may have created for the predicated 4172 // instruction into the Then block. 4173 for (Use &Op : I->operands()) { 4174 auto *OpInst = dyn_cast<ExtractElementInst>(&*Op); 4175 if (OpInst && OpInst->hasOneUse()) // TODO: more accurately - hasOneUser() 4176 OpInst->moveBefore(&*I); 4177 } 4178 4179 I->getParent()->setName(Twine("pred.") + I->getOpcodeName() + ".if"); 4180 BB->setName(Twine("pred.") + I->getOpcodeName() + ".continue"); 4181 4182 // If the instruction is non-void create a Phi node at reconvergence point. 4183 if (!I->getType()->isVoidTy()) { 4184 Value *IncomingTrue = nullptr; 4185 Value *IncomingFalse = nullptr; 4186 4187 if (I->hasOneUse() && isa<InsertElementInst>(*I->user_begin())) { 4188 // If the predicated instruction is feeding an insert-element, move it 4189 // into the Then block; Phi node will be created for the vector. 4190 InsertElementInst *IEI = cast<InsertElementInst>(*I->user_begin()); 4191 IEI->moveBefore(T); 4192 IncomingTrue = IEI; // the new vector with the inserted element. 4193 IncomingFalse = IEI->getOperand(0); // the unmodified vector 4194 } else { 4195 // Phi node will be created for the scalar predicated instruction. 4196 IncomingTrue = &*I; 4197 IncomingFalse = UndefValue::get(I->getType()); 4198 } 4199 4200 BasicBlock *PostDom = I->getParent()->getSingleSuccessor(); 4201 assert(PostDom && "Then block has multiple successors"); 4202 PHINode *Phi = 4203 PHINode::Create(IncomingTrue->getType(), 2, "", &PostDom->front()); 4204 IncomingTrue->replaceAllUsesWith(Phi); 4205 Phi->addIncoming(IncomingFalse, Head); 4206 Phi->addIncoming(IncomingTrue, I->getParent()); 4207 } 4208 } 4209 4210 DEBUG(DT->verifyDomTree()); 4211 } 4212 4213 InnerLoopVectorizer::VectorParts 4214 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) { 4215 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 4216 4217 // Look for cached value. 
4218 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 4219 EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge); 4220 if (ECEntryIt != MaskCache.end()) 4221 return ECEntryIt->second; 4222 4223 VectorParts SrcMask = createBlockInMask(Src); 4224 4225 // The terminator has to be a branch inst! 4226 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 4227 assert(BI && "Unexpected terminator found"); 4228 4229 if (BI->isConditional()) { 4230 VectorParts EdgeMask = getVectorValue(BI->getCondition()); 4231 4232 if (BI->getSuccessor(0) != Dst) 4233 for (unsigned part = 0; part < UF; ++part) 4234 EdgeMask[part] = Builder.CreateNot(EdgeMask[part]); 4235 4236 for (unsigned part = 0; part < UF; ++part) 4237 EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]); 4238 4239 MaskCache[Edge] = EdgeMask; 4240 return EdgeMask; 4241 } 4242 4243 MaskCache[Edge] = SrcMask; 4244 return SrcMask; 4245 } 4246 4247 InnerLoopVectorizer::VectorParts 4248 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) { 4249 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 4250 4251 // Loop incoming mask is all-one. 4252 if (OrigLoop->getHeader() == BB) { 4253 Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1); 4254 return getVectorValue(C); 4255 } 4256 4257 // This is the block mask. We OR all incoming edges, and with zero. 4258 Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0); 4259 VectorParts BlockMask = getVectorValue(Zero); 4260 4261 // For each pred: 4262 for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) { 4263 VectorParts EM = createEdgeMask(*it, BB); 4264 for (unsigned part = 0; part < UF; ++part) 4265 BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]); 4266 } 4267 4268 return BlockMask; 4269 } 4270 4271 void InnerLoopVectorizer::widenPHIInstruction( 4272 Instruction *PN, InnerLoopVectorizer::VectorParts &Entry, unsigned UF, 4273 unsigned VF, PhiVector *PV) { 4274 PHINode *P = cast<PHINode>(PN); 4275 // Handle recurrences. 4276 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) { 4277 for (unsigned part = 0; part < UF; ++part) { 4278 // This is phase one of vectorizing PHIs. 4279 Type *VecTy = 4280 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF); 4281 Entry[part] = PHINode::Create( 4282 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 4283 } 4284 PV->push_back(P); 4285 return; 4286 } 4287 4288 setDebugLocFromInst(Builder, P); 4289 // Check for PHI nodes that are lowered to vector selects. 4290 if (P->getParent() != OrigLoop->getHeader()) { 4291 // We know that all PHIs in non-header blocks are converted into 4292 // selects, so we don't have to worry about the insertion order and we 4293 // can just use the builder. 4294 // At this point we generate the predication tree. There may be 4295 // duplications since this is a simple recursive scan, but future 4296 // optimizations will clean it up. 4297 4298 unsigned NumIncoming = P->getNumIncomingValues(); 4299 4300 // Generate a sequence of selects of the form: 4301 // SELECT(Mask3, In3, 4302 // SELECT(Mask2, In2, 4303 // ( ...))) 4304 for (unsigned In = 0; In < NumIncoming; In++) { 4305 VectorParts Cond = 4306 createEdgeMask(P->getIncomingBlock(In), P->getParent()); 4307 VectorParts &In0 = getVectorValue(P->getIncomingValue(In)); 4308 4309 for (unsigned part = 0; part < UF; ++part) { 4310 // We might have single edge PHIs (blocks) - use an identity 4311 // 'select' for the first PHI operand. 
        if (In == 0)
          Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In0[part]);
        else
          // Select between the current value and the previous incoming edge
          // based on the incoming mask.
          Entry[part] = Builder.CreateSelect(Cond[part], In0[part], Entry[part],
                                             "predphi");
      }
    }
    return;
  }

  // This PHINode must be an induction variable.
  // Make sure that we know about it.
  assert(Legal->getInductionVars()->count(P) && "Not an induction variable");

  InductionDescriptor II = Legal->getInductionVars()->lookup(P);
  const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // FIXME: The newly created binary instructions should contain nsw/nuw flags,
  // which can be found from the original scalar operations.
  switch (II.getKind()) {
  case InductionDescriptor::IK_NoInduction:
    llvm_unreachable("Unknown induction");
  case InductionDescriptor::IK_IntInduction:
    return widenIntInduction(P, Entry);
  case InductionDescriptor::IK_PtrInduction: {
    // Handle the pointer induction variable case.
    assert(P->getType()->isPointerTy() && "Unexpected type.");
    // This is the normalized GEP that starts counting at zero.
    Value *PtrInd = Induction;
    PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
    // This is the vector of results. Notice that we don't generate
    // vector geps because scalar geps result in better code.
    for (unsigned part = 0; part < UF; ++part) {
      if (VF == 1) {
        int EltIndex = part;
        Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex);
        Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
        Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
        SclrGep->setName("next.gep");
        Entry[part] = SclrGep;
        continue;
      }

      Value *VecVal = UndefValue::get(VectorType::get(P->getType(), VF));
      for (unsigned int i = 0; i < VF; ++i) {
        int EltIndex = i + part * VF;
        Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex);
        Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
        Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
        SclrGep->setName("next.gep");
        VecVal = Builder.CreateInsertElement(VecVal, SclrGep,
                                             Builder.getInt32(i), "insert.gep");
      }
      Entry[part] = VecVal;
    }
    return;
  }
  case InductionDescriptor::IK_FpInduction: {
    assert(P->getType() == II.getStartValue()->getType() &&
           "Types must match");
    // Handle other induction variables that are now based on the
    // canonical one.
    assert(P != OldInduction && "Primary induction can be integer only");

    Value *V = Builder.CreateCast(Instruction::SIToFP, Induction, P->getType());
    V = II.transform(Builder, V, PSE.getSE(), DL);
    V->setName("fp.offset.idx");

    // Now we have scalar op: %fp.offset.idx = StartVal +/- Induction*StepVal

    Value *Broadcasted = getBroadcastInstrs(V);
    // After broadcasting the induction variable we need to make the vector
    // consecutive by adding StepVal*0, StepVal*1, StepVal*2, etc.
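    // For example (illustrative values, not taken from real output): with
    // VF = 4, UF = 1, a start value of 1.0 and a step of 0.5, the broadcast
    // <4 x float> <1.0, 1.0, 1.0, 1.0> becomes
    // <4 x float> <1.0, 1.5, 2.0, 2.5> once the step vector is added.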
    Value *StepVal = cast<SCEVUnknown>(II.getStep())->getValue();
    for (unsigned part = 0; part < UF; ++part)
      Entry[part] = getStepVector(Broadcasted, VF * part, StepVal,
                                  II.getInductionOpcode());
    return;
  }
  }
}

/// A helper function for checking whether an integer division-related
/// instruction may divide by zero (in which case it must be predicated if
/// executed conditionally in the scalar code).
/// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing
/// the division, but can do so without predication.
static bool mayDivideByZero(Instruction &I) {
  assert((I.getOpcode() == Instruction::UDiv ||
          I.getOpcode() == Instruction::SDiv ||
          I.getOpcode() == Instruction::URem ||
          I.getOpcode() == Instruction::SRem) &&
         "Unexpected instruction");
  Value *Divisor = I.getOperand(1);
  auto *CInt = dyn_cast<ConstantInt>(Divisor);
  return !CInt || CInt->isZero();
}

void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
  // For each instruction in the old loop.
  for (Instruction &I : *BB) {
    VectorParts &Entry = WidenMap.get(&I);

    switch (I.getOpcode()) {
    case Instruction::Br:
      // Nothing to do for PHIs and BR, since we already took care of the
      // loop control flow instructions.
      continue;
    case Instruction::PHI: {
      // Vectorize PHINodes.
      widenPHIInstruction(&I, Entry, UF, VF, PV);
      continue;
    } // End of PHI.

    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::URem:
      // Scalarize with predication if this instruction may divide by zero and
      // the block's execution is conditional; otherwise fall through to the
      // regular binary-operator widening below.
      if (mayDivideByZero(I) && Legal->blockNeedsPredication(I.getParent())) {
        scalarizeInstruction(&I, true);
        continue;
      }
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      // Just widen binops.
      auto *BinOp = cast<BinaryOperator>(&I);
      setDebugLocFromInst(Builder, BinOp);
      VectorParts &A = getVectorValue(BinOp->getOperand(0));
      VectorParts &B = getVectorValue(BinOp->getOperand(1));

      // Use this vector value for all users of the original instruction.
      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]);

        if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
          VecOp->copyIRFlags(BinOp);

        Entry[Part] = V;
      }

      addMetadata(Entry, BinOp);
      break;
    }
    case Instruction::Select: {
      // Widen selects.
      // If the selector is loop invariant we can create a select
      // instruction with a scalar condition. Otherwise, use vector-select.
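      // For example (illustrative IR, VF = 4): for a loop-invariant i1 %flag
      // this produces
      //   %r = select i1 %flag, <4 x i32> %va, <4 x i32> %vb
      // whereas a lane-varying condition yields
      //   %r = select <4 x i1> %mask, <4 x i32> %va, <4 x i32> %vb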
      auto *SE = PSE.getSE();
      bool InvariantCond =
          SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
      setDebugLocFromInst(Builder, &I);

      // The condition can be loop invariant but still defined inside the
      // loop. This means that we can't just use the original 'cond' value.
      // We have to take the 'vectorized' value and pick the first lane.
      // Instcombine will make this a no-op.
      VectorParts &Cond = getVectorValue(I.getOperand(0));
      VectorParts &Op0 = getVectorValue(I.getOperand(1));
      VectorParts &Op1 = getVectorValue(I.getOperand(2));

      Value *ScalarCond =
          (VF == 1)
              ? Cond[0]
              : Builder.CreateExtractElement(Cond[0], Builder.getInt32(0));

      for (unsigned Part = 0; Part < UF; ++Part) {
        Entry[Part] = Builder.CreateSelect(
            InvariantCond ? ScalarCond : Cond[Part], Op0[Part], Op1[Part]);
      }

      addMetadata(Entry, &I);
      break;
    }

    case Instruction::ICmp:
    case Instruction::FCmp: {
      // Widen compares. Generate vector compares.
      bool FCmp = (I.getOpcode() == Instruction::FCmp);
      auto *Cmp = cast<CmpInst>(&I);
      setDebugLocFromInst(Builder, Cmp);
      VectorParts &A = getVectorValue(Cmp->getOperand(0));
      VectorParts &B = getVectorValue(Cmp->getOperand(1));
      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *C = nullptr;
        if (FCmp) {
          C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]);
          cast<FCmpInst>(C)->copyFastMathFlags(Cmp);
        } else {
          C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]);
        }
        Entry[Part] = C;
      }

      addMetadata(Entry, &I);
      break;
    }

    case Instruction::Store:
    case Instruction::Load:
      vectorizeMemoryInstruction(&I);
      break;
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      auto *CI = cast<CastInst>(&I);
      setDebugLocFromInst(Builder, CI);

      // Optimize the special case where the source is a constant integer
      // induction variable. Notice that we can only optimize the 'trunc' case
      // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
      // (c) other casts depend on pointer size.
      auto ID = Legal->getInductionVars()->lookup(OldInduction);
      if (isa<TruncInst>(CI) && CI->getOperand(0) == OldInduction &&
          ID.getConstIntStepValue()) {
        widenIntInduction(OldInduction, Entry, cast<TruncInst>(CI));
        addMetadata(Entry, &I);
        break;
      }

      // Vectorize casts.
      Type *DestTy =
          (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);

      VectorParts &A = getVectorValue(CI->getOperand(0));
      for (unsigned Part = 0; Part < UF; ++Part)
        Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy);
      addMetadata(Entry, &I);
      break;
    }

    case Instruction::Call: {
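      // As a sketch of what this case produces (the routine names below are
      // illustrative, not guaranteed by any particular target or TLI): a call
      // to llvm.sqrt.f32 may be widened to llvm.sqrt.v4f32 when the intrinsic
      // is cheap enough, while a libm call such as sinf may instead be mapped
      // by TLI to a vector library routine, or be scalarized if neither
      // option is profitable.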
      // Ignore dbg intrinsics.
      if (isa<DbgInfoIntrinsic>(I))
        break;
      setDebugLocFromInst(Builder, &I);

      Module *M = BB->getParent()->getParent();
      auto *CI = cast<CallInst>(&I);

      StringRef FnName = CI->getCalledFunction()->getName();
      Function *F = CI->getCalledFunction();
      Type *RetTy = ToVectorTy(CI->getType(), VF);
      SmallVector<Type *, 4> Tys;
      for (Value *ArgOperand : CI->arg_operands())
        Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));

      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
                 ID == Intrinsic::lifetime_start)) {
        scalarizeInstruction(&I);
        break;
      }
      // Determine whether the vectorized version of the instruction should be
      // an intrinsic call or a call to a vectorized library function, i.e.
      // whether an intrinsic call is at least as cheap as the equivalent
      // library call; if neither form is available, scalarize the call.
      bool NeedToScalarize;
      unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
      bool UseVectorIntrinsic =
          ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
      if (!UseVectorIntrinsic && NeedToScalarize) {
        scalarizeInstruction(&I);
        break;
      }

      for (unsigned Part = 0; Part < UF; ++Part) {
        SmallVector<Value *, 4> Args;
        for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
          Value *Arg = CI->getArgOperand(i);
          // Some intrinsics have a scalar argument - don't replace it with a
          // vector.
          if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) {
            VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i));
            Arg = VectorArg[Part];
          }
          Args.push_back(Arg);
        }

        Function *VectorF;
        if (UseVectorIntrinsic) {
          // Use the vector version of the intrinsic.
          Type *TysForDecl[] = {CI->getType()};
          if (VF > 1)
            TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
          VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
        } else {
          // Use the vector version of the library call.
          StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
          assert(!VFnName.empty() && "Vector function name is empty.");
          VectorF = M->getFunction(VFnName);
          if (!VectorF) {
            // Generate a declaration.
            FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
            VectorF =
                Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
            VectorF->copyAttributesFrom(F);
          }
        }
        assert(VectorF && "Can't create vector function.");

        SmallVector<OperandBundleDef, 1> OpBundles;
        CI->getOperandBundlesAsDefs(OpBundles);
        CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

        if (isa<FPMathOperator>(V))
          V->copyFastMathFlags(CI);

        Entry[Part] = V;
      }

      addMetadata(Entry, &I);
      break;
    }

    default:
      // All other instructions are unsupported. Scalarize them.
      scalarizeInstruction(&I);
      break;
    } // end of switch.
  } // end of for_each instr.
}

void InnerLoopVectorizer::updateAnalysis() {
  // Forget the original basic block.
  PSE.getSE()->forgetLoop(OrigLoop);

  // Update the dominator tree information.
  assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
         "Entry does not dominate exit.");

  // We don't predicate stores by this point, so the vector body should be a
  // single loop.
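  // After vectorization the relevant CFG is roughly (block names
  // illustrative): the bypass checks branch either to the vector preheader
  // or to the scalar preheader; the vector body falls through to the middle
  // block, which in turn branches to the scalar preheader (to run remainder
  // iterations) or to the exit block. The dominator updates below encode
  // exactly this shape.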
  DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader);

  DT->addNewBlock(LoopMiddleBlock, LoopVectorBody);
  DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
  DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
  DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);

  DEBUG(DT->verifyDomTree());
}

/// \brief Check whether it is safe to if-convert this phi node.
///
/// Phi nodes with constant expressions that can trap are not safe to if
/// convert.
static bool canIfConvertPHINodes(BasicBlock *BB) {
  for (Instruction &I : *BB) {
    auto *Phi = dyn_cast<PHINode>(&I);
    if (!Phi)
      return true;
    for (Value *V : Phi->incoming_values())
      if (auto *C = dyn_cast<Constant>(V))
        if (C->canTrap())
          return false;
  }
  return true;
}

bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
  if (!EnableIfConversion) {
    emitAnalysis(VectorizationReport() << "if-conversion is disabled");
    return false;
  }

  assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");

  // A list of pointers that we can safely read and write to.
  SmallPtrSet<Value *, 8> SafePointers;

  // Collect safe addresses.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (blockNeedsPredication(BB))
      continue;

    for (Instruction &I : *BB)
      if (auto *Ptr = getPointerOperand(&I))
        SafePointers.insert(Ptr);
  }

  // Collect the blocks that need predication.
  BasicBlock *Header = TheLoop->getHeader();
  for (BasicBlock *BB : TheLoop->blocks()) {
    // We don't support switch statements inside loops.
    if (!isa<BranchInst>(BB->getTerminator())) {
      emitAnalysis(VectorizationReport(BB->getTerminator())
                   << "loop contains a switch statement");
      return false;
    }

    // We must be able to predicate all blocks that need to be predicated.
    if (blockNeedsPredication(BB)) {
      if (!blockCanBePredicated(BB, SafePointers)) {
        emitAnalysis(VectorizationReport(BB->getTerminator())
                     << "control flow cannot be substituted for a select");
        return false;
      }
    } else if (BB != Header && !canIfConvertPHINodes(BB)) {
      emitAnalysis(VectorizationReport(BB->getTerminator())
                   << "control flow cannot be substituted for a select");
      return false;
    }
  }

  // We can if-convert this loop.
  return true;
}

bool LoopVectorizationLegality::canVectorize() {
  // We must have a loop in canonical form. Loops with indirectbr in them cannot
  // be canonicalized.
  if (!TheLoop->getLoopPreheader()) {
    emitAnalysis(VectorizationReport()
                 << "loop control flow is not understood by vectorizer");
    return false;
  }

  // FIXME: This code is currently dead, since any loop that gets sent to
  // LoopVectorizationLegality is already an innermost loop.
  //
  // We can only vectorize innermost loops.
  if (!TheLoop->empty()) {
    emitAnalysis(VectorizationReport() << "loop is not the innermost loop");
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    emitAnalysis(VectorizationReport()
                 << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We must have a single exiting block.
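  // For example (illustrative C), a loop such as
  //   for (i = 0; i < n; i++)
  //     if (a[i] == key) break;
  // has two exiting blocks (the early 'break' test and the latch), so
  // getExitingBlock() returns null and we reject it here.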
  if (!TheLoop->getExitingBlock()) {
    emitAnalysis(VectorizationReport()
                 << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    emitAnalysis(VectorizationReport()
                 << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We need to have a loop header.
  DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
               << '\n');

  // Check if we can if-convert non-single-bb loops.
  unsigned NumBlocks = TheLoop->getNumBlocks();
  if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
    DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE.getBackedgeTakenCount();
  if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
    emitAnalysis(VectorizationReport()
                 << "could not determine number of loop iterations");
    DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
    return false;
  }

  // Check if we can vectorize the instructions and CFG in this loop.
  if (!canVectorizeInstrs()) {
    DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
    return false;
  }

  // Go over each instruction and look at memory deps.
  if (!canVectorizeMemory()) {
    DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
    return false;
  }

  DEBUG(dbgs() << "LV: We can vectorize this loop"
               << (LAI->getRuntimePointerChecking()->Need
                       ? " (with a runtime bound check)"
                       : "")
               << "!\n");

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved)
    InterleaveInfo.analyzeInterleaving(*getSymbolicStrides());

  // Collect all instructions that are known to be uniform after vectorization.
  collectLoopUniforms();

  // Collect all instructions that are known to be scalar after vectorization.
  collectLoopScalars();

  unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
  if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
    SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;

  if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
    emitAnalysis(VectorizationReport()
                 << "Too many SCEV assumptions need to be made and checked "
                 << "at runtime");
    DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
    return false;
  }

  // Okay! We can vectorize. At this point we don't have any other mem analysis
  // which may limit our maximum vectorization factor, so just return true with
  // no restrictions.
  return true;
}

static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
  if (Ty->isPointerTy())
    return DL.getIntPtrType(Ty);

  // It is possible that chars or shorts overflow when we ask for the loop's
  // trip count; work around this by changing the type size.
  if (Ty->getScalarSizeInBits() < 32)
    return Type::getInt32Ty(Ty->getContext());

  return Ty;
}

static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
  Ty0 = convertPointerToIntegerType(DL, Ty0);
  Ty1 = convertPointerToIntegerType(DL, Ty1);
  if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
    return Ty0;
  return Ty1;
}

/// \brief Check that the instruction has outside loop users and is not an
/// identified reduction variable.
static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
                               SmallPtrSetImpl<Value *> &AllowedExit) {
  // Reduction and Induction instructions are allowed to have exit users. All
  // other instructions must not have external users.
  if (!AllowedExit.count(Inst))
    // Check that all of the users of the instruction are inside the loop.
    for (User *U : Inst->users()) {
      Instruction *UI = cast<Instruction>(U);
      // This user may be a reduction exit value.
      if (!TheLoop->contains(UI)) {
        DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
        return true;
      }
    }
  return false;
}

void LoopVectorizationLegality::addInductionPhi(
    PHINode *Phi, const InductionDescriptor &ID,
    SmallPtrSetImpl<Value *> &AllowedExit) {
  Inductions[Phi] = ID;
  Type *PhiTy = Phi->getType();
  const DataLayout &DL = Phi->getModule()->getDataLayout();

  // Get the widest type.
  if (!PhiTy->isFloatingPointTy()) {
    if (!WidestIndTy)
      WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
    else
      WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
  }

  // Int inductions are special because we only allow one IV.
  if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
      ID.getConstIntStepValue() && ID.getConstIntStepValue()->isOne() &&
      isa<Constant>(ID.getStartValue()) &&
      cast<Constant>(ID.getStartValue())->isNullValue()) {

    // Use the phi node with the widest type as induction. Use the last
    // one if there are multiple (no good reason for doing this other
    // than it is expedient). We've checked that it begins at zero and
    // steps by one, so this is a canonical induction variable.
    if (!Induction || PhiTy == WidestIndTy)
      Induction = Phi;
  }

  // Both the PHI node itself, and the "post-increment" value feeding
  // back into the PHI node may have external users.
  AllowedExit.insert(Phi);
  AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));

  DEBUG(dbgs() << "LV: Found an induction variable.\n");
}

bool LoopVectorizationLegality::canVectorizeInstrs() {
  BasicBlock *Header = TheLoop->getHeader();

  // Look for the attribute signaling the absence of NaNs.
  Function &F = *Header->getParent();
  HasFunNoNaNAttr =
      F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";

  // For each block in the loop.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the instructions in the block and look for hazards.
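    // For instance (illustrative C), one such hazard: in
    //   int last; for (i = 0; i < n; i++) last = a[i]; use(last);
    // the value assigned to 'last' is live past the loop; unless it is
    // recognized as a reduction or induction exit value, the loop is
    // rejected by the checks below.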
    for (Instruction &I : *BB) {
      if (auto *Phi = dyn_cast<PHINode>(&I)) {
        Type *PhiTy = Phi->getType();
        // Check that this PHI type is allowed.
        if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
            !PhiTy->isPointerTy()) {
          emitAnalysis(VectorizationReport(Phi)
                       << "loop control flow is not understood by vectorizer");
          DEBUG(dbgs() << "LV: Found a non-int non-pointer PHI.\n");
          return false;
        }

        // If this PHINode is not in the header block, then we know that we
        // can convert it to select during if-conversion. No need to check if
        // the PHIs in this block are induction or reduction variables.
        if (BB != Header) {
          // Check that this instruction has no outside users or is an
          // identified reduction value with an outside user.
          if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
            continue;
          emitAnalysis(VectorizationReport(Phi)
                       << "value could not be identified as "
                          "an induction or reduction variable");
          return false;
        }

        // We only allow if-converted PHIs with exactly two incoming values.
        if (Phi->getNumIncomingValues() != 2) {
          emitAnalysis(VectorizationReport(Phi)
                       << "control flow not understood by vectorizer");
          DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
          return false;
        }

        RecurrenceDescriptor RedDes;
        if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
          if (RedDes.hasUnsafeAlgebra())
            Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
          AllowedExit.insert(RedDes.getLoopExitInstr());
          Reductions[Phi] = RedDes;
          continue;
        }

        InductionDescriptor ID;
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
          addInductionPhi(Phi, ID, AllowedExit);
          if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr)
            Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst());
          continue;
        }

        if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) {
          FirstOrderRecurrences.insert(Phi);
          continue;
        }

        // As a last resort, coerce the PHI to an AddRec expression
        // and re-try classifying it as an induction PHI.
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
          addInductionPhi(Phi, ID, AllowedExit);
          continue;
        }

        emitAnalysis(VectorizationReport(Phi)
                     << "value that could not be identified as a "
                        "reduction is used outside the loop");
        DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
        return false;
      } // end of PHI handling

      // We handle calls that:
      //   * Are debug info intrinsics.
      //   * Have a mapping to an IR intrinsic.
      //   * Have a vector version available.
      auto *CI = dyn_cast<CallInst>(&I);
      if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
          !isa<DbgInfoIntrinsic>(CI) &&
          !(CI->getCalledFunction() && TLI &&
            TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
        emitAnalysis(VectorizationReport(CI)
                     << "call instruction cannot be vectorized");
        DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
        return false;
      }

      // Intrinsics such as powi, cttz and ctlz are legal to vectorize if the
      // second argument is the same (i.e. loop invariant) on every iteration.
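      // For example (illustrative IR),
      //   %p = call float @llvm.powi.f32(float %x, i32 %k)
      // can only be widened to @llvm.powi.v4f32 if %k is the same value for
      // all lanes, which is what the loop-invariance check below enforces.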
      if (CI && hasVectorInstrinsicScalarOpd(
                    getVectorIntrinsicIDForCall(CI, TLI), 1)) {
        auto *SE = PSE.getSE();
        if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
          emitAnalysis(VectorizationReport(CI)
                       << "intrinsic instruction cannot be vectorized");
          DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
          return false;
        }
      }

      // Check that the instruction return type is vectorizable.
      // Also, we can't vectorize extractelement instructions.
      if ((!VectorType::isValidElementType(I.getType()) &&
           !I.getType()->isVoidTy()) ||
          isa<ExtractElementInst>(I)) {
        emitAnalysis(VectorizationReport(&I)
                     << "instruction return type cannot be vectorized");
        DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
        return false;
      }

      // Check that the stored type is vectorizable.
      if (auto *ST = dyn_cast<StoreInst>(&I)) {
        Type *T = ST->getValueOperand()->getType();
        if (!VectorType::isValidElementType(T)) {
          emitAnalysis(VectorizationReport(ST)
                       << "store instruction cannot be vectorized");
          return false;
        }

        // FP instructions can allow unsafe algebra, and can thus be vectorized
        // by non-IEEE-754-compliant SIMD units.
        // This applies to floating-point math operations and calls, not memory
        // operations, shuffles, or casts, as they don't change precision or
        // semantics.
      } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
                 !I.hasUnsafeAlgebra()) {
        DEBUG(dbgs() << "LV: Found an FP op without unsafe algebra.\n");
        Hints->setPotentiallyUnsafe();
      }

      // Reduction instructions are allowed to have exit users.
      // All other instructions must not have external users.
      if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
        emitAnalysis(VectorizationReport(&I)
                     << "value cannot be used outside the loop");
        return false;
      }

    } // next instr.
  }

  if (!Induction) {
    DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
    if (Inductions.empty()) {
      emitAnalysis(VectorizationReport()
                   << "loop induction variable could not be identified");
      return false;
    }
  }

  // Now we know the widest induction type, check if our found induction
  // is the same size. If it's not, unset it here and InnerLoopVectorizer
  // will create another.
  if (Induction && WidestIndTy != Induction->getType())
    Induction = nullptr;

  return true;
}

void LoopVectorizationLegality::collectLoopScalars() {

  // If an instruction is uniform after vectorization, it will remain scalar.
  Scalars.insert(Uniforms.begin(), Uniforms.end());

  // Collect the getelementptr instructions that will not be vectorized. A
  // getelementptr instruction is only vectorized if it is used for a legal
  // gather or scatter operation.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
        Scalars.insert(GEP);
        continue;
      }
      auto *Ptr = getPointerOperand(&I);
      if (!Ptr)
        continue;
      auto *GEP = getGEPInstruction(Ptr);
      if (GEP && isLegalGatherOrScatter(&I))
        Scalars.erase(GEP);
    }

  // An induction variable will remain scalar if all users of the induction
  // variable and induction variable update remain scalar.
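  // For example (illustrative C), in
  //   for (i = 0; i < n; i++) a[i] = x;
  // 'i' and 'i++' feed only the address computation and the loop test, both
  // of which stay scalar, so the induction variable remains scalar too. If
  // the loop instead stored 'i' itself into a vector lane, a widened IV
  // would be needed.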
  auto *Latch = TheLoop->getLoopLatch();
  for (auto &Induction : *getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are scalar after
    // vectorization.
    auto ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Scalars.count(I);
    });
    if (!ScalarInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // scalar after vectorization.
    auto ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == Ind || !TheLoop->contains(I) || Scalars.count(I);
    });
    if (!ScalarIndUpdate)
      continue;

    // The induction variable and its update instruction will remain scalar.
    Scalars.insert(Ind);
    Scalars.insert(IndUpdate);
  }
}

void LoopVectorizationLegality::collectLoopUniforms() {
  // We now know that the loop is vectorizable!
  // Collect instructions inside the loop that will remain uniform after
  // vectorization.

  // Global values, params and instructions outside of the current loop are
  // out of scope.
  auto isOutOfScope = [&](Value *V) -> bool {
    Instruction *I = dyn_cast<Instruction>(V);
    return (!I || !TheLoop->contains(I));
  };

  SetVector<Instruction *> Worklist;
  BasicBlock *Latch = TheLoop->getLoopLatch();
  // Start with the conditional branch.
  if (!isOutOfScope(Latch->getTerminator()->getOperand(0))) {
    Instruction *Cmp = cast<Instruction>(Latch->getTerminator()->getOperand(0));
    Worklist.insert(Cmp);
    DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
  }

  // Add all consecutive pointer values; these values will be uniform after
  // vectorization (and subsequent cleanup). Although non-consecutive, we also
  // add the pointer operands of interleaved accesses since they are treated
  // like consecutive pointers during vectorization.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      Instruction *Ptr = nullptr;
      if (I.getType()->isPointerTy() && isConsecutivePtr(&I))
        Ptr = &I;
      else if (isAccessInterleaved(&I))
        Ptr = cast<Instruction>(getPointerOperand(&I));
      else
        continue;
      Worklist.insert(Ptr);
      DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ptr << "\n");
    }

  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should be either already inside Worklist, or
  // out of scope. This ensures a uniform instruction will only be used
  // by uniform instructions or out-of-scope instructions.
  unsigned idx = 0;
  while (idx != Worklist.size()) {
    Instruction *I = Worklist[idx++];

    for (auto OV : I->operand_values()) {
      if (isOutOfScope(OV))
        continue;
      auto *OI = cast<Instruction>(OV);
      if (all_of(OI->users(), [&](User *U) -> bool {
            return isOutOfScope(U) || Worklist.count(cast<Instruction>(U));
          })) {
        Worklist.insert(OI);
        DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
      }
    }
  }

  // For an instruction to be added into Worklist above, all its users inside
  // the current loop must already have been added.
  // This condition can never
  // hold for phi nodes, which are always part of a dependence cycle: every
  // instruction in the cycle depends on the others being added to Worklist
  // first, so none of them would ever be added. That is why we process PHIs
  // separately here.
  for (auto &Induction : *getInductionVars()) {
    auto *PN = Induction.first;
    auto *UpdateV = PN->getIncomingValueForBlock(TheLoop->getLoopLatch());
    if (all_of(PN->users(),
               [&](User *U) -> bool {
                 return U == UpdateV || isOutOfScope(U) ||
                        Worklist.count(cast<Instruction>(U));
               }) &&
        all_of(UpdateV->users(), [&](User *U) -> bool {
          return U == PN || isOutOfScope(U) ||
                 Worklist.count(cast<Instruction>(U));
        })) {
      Worklist.insert(cast<Instruction>(PN));
      Worklist.insert(cast<Instruction>(UpdateV));
      DEBUG(dbgs() << "LV: Found uniform instruction: " << *PN << "\n");
      DEBUG(dbgs() << "LV: Found uniform instruction: " << *UpdateV << "\n");
    }
  }

  Uniforms.insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationLegality::canVectorizeMemory() {
  LAI = &(*GetLAA)(*TheLoop);
  InterleaveInfo.setLAI(LAI);
  auto &OptionalReport = LAI->getReport();
  if (OptionalReport)
    emitAnalysis(VectorizationReport(*OptionalReport));
  if (!LAI->canVectorizeMemory())
    return false;

  if (LAI->hasStoreToLoopInvariantAddress()) {
    emitAnalysis(
        VectorizationReport()
        << "write to a loop invariant address could not be vectorized");
    DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
    return false;
  }

  Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
  PSE.addPredicate(LAI->getPSE().getUnionPredicate());

  return true;
}

bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
  Value *In0 = const_cast<Value *>(V);
  PHINode *PN = dyn_cast_or_null<PHINode>(In0);
  if (!PN)
    return false;

  return Inductions.count(PN);
}

bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) {
  return FirstOrderRecurrences.count(Phi);
}

bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
  return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
}

bool LoopVectorizationLegality::blockCanBePredicated(
    BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) {
  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  for (Instruction &I : *BB) {
    // Check that we don't have a constant expression that can trap as operand.
    for (Value *Operand : I.operands()) {
      if (auto *C = dyn_cast<Constant>(Operand))
        if (C->canTrap())
          return false;
    }
    // We might be able to hoist the load.
    if (I.mayReadFromMemory()) {
      auto *LI = dyn_cast<LoadInst>(&I);
      if (!LI)
        return false;
      if (!SafePtrs.count(LI->getPointerOperand())) {
        if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) ||
            isLegalMaskedGather(LI->getType())) {
          MaskedOp.insert(LI);
          continue;
        }
        // !llvm.mem.parallel_loop_access implies if-conversion safety.
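        // For instance (illustrative C), in
        //   for (i...) if (c[i]) sum += a[i];
        // the load of a[i] must either be emitted as a masked load (handled
        // above) or be known safe to execute unconditionally; the
        // parallel-loop annotation provides the latter guarantee.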
        if (IsAnnotatedParallel)
          continue;
        return false;
      }
    }

    if (I.mayWriteToMemory()) {
      auto *SI = dyn_cast<StoreInst>(&I);
      // We only support predication of stores in basic blocks with one
      // predecessor.
      if (!SI)
        return false;

      // Build a masked store if it is legal for the target.
      if (isLegalMaskedStore(SI->getValueOperand()->getType(),
                             SI->getPointerOperand()) ||
          isLegalMaskedScatter(SI->getValueOperand()->getType())) {
        MaskedOp.insert(SI);
        continue;
      }

      bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0);
      bool isSinglePredecessor = SI->getParent()->getSinglePredecessor();

      if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr ||
          !isSinglePredecessor)
        return false;
    }
    if (I.mayThrow())
      return false;
  }

  return true;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {

  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      auto *LI = dyn_cast<LoadInst>(&I);
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!LI && !SI)
        continue;

      Value *Ptr = getPointerOperand(&I);
      int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());

      // An alignment of 0 means target ABI alignment.
      unsigned Align = LI ? LI->getAlignment() : SI->getAlignment();
      if (!Align)
        Align = DL.getABITypeAlignment(PtrTy->getElementType());

      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];  // (1)
//                                A[i] = b;  // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
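//
// As a concrete illustration (source-level, stride-2):
//
//   for (i = 0; i < n; i++) {
//     sum += A[2 * i];      // (1)
//     sum += A[2 * i + 1];  // (2)
//   }
//
// Loads (1) and (2) have the same stride and element size and are one element
// apart, so they can form a single interleave group serviced by one wide load
// plus shuffles.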
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    const ValueToValueMap &Strides) {
  DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup *Group = nullptr;
    if (isStrided(DesB.Stride)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a;  // (1)
      //                    | A[i-1] = b;  // (2) |
      //                      A[i-3] = c;  // (3)
      //                      A[i]   = d;  // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {

        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup *StoreGroup = getInterleaveGroup(A);
          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory())
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // Ignore A if either A or B is in a predicated block. Although we
      // currently prevent group formation for predicated accesses, we may be
      // able to relax this limitation in the future once we handle more
      // complicated blocks.
      if (isPredicated(A->getParent()) || isPredicated(B->getParent()))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Align)) {
        DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                     << "    into the interleave group with" << *B << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (InterleaveGroup *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor())
      releaseGroup(Group);

  // If there is a non-reversed interleaved load group with gaps, we will need
  // to execute at least one scalar epilogue iteration. This will ensure that
  // we don't speculatively access memory out-of-bounds. Note that we only need
  // to look for a member at index factor - 1, since every group must have a
  // member at index zero.
  for (InterleaveGroup *Group : LoadGroups)
    if (!Group->getMember(Group->getFactor() - 1)) {
      if (Group->isReverse()) {
        releaseGroup(Group);
      } else {
        DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
        RequiresScalarEpilogue = true;
      }
    }
}

LoopVectorizationCostModel::VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
  // Width 1 means no vectorization.
  VectorizationFactor Factor = {1U, 0U};
  if (OptForSize && Legal->getRuntimePointerChecking()->Need) {
    emitAnalysis(
        VectorizationReport()
        << "runtime pointer checks needed. Enable vectorization of this "
           "loop with '#pragma clang loop vectorize(enable)' when "
           "compiling with -Os/-Oz");
    DEBUG(dbgs()
          << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
    return Factor;
  }

  if (!EnableCondStoresVectorization && Legal->getNumPredStores()) {
    emitAnalysis(
        VectorizationReport()
        << "store that is conditionally executed prevents vectorization");
    DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
    return Factor;
  }

  // Find the trip count.
  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');

  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  unsigned WidestRegister = TTI.getRegisterBitWidth(true);
  unsigned MaxSafeDepDist = -1U;

  // Get the maximum safe dependence distance in bits computed by LAA. If the
  // loop contains any interleaved accesses, we divide the dependence distance
  // by the maximum interleave factor of all interleaved groups. Note that
  // although the division ensures correctness, this is a fairly conservative
  // computation because the maximum distance computed by LAA may not involve
  // any of the interleaved accesses.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist =
        Legal->getMaxSafeDepDistBytes() * 8 / Legal->getMaxInterleaveFactor();
  WidestRegister =
      (WidestRegister < MaxSafeDepDist) ? WidestRegister : MaxSafeDepDist;
  unsigned MaxVectorSize = WidestRegister / WidestType;

  DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
               << WidestType << " bits.\n");
  DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister
               << " bits.\n");

  if (MaxVectorSize == 0) {
    DEBUG(dbgs() << "LV: The target has no vector registers.\n");
    MaxVectorSize = 1;
  }

  assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
                                " into one vector!");

  unsigned VF = MaxVectorSize;
  if (MaximizeBandwidth && !OptForSize) {
    // Collect all viable vectorization factors.
    SmallVector<unsigned, 8> VFs;
    unsigned NewMaxVectorSize = WidestRegister / SmallestType;
    for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2)
      VFs.push_back(VS);

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than existing
    // ones.
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
    for (int i = RUs.size() - 1; i >= 0; --i) {
      if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
        VF = VFs[i];
        break;
      }
    }
  }

  // If we optimize the program for size, avoid creating the tail loop.
  if (OptForSize) {
    // If we are unable to calculate the trip count, then don't try to
    // vectorize.
    if (TC < 2) {
      emitAnalysis(
          VectorizationReport()
          << "unable to calculate the loop count due to complex control flow");
      DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
      return Factor;
    }

    // Find the maximum SIMD width that can fit within the trip count.
    VF = TC % MaxVectorSize;

    if (VF == 0)
      VF = MaxVectorSize;
    else {
      // If the trip count that we found modulo the vectorization factor is not
      // zero then we require a tail.
      emitAnalysis(VectorizationReport()
                   << "cannot optimize for size and vectorize at the "
                      "same time. Enable vectorization of this loop "
                      "with '#pragma clang loop vectorize(enable)' "
                      "when compiling with -Os/-Oz");
      DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
      return Factor;
    }
  }

  int UserVF = Hints->getWidth();
  if (UserVF != 0) {
    assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
    DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");

    Factor.Width = UserVF;
    return Factor;
  }

  float Cost = expectedCost(1).first;
#ifndef NDEBUG
  const float ScalarCost = Cost;
#endif /* NDEBUG */
  unsigned Width = 1;
  DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  // Ignore scalar width, because the user explicitly wants vectorization.
  if (ForceVectorization && VF > 1) {
    Width = 2;
    Cost = expectedCost(Width).first / (float)Width;
  }

  for (unsigned i = 2; i <= VF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
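    // For example (illustrative numbers only): if the scalar loop costs 8 per
    // iteration and the VF=4 loop costs 20 per vector iteration, the per-lane
    // cost is 20 / 4 = 5, which beats the scalar cost of 8, so VF=4 would be
    // preferred over VF=1.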
    VectorizationCostTy C = expectedCost(i);
    float VectorCost = C.first / (float)i;
    DEBUG(dbgs() << "LV: Vector loop of width " << i
                 << " costs: " << (int)VectorCost << ".\n");
    if (!C.second && !ForceVectorization) {
      DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }
    if (VectorCost < Cost) {
      Cost = VectorCost;
      Width = i;
    }
  }

  DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
        << "LV: Vectorization seems not to be beneficial, "
        << "but was forced by a user.\n");
  DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
  Factor.Width = Width;
  Factor.Cost = Width * Cost;
  return Factor;
}

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the loop.
    for (Instruction &I : *BB) {
      Type *T = I.getType();

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (auto *PN = dyn_cast<PHINode>(&I)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      // Ignore loaded pointer types and stored pointer types that are not
      // consecutive. However, we do want to take consecutive stores/loads of
      // pointer vectors into account.
      if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I))
        continue;

      MinWidth = std::min(MinWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
      MaxWidth = std::max(MaxWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
    }
  }

  return {MinWidth, MaxWidth};
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
                                                           unsigned VF,
                                                           unsigned LoopCost) {

  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop overhead.
  // There are many micro-architectural considerations that we can't predict
  // at this level. For example, frontend pressure (on decode or fetch) due to
  // code size, or the number and capabilities of the execution ports.
  //
  // We use the following heuristics to select the interleave count:
  //  1. If the code has reductions, then we interleave to break the cross
  //     iteration dependency.
  //  2. If the loop is really small, then we interleave to reduce the loop
  //     overhead.
  //  3. We don't interleave if we think that we will spill registers to memory
  //     due to the increased register pressure.

  // When we optimize for size, we don't interleave.
  if (OptForSize)
    return 1;

  // We already used the maximum safe dependence distance when selecting the
  // vectorization factor, so don't interleave on top of it.
5822 if (Legal->getMaxSafeDepDistBytes() != -1U) 5823 return 1; 5824 5825 // Do not interleave loops with a relatively small trip count. 5826 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5827 if (TC > 1 && TC < TinyTripCountInterleaveThreshold) 5828 return 1; 5829 5830 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1); 5831 DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 5832 << " registers\n"); 5833 5834 if (VF == 1) { 5835 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 5836 TargetNumRegisters = ForceTargetNumScalarRegs; 5837 } else { 5838 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 5839 TargetNumRegisters = ForceTargetNumVectorRegs; 5840 } 5841 5842 RegisterUsage R = calculateRegisterUsage({VF})[0]; 5843 // We divide by these constants so assume that we have at least one 5844 // instruction that uses at least one register. 5845 R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U); 5846 R.NumInstructions = std::max(R.NumInstructions, 1U); 5847 5848 // We calculate the interleave count using the following formula. 5849 // Subtract the number of loop invariants from the number of available 5850 // registers. These registers are used by all of the interleaved instances. 5851 // Next, divide the remaining registers by the number of registers that is 5852 // required by the loop, in order to estimate how many parallel instances 5853 // fit without causing spills. All of this is rounded down if necessary to be 5854 // a power of two. We want power of two interleave count to simplify any 5855 // addressing operations or alignment considerations. 5856 unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) / 5857 R.MaxLocalUsers); 5858 5859 // Don't count the induction variable as interleaved. 5860 if (EnableIndVarRegisterHeur) 5861 IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) / 5862 std::max(1U, (R.MaxLocalUsers - 1))); 5863 5864 // Clamp the interleave ranges to reasonable counts. 5865 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF); 5866 5867 // Check if the user has overridden the max. 5868 if (VF == 1) { 5869 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5870 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5871 } else { 5872 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5873 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5874 } 5875 5876 // If we did not calculate the cost for VF (because the user selected the VF) 5877 // then we calculate the cost of VF here. 5878 if (LoopCost == 0) 5879 LoopCost = expectedCost(VF).first; 5880 5881 // Clamp the calculated IC to be between the 1 and the max interleave count 5882 // that the target allows. 5883 if (IC > MaxInterleaveCount) 5884 IC = MaxInterleaveCount; 5885 else if (IC < 1) 5886 IC = 1; 5887 5888 // Interleave if we vectorized this loop and there is a reduction that could 5889 // benefit from interleaving. 5890 if (VF > 1 && Legal->getReductionVars()->size()) { 5891 DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5892 return IC; 5893 } 5894 5895 // Note that if we've already vectorized the loop we will have done the 5896 // runtime check and so interleaving won't require further checks. 5897 bool InterleavingRequiresRuntimePointerCheck = 5898 (VF == 1 && Legal->getRuntimePointerChecking()->Need); 5899 5900 // We want to interleave small loops in order to reduce the loop overhead and 5901 // potentially expose ILP opportunities. 
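  // For example, assuming the default SmallLoopCost of 20: a loop body with an
  // estimated cost of 4 could be interleaved up to PowerOf2Floor(20 / 4) = 4
  // times, still clamped by the IC computed above.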
  DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the loop overhead costs 1, and we use the cost model
    // to estimate the cost of the loop body; we then interleave until the
    // loop overhead is about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by max interleave count)
    // are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit it, by default, to 2,
    // so the critical path only gets increased by one reduction operation.
    if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
    return SmallIC;
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  bool HasReductions = (Legal->getReductionVars()->size() > 0);
  if (TTI.enableAggressiveInterleaving(HasReductions)) {
    DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in a topological order and
  // assign a number to each instruction. We use RPO to ensure that defs are
  // met before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi-map that
  // holds the list of intervals that *end* at a specific location. This
  // multi-map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop but are
  // used inside the loop. We need this number separately from the max-interval
  // usage number because, when we unroll, loop-invariant values do not take
  // more registers.
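  //
  // As a small illustration (hypothetical values and indices), for a body
  // such as:
  //   %a = load ...    ; index 0, opens an interval
  //   %b = load ...    ; index 1, opens an interval
  //   %c = add %a, %b  ; index 2, %a and %b are both still open here
  //   store %c, ...    ; index 3, last use of %c
  // two intervals are open when the add is visited, so the maximum local
  // register usage for VF = 1 would be 2.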
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;
  RU.NumInstructions = 0;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  typedef DenseMap<Instruction *, unsigned> IntervalMap;
  // Maps an index to the instruction at that index.
  DenseMap<unsigned, Instruction *> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
  SmallSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  unsigned Index = 0;
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    RU.NumInstructions += BB->size();
    for (Instruction &I : *BB) {
      IdxToInstr[Index++] = &I;

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = Index;
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  typedef SmallVector<Instruction *, 2> InstrList;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallSet<Instruction *, 8> OpenIntervals;

  // Get the size of the widest register.
  unsigned MaxSafeDepDist = -1U;
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
  unsigned WidestRegister =
      std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);

  DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
  auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
    if (Ty->isTokenTy())
      return 0U;
    unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
    return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
  };

  for (unsigned i = 0; i < Index; ++i) {
    Instruction *I = IdxToInstr[i];
    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I))
      continue;

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;

    // For each VF find the maximum usage of registers.
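    // For example, with a 128-bit widest register, an i32 value at VF = 4
    // occupies max(1, 4 * 32 / 128) = 1 register, while VF = 8 would need 2.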
6063 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6064 if (VFs[j] == 1) { 6065 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 6066 continue; 6067 } 6068 6069 // Count the number of live intervals. 6070 unsigned RegUsage = 0; 6071 for (auto Inst : OpenIntervals) { 6072 // Skip ignored values for VF > 1. 6073 if (VecValuesToIgnore.count(Inst)) 6074 continue; 6075 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 6076 } 6077 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 6078 } 6079 6080 DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6081 << OpenIntervals.size() << '\n'); 6082 6083 // Add the current instruction to the list of open intervals. 6084 OpenIntervals.insert(I); 6085 } 6086 6087 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6088 unsigned Invariant = 0; 6089 if (VFs[i] == 1) 6090 Invariant = LoopInvariants.size(); 6091 else { 6092 for (auto Inst : LoopInvariants) 6093 Invariant += GetRegUsage(Inst->getType(), VFs[i]); 6094 } 6095 6096 DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); 6097 DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); 6098 DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n'); 6099 DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n'); 6100 6101 RU.LoopInvariantRegs = Invariant; 6102 RU.MaxLocalUsers = MaxUsages[i]; 6103 RUs[i] = RU; 6104 } 6105 6106 return RUs; 6107 } 6108 6109 LoopVectorizationCostModel::VectorizationCostTy 6110 LoopVectorizationCostModel::expectedCost(unsigned VF) { 6111 VectorizationCostTy Cost; 6112 6113 // For each block. 6114 for (BasicBlock *BB : TheLoop->blocks()) { 6115 VectorizationCostTy BlockCost; 6116 6117 // For each instruction in the old loop. 6118 for (Instruction &I : *BB) { 6119 // Skip dbg intrinsics. 6120 if (isa<DbgInfoIntrinsic>(I)) 6121 continue; 6122 6123 // Skip ignored values. 6124 if (ValuesToIgnore.count(&I)) 6125 continue; 6126 6127 VectorizationCostTy C = getInstructionCost(&I, VF); 6128 6129 // Check if we should override the cost. 6130 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 6131 C.first = ForceTargetInstructionCost; 6132 6133 BlockCost.first += C.first; 6134 BlockCost.second |= C.second; 6135 DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF " 6136 << VF << " For instruction: " << I << '\n'); 6137 } 6138 6139 // We assume that if-converted blocks have a 50% chance of being executed. 6140 // When the code is scalar then some of the blocks are avoided due to CF. 6141 // When the code is vectorized we execute all code paths. 6142 if (VF == 1 && Legal->blockNeedsPredication(BB)) 6143 BlockCost.first /= 2; 6144 6145 Cost.first += BlockCost.first; 6146 Cost.second |= BlockCost.second; 6147 } 6148 6149 return Cost; 6150 } 6151 6152 /// \brief Check whether the address computation for a non-consecutive memory 6153 /// access looks like an unlikely candidate for being merged into the indexing 6154 /// mode. 6155 /// 6156 /// We look for a GEP which has one index that is an induction variable and all 6157 /// other indices are loop invariant. If the stride of this access is also 6158 /// within a small bound we decide that this address computation can likely be 6159 /// merged into the addressing mode. 6160 /// In all other cases, we identify the address computation as complex. 
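///
/// For example, a pointer such as "getelementptr i32, i32* %base, i64 %iv"
/// advancing by 4 bytes per iteration is likely folded into the addressing
/// mode, whereas a constant step larger than MaxMergeDistance (64, below) is
/// classified as complex.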
6161 static bool isLikelyComplexAddressComputation(Value *Ptr, 6162 LoopVectorizationLegality *Legal, 6163 ScalarEvolution *SE, 6164 const Loop *TheLoop) { 6165 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6166 if (!Gep) 6167 return true; 6168 6169 // We are looking for a gep with all loop invariant indices except for one 6170 // which should be an induction variable. 6171 unsigned NumOperands = Gep->getNumOperands(); 6172 for (unsigned i = 1; i < NumOperands; ++i) { 6173 Value *Opd = Gep->getOperand(i); 6174 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6175 !Legal->isInductionVariable(Opd)) 6176 return true; 6177 } 6178 6179 // Now we know we have a GEP ptr, %inv, %ind, %inv. Make sure that the step 6180 // can likely be merged into the address computation. 6181 unsigned MaxMergeDistance = 64; 6182 6183 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Ptr)); 6184 if (!AddRec) 6185 return true; 6186 6187 // Check the step is constant. 6188 const SCEV *Step = AddRec->getStepRecurrence(*SE); 6189 // Calculate the pointer stride and check if it is consecutive. 6190 const auto *C = dyn_cast<SCEVConstant>(Step); 6191 if (!C) 6192 return true; 6193 6194 const APInt &APStepVal = C->getAPInt(); 6195 6196 // Huge step value - give up. 6197 if (APStepVal.getBitWidth() > 64) 6198 return true; 6199 6200 int64_t StepVal = APStepVal.getSExtValue(); 6201 6202 return StepVal > MaxMergeDistance; 6203 } 6204 6205 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6206 return Legal->hasStride(I->getOperand(0)) || 6207 Legal->hasStride(I->getOperand(1)); 6208 } 6209 6210 LoopVectorizationCostModel::VectorizationCostTy 6211 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 6212 // If we know that this instruction will remain uniform, check the cost of 6213 // the scalar version. 6214 if (Legal->isUniformAfterVectorization(I)) 6215 VF = 1; 6216 6217 Type *VectorTy; 6218 unsigned C = getInstructionCost(I, VF, VectorTy); 6219 6220 bool TypeNotScalarized = 6221 VF > 1 && !VectorTy->isVoidTy() && TTI.getNumberOfParts(VectorTy) < VF; 6222 return VectorizationCostTy(C, TypeNotScalarized); 6223 } 6224 6225 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6226 unsigned VF, 6227 Type *&VectorTy) { 6228 Type *RetTy = I->getType(); 6229 if (VF > 1 && MinBWs.count(I)) 6230 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 6231 VectorTy = ToVectorTy(RetTy, VF); 6232 auto SE = PSE.getSE(); 6233 6234 // TODO: We need to estimate the cost of intrinsic calls. 6235 switch (I->getOpcode()) { 6236 case Instruction::GetElementPtr: 6237 // We mark this instruction as zero-cost because the cost of GEPs in 6238 // vectorized code depends on whether the corresponding memory instruction 6239 // is scalarized or not. Therefore, we handle GEPs with the memory 6240 // instruction cost. 6241 return 0; 6242 case Instruction::Br: { 6243 return TTI.getCFInstrCost(I->getOpcode()); 6244 } 6245 case Instruction::PHI: { 6246 auto *Phi = cast<PHINode>(I); 6247 6248 // First-order recurrences are replaced by vector shuffles inside the loop. 6249 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 6250 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 6251 VectorTy, VF - 1, VectorTy); 6252 6253 // TODO: IF-converted IFs become selects. 
6254 return 0; 6255 } 6256 case Instruction::UDiv: 6257 case Instruction::SDiv: 6258 case Instruction::URem: 6259 case Instruction::SRem: 6260 // We assume that if-converted blocks have a 50% chance of being executed. 6261 // Predicated scalarized instructions are avoided due to the CF that 6262 // bypasses turned off lanes. If we are not predicating, fallthrough. 6263 if (VF > 1 && mayDivideByZero(*I) && 6264 Legal->blockNeedsPredication(I->getParent())) 6265 return VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy) / 2 + 6266 getScalarizationOverhead(I, VF, true, TTI); 6267 case Instruction::Add: 6268 case Instruction::FAdd: 6269 case Instruction::Sub: 6270 case Instruction::FSub: 6271 case Instruction::Mul: 6272 case Instruction::FMul: 6273 case Instruction::FDiv: 6274 case Instruction::FRem: 6275 case Instruction::Shl: 6276 case Instruction::LShr: 6277 case Instruction::AShr: 6278 case Instruction::And: 6279 case Instruction::Or: 6280 case Instruction::Xor: { 6281 // Since we will replace the stride by 1 the multiplication should go away. 6282 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 6283 return 0; 6284 // Certain instructions can be cheaper to vectorize if they have a constant 6285 // second vector operand. One example of this are shifts on x86. 6286 TargetTransformInfo::OperandValueKind Op1VK = 6287 TargetTransformInfo::OK_AnyValue; 6288 TargetTransformInfo::OperandValueKind Op2VK = 6289 TargetTransformInfo::OK_AnyValue; 6290 TargetTransformInfo::OperandValueProperties Op1VP = 6291 TargetTransformInfo::OP_None; 6292 TargetTransformInfo::OperandValueProperties Op2VP = 6293 TargetTransformInfo::OP_None; 6294 Value *Op2 = I->getOperand(1); 6295 6296 // Check for a splat or for a non uniform vector of constants. 6297 if (isa<ConstantInt>(Op2)) { 6298 ConstantInt *CInt = cast<ConstantInt>(Op2); 6299 if (CInt && CInt->getValue().isPowerOf2()) 6300 Op2VP = TargetTransformInfo::OP_PowerOf2; 6301 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 6302 } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) { 6303 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 6304 Constant *SplatValue = cast<Constant>(Op2)->getSplatValue(); 6305 if (SplatValue) { 6306 ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue); 6307 if (CInt && CInt->getValue().isPowerOf2()) 6308 Op2VP = TargetTransformInfo::OP_PowerOf2; 6309 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 6310 } 6311 } else if (Legal->isUniform(Op2)) { 6312 Op2VK = TargetTransformInfo::OK_UniformValue; 6313 } 6314 6315 return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, Op2VK, 6316 Op1VP, Op2VP); 6317 } 6318 case Instruction::Select: { 6319 SelectInst *SI = cast<SelectInst>(I); 6320 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 6321 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 6322 Type *CondTy = SI->getCondition()->getType(); 6323 if (!ScalarCond) 6324 CondTy = VectorType::get(CondTy, VF); 6325 6326 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy); 6327 } 6328 case Instruction::ICmp: 6329 case Instruction::FCmp: { 6330 Type *ValTy = I->getOperand(0)->getType(); 6331 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 6332 auto It = MinBWs.find(Op0AsInstruction); 6333 if (VF > 1 && It != MinBWs.end()) 6334 ValTy = IntegerType::get(ValTy->getContext(), It->second); 6335 VectorTy = ToVectorTy(ValTy, VF); 6336 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy); 6337 } 6338 case Instruction::Store: 6339 case 
Instruction::Load: { 6340 StoreInst *SI = dyn_cast<StoreInst>(I); 6341 LoadInst *LI = dyn_cast<LoadInst>(I); 6342 Type *ValTy = (SI ? SI->getValueOperand()->getType() : LI->getType()); 6343 VectorTy = ToVectorTy(ValTy, VF); 6344 6345 unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment(); 6346 unsigned AS = 6347 SI ? SI->getPointerAddressSpace() : LI->getPointerAddressSpace(); 6348 Value *Ptr = getPointerOperand(I); 6349 // We add the cost of address computation here instead of with the gep 6350 // instruction because only here we know whether the operation is 6351 // scalarized. 6352 if (VF == 1) 6353 return TTI.getAddressComputationCost(VectorTy) + 6354 TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 6355 6356 if (LI && Legal->isUniform(Ptr)) { 6357 // Scalar load + broadcast 6358 unsigned Cost = TTI.getAddressComputationCost(ValTy->getScalarType()); 6359 Cost += TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), 6360 Alignment, AS); 6361 return Cost + 6362 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, ValTy); 6363 } 6364 6365 // For an interleaved access, calculate the total cost of the whole 6366 // interleave group. 6367 if (Legal->isAccessInterleaved(I)) { 6368 auto Group = Legal->getInterleavedAccessGroup(I); 6369 assert(Group && "Fail to get an interleaved access group."); 6370 6371 // Only calculate the cost once at the insert position. 6372 if (Group->getInsertPos() != I) 6373 return 0; 6374 6375 unsigned InterleaveFactor = Group->getFactor(); 6376 Type *WideVecTy = 6377 VectorType::get(VectorTy->getVectorElementType(), 6378 VectorTy->getVectorNumElements() * InterleaveFactor); 6379 6380 // Holds the indices of existing members in an interleaved load group. 6381 // An interleaved store group doesn't need this as it doesn't allow gaps. 6382 SmallVector<unsigned, 4> Indices; 6383 if (LI) { 6384 for (unsigned i = 0; i < InterleaveFactor; i++) 6385 if (Group->getMember(i)) 6386 Indices.push_back(i); 6387 } 6388 6389 // Calculate the cost of the whole interleaved group. 6390 unsigned Cost = TTI.getInterleavedMemoryOpCost( 6391 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, 6392 Group->getAlignment(), AS); 6393 6394 if (Group->isReverse()) 6395 Cost += 6396 Group->getNumMembers() * 6397 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6398 6399 // FIXME: The interleaved load group with a huge gap could be even more 6400 // expensive than scalar operations. Then we could ignore such group and 6401 // use scalar operations instead. 6402 return Cost; 6403 } 6404 6405 // Scalarized loads/stores. 6406 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 6407 bool UseGatherOrScatter = 6408 (ConsecutiveStride == 0) && Legal->isLegalGatherOrScatter(I); 6409 6410 bool Reverse = ConsecutiveStride < 0; 6411 const DataLayout &DL = I->getModule()->getDataLayout(); 6412 uint64_t ScalarAllocatedSize = DL.getTypeAllocSize(ValTy); 6413 uint64_t VectorElementSize = DL.getTypeStoreSize(VectorTy) / VF; 6414 if ((!ConsecutiveStride && !UseGatherOrScatter) || 6415 ScalarAllocatedSize != VectorElementSize) { 6416 bool IsComplexComputation = 6417 isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop); 6418 unsigned Cost = 0; 6419 // The cost of extracting from the value vector and pointer vector. 6420 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6421 for (unsigned i = 0; i < VF; ++i) { 6422 // The cost of extracting the pointer operand. 
6423 Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, PtrTy, i); 6424 // In case of STORE, the cost of ExtractElement from the vector. 6425 // In case of LOAD, the cost of InsertElement into the returned 6426 // vector. 6427 Cost += TTI.getVectorInstrCost(SI ? Instruction::ExtractElement 6428 : Instruction::InsertElement, 6429 VectorTy, i); 6430 } 6431 6432 // The cost of the scalar loads/stores. 6433 Cost += VF * TTI.getAddressComputationCost(PtrTy, IsComplexComputation); 6434 Cost += VF * 6435 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), 6436 Alignment, AS); 6437 return Cost; 6438 } 6439 6440 unsigned Cost = TTI.getAddressComputationCost(VectorTy); 6441 if (UseGatherOrScatter) { 6442 assert(ConsecutiveStride == 0 && 6443 "Gather/Scatter are not used for consecutive stride"); 6444 return Cost + 6445 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 6446 Legal->isMaskRequired(I), Alignment); 6447 } 6448 // Wide load/stores. 6449 if (Legal->isMaskRequired(I)) 6450 Cost += 6451 TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 6452 else 6453 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 6454 6455 if (Reverse) 6456 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6457 return Cost; 6458 } 6459 case Instruction::ZExt: 6460 case Instruction::SExt: 6461 case Instruction::FPToUI: 6462 case Instruction::FPToSI: 6463 case Instruction::FPExt: 6464 case Instruction::PtrToInt: 6465 case Instruction::IntToPtr: 6466 case Instruction::SIToFP: 6467 case Instruction::UIToFP: 6468 case Instruction::Trunc: 6469 case Instruction::FPTrunc: 6470 case Instruction::BitCast: { 6471 // We optimize the truncation of induction variable. 6472 // The cost of these is the same as the scalar operation. 6473 if (I->getOpcode() == Instruction::Trunc && 6474 Legal->isInductionVariable(I->getOperand(0))) 6475 return TTI.getCastInstrCost(I->getOpcode(), I->getType(), 6476 I->getOperand(0)->getType()); 6477 6478 Type *SrcScalarTy = I->getOperand(0)->getType(); 6479 Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF); 6480 if (VF > 1 && MinBWs.count(I)) { 6481 // This cast is going to be shrunk. This may remove the cast or it might 6482 // turn it into slightly different cast. For example, if MinBW == 16, 6483 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 6484 // 6485 // Calculate the modified src and dest types. 6486 Type *MinVecTy = VectorTy; 6487 if (I->getOpcode() == Instruction::Trunc) { 6488 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 6489 VectorTy = 6490 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6491 } else if (I->getOpcode() == Instruction::ZExt || 6492 I->getOpcode() == Instruction::SExt) { 6493 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 6494 VectorTy = 6495 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6496 } 6497 } 6498 6499 return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy); 6500 } 6501 case Instruction::Call: { 6502 bool NeedToScalarize; 6503 CallInst *CI = cast<CallInst>(I); 6504 unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize); 6505 if (getVectorIntrinsicIDForCall(CI, TLI)) 6506 return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI)); 6507 return CallCost; 6508 } 6509 default: 6510 // The cost of executing VF copies of the scalar instruction. This opcode 6511 // is unknown. Assume that it is the same as 'mul'. 
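    // For example, at VF = 4 this charges four copies of a 'mul' on the
    // vector type plus the insert/extract scalarization overhead; this is a
    // deliberately rough upper-bound estimate.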
6512 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) + 6513 getScalarizationOverhead(I, VF, false, TTI); 6514 } // end of switch. 6515 } 6516 6517 char LoopVectorize::ID = 0; 6518 static const char lv_name[] = "Loop Vectorization"; 6519 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 6520 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 6521 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 6522 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 6523 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 6524 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 6525 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 6526 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 6527 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 6528 INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass) 6529 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 6530 INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 6531 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 6532 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 6533 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 6534 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 6535 6536 namespace llvm { 6537 Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) { 6538 return new LoopVectorize(NoUnrolling, AlwaysVectorize); 6539 } 6540 } 6541 6542 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 6543 6544 // Check if the pointer operand of a load or store instruction is 6545 // consecutive. 6546 if (auto *Ptr = getPointerOperand(Inst)) 6547 return Legal->isConsecutivePtr(Ptr); 6548 return false; 6549 } 6550 6551 void LoopVectorizationCostModel::collectValuesToIgnore() { 6552 // Ignore ephemeral values. 6553 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 6554 6555 // Ignore type-promoting instructions we identified during reduction 6556 // detection. 6557 for (auto &Reduction : *Legal->getReductionVars()) { 6558 RecurrenceDescriptor &RedDes = Reduction.second; 6559 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 6560 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6561 } 6562 6563 // Insert values known to be scalar into VecValuesToIgnore. 6564 for (auto *BB : TheLoop->getBlocks()) 6565 for (auto &I : *BB) 6566 if (Legal->isScalarAfterVectorization(&I)) 6567 VecValuesToIgnore.insert(&I); 6568 } 6569 6570 void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr, 6571 bool IfPredicateInstr) { 6572 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 6573 // Holds vector parameters or scalars, in case of uniform vals. 6574 SmallVector<VectorParts, 4> Params; 6575 6576 setDebugLocFromInst(Builder, Instr); 6577 6578 // Find all of the vectorized parameters. 6579 for (Value *SrcOp : Instr->operands()) { 6580 // If we are accessing the old induction variable, use the new one. 6581 if (SrcOp == OldInduction) { 6582 Params.push_back(getVectorValue(SrcOp)); 6583 continue; 6584 } 6585 6586 // Try using previously calculated values. 6587 Instruction *SrcInst = dyn_cast<Instruction>(SrcOp); 6588 6589 // If the src is an instruction that appeared earlier in the basic block 6590 // then it should already be vectorized. 6591 if (SrcInst && OrigLoop->contains(SrcInst)) { 6592 assert(WidenMap.has(SrcInst) && "Source operand is unavailable"); 6593 // The parameter is a vector value from earlier. 
      Params.push_back(WidenMap.get(SrcInst));
    } else {
      // The parameter is a scalar from outside the loop, perhaps even a
      // constant.
      VectorParts Scalars;
      Scalars.append(UF, SrcOp);
      Params.push_back(Scalars);
    }
  }

  assert(Params.size() == Instr->getNumOperands() &&
         "Invalid number of operands");

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Value *UndefVec = IsVoidRetTy ? nullptr : UndefValue::get(Instr->getType());
  // Create a new entry in the WidenMap and initialize it to Undef or Null.
  VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);

  VectorParts Cond;
  if (IfPredicateInstr) {
    assert(Instr->getParent()->getSinglePredecessor() &&
           "Only support single predecessor blocks");
    Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(),
                          Instr->getParent());
  }

  // For each vector unroll 'part':
  for (unsigned Part = 0; Part < UF; ++Part) {
    // For each scalar that we create:

    // Start an "if (pred) a[i] = ..." block.
    Value *Cmp = nullptr;
    if (IfPredicateInstr) {
      if (Cond[Part]->getType()->isVectorTy())
        Cond[Part] =
            Builder.CreateExtractElement(Cond[Part], Builder.getInt32(0));
      Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cond[Part],
                               ConstantInt::get(Cond[Part]->getType(), 1));
    }

    Instruction *Cloned = Instr->clone();
    if (!IsVoidRetTy)
      Cloned->setName(Instr->getName() + ".cloned");
    // Replace the operands of the cloned instruction with extracted scalars.
    for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
      Value *Op = Params[op][Part];
      Cloned->setOperand(op, Op);
    }

    // Place the cloned scalar in the new loop.
    Builder.Insert(Cloned);

    // If we just cloned a new assumption, add it to the assumption cache.
    if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
      if (II->getIntrinsicID() == Intrinsic::assume)
        AC->registerAssumption(II);

    // If the original scalar returns a value, we need to place it in a vector
    // so that future users will be able to use it.
    if (!IsVoidRetTy)
      VecResults[Part] = Cloned;

    // End if-block.
    if (IfPredicateInstr)
      PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp));
  }
}

void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) {
  auto *SI = dyn_cast<StoreInst>(Instr);
  bool IfPredicateInstr = (SI && Legal->blockNeedsPredication(SI->getParent()));

  return scalarizeInstruction(Instr, IfPredicateInstr);
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
                                        Instruction::BinaryOps BinOp) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
  Type *Ty = Val->getType();
  assert(!Ty->isVectorTy() && "Val must be a scalar");

  if (Ty->isFloatingPointTy()) {
    Constant *C = ConstantFP::get(Ty, (double)StartIdx);

    // Floating-point operations had to be 'fast' to enable the unrolling.
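    // E.g. with StartIdx = 3 and Step = 2.0, this emits Val <BinOp> (3.0 * 2.0)
    // with fast-math flags applied to both operations.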
6684 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 6685 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 6686 } 6687 Constant *C = ConstantInt::get(Ty, StartIdx); 6688 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 6689 } 6690 6691 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 6692 SmallVector<Metadata *, 4> MDs; 6693 // Reserve first location for self reference to the LoopID metadata node. 6694 MDs.push_back(nullptr); 6695 bool IsUnrollMetadata = false; 6696 MDNode *LoopID = L->getLoopID(); 6697 if (LoopID) { 6698 // First find existing loop unrolling disable metadata. 6699 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 6700 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 6701 if (MD) { 6702 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 6703 IsUnrollMetadata = 6704 S && S->getString().startswith("llvm.loop.unroll.disable"); 6705 } 6706 MDs.push_back(LoopID->getOperand(i)); 6707 } 6708 } 6709 6710 if (!IsUnrollMetadata) { 6711 // Add runtime unroll disable metadata. 6712 LLVMContext &Context = L->getHeader()->getContext(); 6713 SmallVector<Metadata *, 1> DisableOperands; 6714 DisableOperands.push_back( 6715 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 6716 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 6717 MDs.push_back(DisableNode); 6718 MDNode *NewLoopID = MDNode::get(Context, MDs); 6719 // Set operand 0 to refer to the loop id itself. 6720 NewLoopID->replaceOperandWith(0, NewLoopID); 6721 L->setLoopID(NewLoopID); 6722 } 6723 } 6724 6725 bool LoopVectorizePass::processLoop(Loop *L) { 6726 assert(L->empty() && "Only process inner loops."); 6727 6728 #ifndef NDEBUG 6729 const std::string DebugLocStr = getDebugLocString(L); 6730 #endif /* NDEBUG */ 6731 6732 DEBUG(dbgs() << "\nLV: Checking a loop in \"" 6733 << L->getHeader()->getParent()->getName() << "\" from " 6734 << DebugLocStr << "\n"); 6735 6736 LoopVectorizeHints Hints(L, DisableUnrolling, *ORE); 6737 6738 DEBUG(dbgs() << "LV: Loop hints:" 6739 << " force=" 6740 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 6741 ? "disabled" 6742 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 6743 ? "enabled" 6744 : "?")) 6745 << " width=" << Hints.getWidth() 6746 << " unroll=" << Hints.getInterleave() << "\n"); 6747 6748 // Function containing loop 6749 Function *F = L->getHeader()->getParent(); 6750 6751 // Looking at the diagnostic output is the only way to determine if a loop 6752 // was vectorized (other than looking at the IR or machine code), so it 6753 // is important to generate an optimization remark for each loop. Most of 6754 // these messages are generated by emitOptimizationRemarkAnalysis. Remarks 6755 // generated by emitOptimizationRemark and emitOptimizationRemarkMissed are 6756 // less verbose reporting vectorized loops and unvectorized loops that may 6757 // benefit from vectorization, respectively. 6758 6759 if (!Hints.allowVectorization(F, L, AlwaysVectorize)) { 6760 DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 6761 return false; 6762 } 6763 6764 // Check the loop for a trip count threshold: 6765 // do not vectorize loops with a tiny trip count. 6766 const unsigned TC = SE->getSmallConstantTripCount(L); 6767 if (TC > 0u && TC < TinyTripCountVectorThreshold) { 6768 DEBUG(dbgs() << "LV: Found a loop with a very small trip count. 
" 6769 << "This loop is not worth vectorizing."); 6770 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 6771 DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 6772 else { 6773 DEBUG(dbgs() << "\n"); 6774 emitAnalysisDiag(L, Hints, *ORE, VectorizationReport() 6775 << "vectorization is not beneficial " 6776 "and is not explicitly forced"); 6777 return false; 6778 } 6779 } 6780 6781 PredicatedScalarEvolution PSE(*SE, *L); 6782 6783 // Check if it is legal to vectorize the loop. 6784 LoopVectorizationRequirements Requirements(*ORE); 6785 LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE, 6786 &Requirements, &Hints); 6787 if (!LVL.canVectorize()) { 6788 DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 6789 emitMissedWarning(F, L, Hints, ORE); 6790 return false; 6791 } 6792 6793 // Use the cost model. 6794 LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F, 6795 &Hints); 6796 CM.collectValuesToIgnore(); 6797 6798 // Check the function attributes to find out if this function should be 6799 // optimized for size. 6800 bool OptForSize = 6801 Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize(); 6802 6803 // Compute the weighted frequency of this loop being executed and see if it 6804 // is less than 20% of the function entry baseline frequency. Note that we 6805 // always have a canonical loop here because we think we *can* vectorize. 6806 // FIXME: This is hidden behind a flag due to pervasive problems with 6807 // exactly what block frequency models. 6808 if (LoopVectorizeWithBlockFrequency) { 6809 BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader()); 6810 if (Hints.getForce() != LoopVectorizeHints::FK_Enabled && 6811 LoopEntryFreq < ColdEntryFreq) 6812 OptForSize = true; 6813 } 6814 6815 // Check the function attributes to see if implicit floats are allowed. 6816 // FIXME: This check doesn't seem possibly correct -- what if the loop is 6817 // an integer loop and the vector instructions selected are purely integer 6818 // vector instructions? 6819 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 6820 DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat" 6821 "attribute is used.\n"); 6822 emitAnalysisDiag( 6823 L, Hints, *ORE, 6824 VectorizationReport() 6825 << "loop not vectorized due to NoImplicitFloat attribute"); 6826 emitMissedWarning(F, L, Hints, ORE); 6827 return false; 6828 } 6829 6830 // Check if the target supports potentially unsafe FP vectorization. 6831 // FIXME: Add a check for the type of safety issue (denormal, signaling) 6832 // for the target we're vectorizing for, to make sure none of the 6833 // additional fp-math flags can help. 6834 if (Hints.isPotentiallyUnsafe() && 6835 TTI->isFPVectorizationPotentiallyUnsafe()) { 6836 DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n"); 6837 emitAnalysisDiag(L, Hints, *ORE, 6838 VectorizationReport() 6839 << "loop not vectorized due to unsafe FP support."); 6840 emitMissedWarning(F, L, Hints, ORE); 6841 return false; 6842 } 6843 6844 // Select the optimal vectorization factor. 6845 const LoopVectorizationCostModel::VectorizationFactor VF = 6846 CM.selectVectorizationFactor(OptForSize); 6847 6848 // Select the interleave count. 6849 unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost); 6850 6851 // Get user interleave count. 6852 unsigned UserIC = Hints.getInterleave(); 6853 6854 // Identify the diagnostic messages that should be produced. 
  std::string VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                    "requirements.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  if (VF.Width == 1) {
    DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg =
        "the cost-model indicates that vectorization is not beneficial";
    VectorizeLoop = false;
  }

  if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg =
        "the cost-model indicates that interleaving is not beneficial";
    InterleaveLoop = false;
    if (UserIC == 1)
      IntDiagMsg +=
          " and is explicitly disabled or interleave count is set to 1";
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    DEBUG(dbgs()
          << "LV: Interleaving is beneficial but is explicitly disabled.\n");
    IntDiagMsg = "the cost-model indicates that interleaving is beneficial "
                 "but is explicitly disabled or interleave count is set to 1";
    InterleaveLoop = false;
  }

  // Override IC if the user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emitOptimizationRemarkAnalysis(VAPassName, L, VecDiagMsg);
    ORE->emitOptimizationRemarkAnalysis(LV_NAME, L, IntDiagMsg);
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emitOptimizationRemarkAnalysis(VAPassName, L, VecDiagMsg);
  } else if (VectorizeLoop && !InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    ORE->emitOptimizationRemarkAnalysis(LV_NAME, L, IntDiagMsg);
  } else if (VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not profitable to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC);
    Unroller.vectorize(&LVL, CM.MinBWs);

    ORE->emitOptimizationRemark(LV_NAME, L,
                                Twine("interleaved loop (interleaved count: ") +
                                    Twine(IC) + ")");
  } else {
    // If we decided that it is profitable to vectorize the loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC);
    LB.vectorize(&LVL, CM.MinBWs);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling of a scalar loop when there
    // are no runtime checks about strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      AddRuntimeUnrollDisableMetaData(L);

    // Report the vectorization decision.
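    // E.g. with VF.Width = 4 and IC = 2 the remark reads:
    //   "vectorized loop (vectorization width: 4, interleaved count: 2)"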
6935 ORE->emitOptimizationRemark( 6936 LV_NAME, L, Twine("vectorized loop (vectorization width: ") + 6937 Twine(VF.Width) + ", interleaved count: " + Twine(IC) + 6938 ")"); 6939 } 6940 6941 // Mark the loop as already vectorized to avoid vectorizing again. 6942 Hints.setAlreadyVectorized(); 6943 6944 DEBUG(verifyFunction(*L->getHeader()->getParent())); 6945 return true; 6946 } 6947 6948 bool LoopVectorizePass::runImpl( 6949 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 6950 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 6951 DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_, 6952 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 6953 OptimizationRemarkEmitter &ORE_) { 6954 6955 SE = &SE_; 6956 LI = &LI_; 6957 TTI = &TTI_; 6958 DT = &DT_; 6959 BFI = &BFI_; 6960 TLI = TLI_; 6961 AA = &AA_; 6962 AC = &AC_; 6963 GetLAA = &GetLAA_; 6964 DB = &DB_; 6965 ORE = &ORE_; 6966 6967 // Compute some weights outside of the loop over the loops. Compute this 6968 // using a BranchProbability to re-use its scaling math. 6969 const BranchProbability ColdProb(1, 5); // 20% 6970 ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb; 6971 6972 // Don't attempt if 6973 // 1. the target claims to have no vector registers, and 6974 // 2. interleaving won't help ILP. 6975 // 6976 // The second condition is necessary because, even if the target has no 6977 // vector registers, loop vectorization may still enable scalar 6978 // interleaving. 6979 if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2) 6980 return false; 6981 6982 // Build up a worklist of inner-loops to vectorize. This is necessary as 6983 // the act of vectorizing or partially unrolling a loop creates new loops 6984 // and can invalidate iterators across the loops. 6985 SmallVector<Loop *, 8> Worklist; 6986 6987 for (Loop *L : *LI) 6988 addAcyclicInnerLoop(*L, Worklist); 6989 6990 LoopsAnalyzed += Worklist.size(); 6991 6992 // Now walk the identified inner loops. 6993 bool Changed = false; 6994 while (!Worklist.empty()) 6995 Changed |= processLoop(Worklist.pop_back_val()); 6996 6997 // Process each loop nest in the function. 6998 return Changed; 6999 7000 } 7001 7002 7003 PreservedAnalyses LoopVectorizePass::run(Function &F, 7004 FunctionAnalysisManager &AM) { 7005 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 7006 auto &LI = AM.getResult<LoopAnalysis>(F); 7007 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 7008 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 7009 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 7010 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); 7011 auto &AA = AM.getResult<AAManager>(F); 7012 auto &AC = AM.getResult<AssumptionAnalysis>(F); 7013 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 7014 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 7015 7016 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 7017 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 7018 [&](Loop &L) -> const LoopAccessInfo & { 7019 return LAM.getResult<LoopAccessAnalysis>(L); 7020 }; 7021 bool Changed = 7022 runImpl(F, SE, LI, TTI, DT, BFI, TLI, DB, AA, AC, GetLAA, ORE); 7023 if (!Changed) 7024 return PreservedAnalyses::all(); 7025 PreservedAnalyses PA; 7026 PA.preserve<LoopAnalysis>(); 7027 PA.preserve<DominatorTreeAnalysis>(); 7028 PA.preserve<BasicAA>(); 7029 PA.preserve<GlobalsAA>(); 7030 return PA; 7031 } 7032
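
// For reference: the new-PM pass above is registered as "loop-vectorize" and
// can be exercised in isolation with, e.g.:
//   opt -passes=loop-vectorize -S input.ll
// while the legacy pass created by createLoopVectorizePass is driven by:
//   opt -loop-vectorize -S input.ll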