//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
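// As an illustration (not from the original source), with VF = 4 a loop such
// as
//   for (i = 0; i < n; ++i) A[i] = B[i] + K;
// conceptually becomes
//   for (i = 0; i + 3 < n; i += 4) A[i:i+3] = B[i:i+3] + K; // one wide step
// followed by a scalar epilogue loop that handles the remaining iterations.
//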
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
// Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
// Data for SIMD.
//
// Other ideas/concepts are from:
// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
// Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <map>
#include <tuple>

using namespace llvm;
using namespace llvm::PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// We don't vectorize loops with a known constant trip count below this number.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Don't vectorize loops with a constant "
             "trip count that is smaller than this "
             "value."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

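// These and the following knobs are ordinary cl::opt flags, so they can be
// toggled on the 'opt' command line when running this pass, e.g.
// (illustrative):
//   opt -loop-vectorize -vectorizer-min-trip-count=4 -S input.ll
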
/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(false), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

namespace {

// Forward declarations.
class LoopVectorizeHints;
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;

/// Returns true if the given loop body has a cycle, excluding the loop
/// itself.
static bool hasCyclesInLoopBody(const Loop &L) {
  if (!L.empty())
    return true;

  for (const auto &SCC :
       make_range(scc_iterator<Loop, LoopBodyTraits>::begin(L),
                  scc_iterator<Loop, LoopBodyTraits>::end(L))) {
    if (SCC.size() > 1) {
      DEBUG(dbgs() << "LVL: Detected a cycle in the loop body:\n");
      DEBUG(L.dump());
      return true;
    }
  }
  return false;
}

/// \brief This modifies LoopAccessReport to initialize message with
/// loop-vectorizer-specific part.
class VectorizationReport : public LoopAccessReport {
public:
  VectorizationReport(Instruction *I = nullptr)
      : LoopAccessReport("loop not vectorized: ", I) {}

  /// \brief This allows promotion of the loop-access analysis report into the
  /// loop-vectorizer report. It modifies the message to add the
  /// loop-vectorizer-specific part of the message.
  explicit VectorizationReport(const LoopAccessReport &R)
      : LoopAccessReport(Twine("loop not vectorized: ") + R.str(),
                         R.getInstr()) {}
};

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}

/// A helper function that returns the GEP instruction and knows to skip a
/// 'bitcast'. The 'bitcast' may be skipped if the source and the destination
/// pointee types of the 'bitcast' have the same size.
/// For example:
///   bitcast double** %var to i64* - can be skipped
///   bitcast double** %var to i8*  - cannot
static GetElementPtrInst *getGEPInstruction(Value *Ptr) {

  if (isa<GetElementPtrInst>(Ptr))
    return cast<GetElementPtrInst>(Ptr);

  if (isa<BitCastInst>(Ptr) &&
      isa<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0))) {
    Type *BitcastTy = Ptr->getType();
    Type *GEPTy = cast<BitCastInst>(Ptr)->getSrcTy();
    if (!isa<PointerType>(BitcastTy) || !isa<PointerType>(GEPTy))
      return nullptr;
    Type *Pointee1Ty = cast<PointerType>(BitcastTy)->getPointerElementType();
    Type *Pointee2Ty = cast<PointerType>(GEPTy)->getPointerElementType();
    const DataLayout &DL = cast<BitCastInst>(Ptr)->getModule()->getDataLayout();
    if (DL.getTypeSizeInBits(Pointee1Ty) == DL.getTypeSizeInBits(Pointee2Ty))
      return cast<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0));
  }
  return nullptr;
}

/// A helper function that returns the pointer operand of a load or store
/// instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Induction(nullptr),
        OldInduction(nullptr), VectorLoopValueMap(UnrollFactor, VecWidth),
        TripCount(nullptr), VectorTripCount(nullptr), Legal(nullptr),
        AddedSafetyChecks(false) {}

  // Perform the actual loop widening (vectorization).
  // MinimumBitWidths maps scalar integer values to the smallest bitwidth they
  // can be validly truncated to. The cost model has assumed this truncation
  // will happen when vectorizing. VecValuesToIgnore contains scalar values
  // that the cost model has chosen to ignore because they will not be
  // vectorized.
  void vectorize(LoopVectorizationLegality *L,
                 const MapVector<Instruction *, uint64_t> &MinimumBitWidths) {
    MinBWs = &MinimumBitWidths;
    Legal = L;
    // Create a new empty loop. Unlink the old loop and connect the new one.
    createEmptyLoop();
    // Widen each instruction in the old loop to a new one in the new loop.
    // Use the Legality module to find the induction and reduction variables.
    vectorizeLoop();
  }

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  virtual ~InnerLoopVectorizer() {}

protected:
  /// A small list of PHINodes.
  typedef SmallVector<PHINode *, 4> PhiVector;

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  typedef SmallVector<Value *, 2> VectorParts;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  typedef SmallVector<SmallVector<Value *, 4>, 2> ScalarParts;

  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
      EdgeMaskCache;

  /// Create an empty loop, based on the loop ranges of the old loop.
  void createEmptyLoop();

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);
  /// Copy and widen the instructions from the old loop.
  virtual void vectorizeLoop();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// \brief The Loop exit block may have single value PHI nodes where the
  /// incoming value is 'Undef'. While vectorizing we only handled real values
  /// that were defined inside the loop. Here we fix the 'undef case'.
  /// See PR14725.
  void fixLCSSAPHIs();

  /// Predicate conditional instructions that require predication on their
  /// respective conditions.
  void predicateInstructions();

  /// Shrinks vector element sizes based on information in "MinBWs".
  void truncateToMinimalBitwidths();

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);
  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

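  // A sketch of the if-conversion masking scheme (illustrative, not from the
  // original source): for a block BB with predecessors P0 and P1 inside the
  // loop,
  //   EdgeMask(Pi, BB) = BlockInMask(Pi) & Ci
  //   BlockInMask(BB)  = EdgeMask(P0, BB) | EdgeMask(P1, BB)
  // where Ci is Pi's branch condition, negated if BB is on the false edge.
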
  /// A helper function to vectorize a single BB within the innermost loop.
  void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF,
                           PhiVector *PV);

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars. If \p IfPredicateInstr is true we need to 'hide' each
  /// scalarized instruction behind an if block predicated on the control
  /// dependence of the instruction.
  virtual void scalarizeInstruction(Instruction *Instr,
                                    bool IfPredicateInstr = false);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...;
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it
  /// can be a truncate instruction).
  void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal);

  /// Create a vector induction phi node based on an existing scalar one. This
  /// currently only works for integer induction variables with a constant
  /// step. \p EntryVal is the value from the original loop that maps to the
  /// vector phi node. If \p EntryVal is a truncate instruction, instead of
  /// widening the original IV, we widen a version of the IV truncated to \p
  /// EntryVal's type.
  void createVectorIntInductionPHI(const InductionDescriptor &II,
                                   Instruction *EntryVal);

  /// Widen an integer induction variable \p IV. If \p Trunc is provided, the
  /// induction variable will first be truncated to the corresponding type.
  void widenIntInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// Return a constant reference to the VectorParts corresponding to \p V from
  /// the original loop. If the value has already been vectorized, the
  /// corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// new vector values on-demand by inserting the scalar values into vectors
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into vectors.
  const VectorParts &getVectorValue(Value *V);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part and vector index \p Lane. If the value has
  /// been vectorized but not scalarized, the necessary extractelement
  /// instruction will be generated.
  Value *getScalarValue(Value *V, unsigned Part, unsigned Lane);

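  // For example (illustrative IR, assuming i32 values and VF = 4): if %x was
  // scalarized into %x.0 ... %x.3 and a vector use is later encountered,
  // getVectorValue builds the vector with an insertelement sequence:
  //   %v.0 = insertelement <4 x i32> undef, i32 %x.0, i32 0
  //   %v.1 = insertelement <4 x i32> %v.0,  i32 %x.1, i32 1
  //   %v.2 = insertelement <4 x i32> %v.1,  i32 %x.2, i32 2
  //   %v.3 = insertelement <4 x i32> %v.2,  i32 %x.3, i32 3
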
  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Emit a bypass check to see if the trip count would overflow, or we
  /// wouldn't have enough iterations to execute one vector loop.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if the vector trip count is nonzero.
  void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// This is a helper class for maintaining vectorization state. It's used for
  /// mapping values from the original loop to their corresponding values in
  /// the new loop. Two mappings are maintained: one for vectorized values and
  /// one for scalarized values. Vectorized values are represented with UF
  /// vector values in the new loop, and scalarized values are represented with
  /// UF x VF scalar values in the new loop. UF and VF are the unroll and
  /// vectorization factors, respectively.
  ///
  /// Entries can be added to either map with initVector and initScalar, which
  /// initialize and return a constant reference to the new entry. If a
  /// non-constant reference to a vector entry is required, getVector can be
  /// used to retrieve a mutable entry. We currently directly modify the mapped
  /// values during "fix-up" operations that occur once the first phase of
  /// widening is complete. These operations include type truncation and the
  /// second phase of recurrence widening.
  ///
  /// Otherwise, entries from either map should be accessed using the
  /// getVectorValue or getScalarValue functions from InnerLoopVectorizer.
  /// getVectorValue and getScalarValue coordinate to generate a vector or
  /// scalar value on-demand if one is not yet available. When vectorizing a
  /// loop, we visit the definition of an instruction before its uses. When
  /// visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  struct ValueMap {

    /// Construct an empty map with the given unroll and vectorization factors.
    ValueMap(unsigned UnrollFactor, unsigned VecWidth)
        : UF(UnrollFactor), VF(VecWidth) {
      // The unroll and vectorization factors are only used in asserts builds
      // to verify map entries are sized appropriately.
      (void)UF;
      (void)VF;
    }

    /// \return True if the map has a vector entry for \p Key.
    bool hasVector(Value *Key) const { return VectorMapStorage.count(Key); }

    /// \return True if the map has a scalar entry for \p Key.
    bool hasScalar(Value *Key) const { return ScalarMapStorage.count(Key); }

    /// \brief Map \p Key to the given VectorParts \p Entry, and return a
    /// constant reference to the new vector map entry. The given key should
    /// not already be in the map, and the given VectorParts should be
    /// correctly sized for the current unroll factor.
    const VectorParts &initVector(Value *Key, const VectorParts &Entry) {
      assert(!hasVector(Key) && "Vector entry already initialized");
      assert(Entry.size() == UF && "VectorParts has wrong dimensions");
      VectorMapStorage[Key] = Entry;
      return VectorMapStorage[Key];
    }

    /// \brief Map \p Key to the given ScalarParts \p Entry, and return a
    /// constant reference to the new scalar map entry. The given key should
    /// not already be in the map, and the given ScalarParts should be
    /// correctly sized for the current unroll and vectorization factors.
    const ScalarParts &initScalar(Value *Key, const ScalarParts &Entry) {
      assert(!hasScalar(Key) && "Scalar entry already initialized");
      assert(Entry.size() == UF &&
             all_of(make_range(Entry.begin(), Entry.end()),
                    [&](const SmallVectorImpl<Value *> &Values) -> bool {
                      return Values.size() == VF;
                    }) &&
             "ScalarParts has wrong dimensions");
      ScalarMapStorage[Key] = Entry;
      return ScalarMapStorage[Key];
    }

    /// \return A reference to the vector map entry corresponding to \p Key.
    /// The key should already be in the map. This function should only be used
    /// when it's necessary to update values that have already been vectorized.
    /// This is the case for "fix-up" operations including type truncation and
    /// the second phase of recurrence vectorization. If a non-const reference
    /// isn't required, getVectorValue should be used instead.
    VectorParts &getVector(Value *Key) {
      assert(hasVector(Key) && "Vector entry not initialized");
      return VectorMapStorage.find(Key)->second;
    }

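    // Typical usage (illustrative): after widening an instruction 'I' into UF
    // vector parts, the widening code records the result and later code
    // retrieves it on demand:
    //   VectorLoopValueMap.initVector(I, Parts); // during widening
    //   const auto &Parts = getVectorValue(I);   // at each use
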
    /// Retrieve an entry from the vector or scalar maps. The preferred way to
    /// access an existing mapped entry is with getVectorValue or
    /// getScalarValue from InnerLoopVectorizer. Until those functions can be
    /// moved inside ValueMap, we have to declare them as friends.
    friend const VectorParts &InnerLoopVectorizer::getVectorValue(Value *V);
    friend Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part,
                                                      unsigned Lane);

  private:
    /// The unroll factor. Each entry in the vector map contains UF vector
    /// values.
    unsigned UF;

    /// The vectorization factor. Each entry in the scalar map contains UF x VF
    /// scalar values.
    unsigned VF;

    /// The vector and scalar map storage. We use std::map and not DenseMap
    /// because insertions to DenseMap invalidate its iterators.
    std::map<Value *, VectorParts> VectorMapStorage;
    std::map<Value *, ScalarParts> ScalarMapStorage;
  };

  /// The original loop.
  Loop *OrigLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;
  /// Loop Info.
  LoopInfo *LI;
  /// Dominator Tree.
  DominatorTree *DT;
  /// Alias Analysis.
  AliasAnalysis *AA;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Assumption Cache.
  AssumptionCache *AC;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

protected:
  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;
  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;
  /// Middle Block between the vector and the scalar loops.
  BasicBlock *LoopMiddleBlock;
  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;
  /// The vector loop body.
  BasicBlock *LoopVectorBody;
  /// The scalar loop body.
  BasicBlock *LoopScalarBody;
  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction;
  /// The induction variable of the old basic block.
  PHINode *OldInduction;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  ValueMap VectorLoopValueMap;

  /// Store instructions that should be predicated, as a pair
  ///   <StoreInst, Predicate>
  SmallVector<std::pair<Instruction *, Value *>, 4> PredicatedInstructions;
  EdgeMaskCache MaskCache;
  /// Trip count of the original loop.
  Value *TripCount;
  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount;

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  const MapVector<Instruction *, uint64_t> *MinBWs;

  LoopVectorizationLegality *Legal;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor) {}

private:
  void scalarizeInstruction(Instruction *Instr,
                            bool IfPredicateInstr = false) override;
  void vectorizeMemoryInstruction(Instruction *Instr) override;
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

/// \brief Set the debug location in the builder using the debug location in
/// the instruction.
static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr))
    B.SetCurrentDebugLocation(Inst->getDebugLoc());
  else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is related to the leader and it could be
  /// negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The largest index is always less than the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if it contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey;
  int LargestKey;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32         // Insert Position
  //      %add = add i32 %even     // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32           // Def of %odd
  //      store i32 %odd           // Insert Position
  Instruction *InsertPos;
};
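
// When an interleave group is vectorized, its members are replaced by one wide
// memory operation plus shuffles. E.g. (illustrative IR) a load group of
// factor 2 with VF = 4 becomes:
//   %wide.vec = load <8 x i32>, <8 x i32>* %ptr
//   %even = shufflevector <8 x i32> %wide.vec, <8 x i32> undef,
//                         <4 x i32> <i32 0, i32 2, i32 4, i32 6>
//   %odd  = shufflevector <8 x i32> %wide.vec, <8 x i32> undef,
//                         <4 x i32> <i32 1, i32 3, i32 5, i32 7>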

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise it's meaningless to do analysis as the vectorization
/// on interleaved accesses is unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(nullptr),
        RequiresScalarEpilogue(false) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Return the maximum interleave factor of all interleaved groups.
  unsigned getMaxInterleaveFactor() const {
    unsigned MaxFactor = 1;
    for (auto &Entry : InterleaveGroupMap)
      MaxFactor = std::max(MaxFactor, Entry.second->getFactor());
    return MaxFactor;
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if it doesn't belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

  /// \brief Initialize the LoopAccessInfo used for dependence checking.
  void setLAI(const LoopAccessInfo *Info) { LAI = Info; }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;
  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// Holds dependences among the memory accesses in the loop. It maps a source
  /// access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    StrideDescriptor() = default;

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;
    const SCEV *Scev = nullptr; // The scalar expression of this access.
    uint64_t Size = 0;          // The size of the memory object.
    unsigned Align = 0;         // The alignment of this access.
  };

  /// \brief A type for holding instructions and their stride descriptors.
  typedef std::pair<Instruction *, StrideDescriptor> StrideEntry;

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);

  /// \brief Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride) {
    unsigned Factor = std::abs(Stride);
    return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
  }

  /// \brief Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }

  /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return false if reordering is
  /// not necessary or is prevented because \p A and \p B may be dependent.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {

    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B. Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
  }

  /// \brief Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for them.
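///
/// For example (illustrative), a loop annotated in the frontend with
///   #pragma clang loop vectorize_width(4) interleave_count(2)
/// reaches this pass carrying metadata of the form:
///   br i1 %cond, label %loop, label %exit, !llvm.loop !0
///   !0 = distinct !{!0, !1, !2}
///   !1 = !{!"llvm.loop.vectorize.width", i32 4}
///   !2 = !{!"llvm.loop.interleave.count", i32 2}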
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;
  /// Vectorization interleave factor.
  Hint Interleave;
  /// Vectorization forced.
  Hint Force;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
                     OptimizationRemarkEmitter &ORE)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        PotentiallyUnsafe(false), TheLoop(L), ORE(ORE) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized() {
    Width.Value = Interleave.Value = 1;
    Hint Hints[] = {Width, Interleave};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      ORE.emitOptimizationRemarkAnalysis(vectorizeAnalysisPassName(), L,
                                         emitRemark());
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      ORE.emitOptimizationRemarkAnalysis(vectorizeAnalysisPassName(), L,
                                         emitRemark());
      return false;
    }

    if (getWidth() == 1 && getInterleave() == 1) {
      // FIXME: Add a separate metadata to indicate when the loop has already
      // been vectorized instead of setting width and count to 1.
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      ORE.emitOptimizationRemarkAnalysis(
          vectorizeAnalysisPassName(), L,
          "loop not vectorized: vectorization and interleaving are explicitly "
          "disabled, or vectorize width and interleave count are both set to "
          "1");
      return false;
    }

    return true;
  }

  /// Dumps all the hint information.
  std::string emitRemark() const {
    VectorizationReport R;
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      R << "vectorization is explicitly disabled";
    else {
      R << "use -Rpass-analysis=loop-vectorize for more info";
      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
        R << " (Force=true";
        if (Width.Value != 0)
          R << ", Vector Width=" << Width.Value;
        if (Interleave.Value != 0)
          R << ", Interleave Count=" << Interleave.Value;
        R << ")";
      }
    }

    return R.str();
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }

  /// \brief If hints are provided that force vectorization, use the AlwaysPrint
  /// pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const {
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return DiagnosticInfoOptimizationRemarkAnalysis::AlwaysPrint;
  }

  bool allowReordering() const {
    // When hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations given by the scalar loop.
    // This is not enabled by default because it can be unsafe or inefficient.
    // For example, reordering floating-point operations will change the way
    // round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either a MDString or a MDNode with the first
      // operand a MDString.
      if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        if (!MD || MD->getNumOperands() == 0)
          continue;
        S = dyn_cast<MDString>(MD->getOperand(0));
        for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
          Args.push_back(MD->getOperand(i));
      } else {
        S = dyn_cast<MDString>(LoopID->getOperand(i));
        assert(Args.size() == 0 && "too many arguments for MDString");
      }

      if (!S)
        continue;

      // Check if the hint starts with the loop metadata prefix.
      StringRef Name = S->getString();
      if (Args.size() == 1)
        setHint(Name, Args[0]);
    }
  }

  /// Checks a string hint with one operand and sets the value if valid.
  void setHint(StringRef Name, Metadata *Arg) {
    if (!Name.startswith(Prefix()))
      return;
    Name = Name.substr(Prefix().size(), StringRef::npos);

    const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
    if (!C)
      return;
    unsigned Val = C->getZExtValue();

    Hint *Hints[] = {&Width, &Interleave, &Force};
    for (auto H : Hints) {
      if (Name == H->Name) {
        if (H->validate(Val))
          H->Value = Val;
        else
          DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
        break;
      }
    }
  }

  /// Create a new hint from name / value pair.
  MDNode *createHintMetadata(StringRef Name, unsigned V) const {
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    Metadata *MDs[] = {MDString::get(Context, Name),
                       ConstantAsMetadata::get(
                           ConstantInt::get(Type::getInt32Ty(Context), V))};
    return MDNode::get(Context, MDs);
  }

  /// Matches metadata with hint name.
  bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
    MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
    if (!Name)
      return false;

    for (auto H : HintTypes)
      if (Name->getString().endswith(H.Name))
        return true;
    return false;
  }

  /// Sets current hints into loop metadata, keeping other values intact.
  void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
    if (HintTypes.size() == 0)
      return;

    // Reserve the first element to LoopID (see below).
    SmallVector<Metadata *, 4> MDs(1);
    // If the loop already has metadata, then ignore the existing operands.
    MDNode *LoopID = TheLoop->getLoopID();
    if (LoopID) {
      for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
        MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
        // If node in update list, ignore old value.
        if (!matchesHintMetadataName(Node, HintTypes))
          MDs.push_back(Node);
      }
    }

    // Now, add the missing hints.
    for (auto H : HintTypes)
      MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));

    // Replace current metadata node with new one.
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);

    TheLoop->setLoopID(NewLoopID);
  }

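  // As an illustration (not from the original source), after
  // setAlreadyVectorized() the loop ID is rewritten so that later runs of the
  // vectorizer leave the loop alone:
  //   !0 = distinct !{!0, !{!"llvm.loop.vectorize.width", i32 1},
  //                       !{!"llvm.loop.interleave.count", i32 1}}
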
1424 OptimizationRemarkEmitter &ORE; 1425 }; 1426 1427 static void emitAnalysisDiag(const Loop *TheLoop, 1428 const LoopVectorizeHints &Hints, 1429 OptimizationRemarkEmitter &ORE, 1430 const LoopAccessReport &Message) { 1431 const char *Name = Hints.vectorizeAnalysisPassName(); 1432 LoopAccessReport::emitAnalysis(Message, TheLoop, Name, ORE); 1433 } 1434 1435 static void emitMissedWarning(Function *F, Loop *L, 1436 const LoopVectorizeHints &LH, 1437 OptimizationRemarkEmitter *ORE) { 1438 ORE->emitOptimizationRemarkMissed(LV_NAME, L, LH.emitRemark()); 1439 1440 if (LH.getForce() == LoopVectorizeHints::FK_Enabled) { 1441 if (LH.getWidth() != 1) 1442 emitLoopVectorizeWarning( 1443 F->getContext(), *F, L->getStartLoc(), 1444 "failed explicitly specified loop vectorization"); 1445 else if (LH.getInterleave() != 1) 1446 emitLoopInterleaveWarning( 1447 F->getContext(), *F, L->getStartLoc(), 1448 "failed explicitly specified loop interleaving"); 1449 } 1450 } 1451 1452 /// LoopVectorizationLegality checks if it is legal to vectorize a loop, and 1453 /// to what vectorization factor. 1454 /// This class does not look at the profitability of vectorization, only the 1455 /// legality. This class has two main kinds of checks: 1456 /// * Memory checks - The code in canVectorizeMemory checks if vectorization 1457 /// will change the order of memory accesses in a way that will change the 1458 /// correctness of the program. 1459 /// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory 1460 /// checks for a number of different conditions, such as the availability of a 1461 /// single induction variable, that all types are supported and vectorize-able, 1462 /// etc. This code reflects the capabilities of InnerLoopVectorizer. 1463 /// This class is also used by InnerLoopVectorizer for identifying 1464 /// induction variable and the different reduction variables. 1465 class LoopVectorizationLegality { 1466 public: 1467 LoopVectorizationLegality( 1468 Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT, 1469 TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F, 1470 const TargetTransformInfo *TTI, 1471 std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI, 1472 OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R, 1473 LoopVectorizeHints *H) 1474 : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT), 1475 GetLAA(GetLAA), LAI(nullptr), ORE(ORE), InterleaveInfo(PSE, L, DT, LI), 1476 Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false), 1477 Requirements(R), Hints(H) {} 1478 1479 /// ReductionList contains the reduction descriptors for all 1480 /// of the reductions that were found in the loop. 1481 typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList; 1482 1483 /// InductionList saves induction variables and maps them to the 1484 /// induction descriptor. 1485 typedef MapVector<PHINode *, InductionDescriptor> InductionList; 1486 1487 /// RecurrenceSet contains the phi nodes that are recurrences other than 1488 /// inductions and reductions. 1489 typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet; 1490 1491 /// Returns true if it is legal to vectorize this loop. 1492 /// This does not mean that it is profitable to vectorize this 1493 /// loop, only that it is legal to do so. 1494 bool canVectorize(); 1495 1496 /// Returns the Induction variable. 1497 PHINode *getInduction() { return Induction; } 1498 1499 /// Returns the reduction variables found in the loop. 
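  /// For example, in the sketch below (C, assuming a simple sum loop), the
  /// phi created for 'sum' is a reduction variable:
  ///   int sum = 0;
  ///   for (int i = 0; i < n; ++i)
  ///     sum += a[i];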
1500 ReductionList *getReductionVars() { return &Reductions; } 1501 1502 /// Returns the induction variables found in the loop. 1503 InductionList *getInductionVars() { return &Inductions; } 1504 1505 /// Return the first-order recurrences found in the loop. 1506 RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; } 1507 1508 /// Returns the widest induction type. 1509 Type *getWidestInductionType() { return WidestIndTy; } 1510 1511 /// Returns True if V is an induction variable in this loop. 1512 bool isInductionVariable(const Value *V); 1513 1514 /// Returns True if PN is a reduction variable in this loop. 1515 bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); } 1516 1517 /// Returns True if Phi is a first-order recurrence in this loop. 1518 bool isFirstOrderRecurrence(const PHINode *Phi); 1519 1520 /// Return true if the block BB needs to be predicated in order for the loop 1521 /// to be vectorized. 1522 bool blockNeedsPredication(BasicBlock *BB); 1523 1524 /// Check if this pointer is consecutive when vectorizing. This happens 1525 /// when the last index of the GEP is the induction variable, or that the 1526 /// pointer itself is an induction variable. 1527 /// This check allows us to vectorize A[idx] into a wide load/store. 1528 /// Returns: 1529 /// 0 - Stride is unknown or non-consecutive. 1530 /// 1 - Address is consecutive. 1531 /// -1 - Address is consecutive, and decreasing. 1532 int isConsecutivePtr(Value *Ptr); 1533 1534 /// Returns true if the value V is uniform within the loop. 1535 bool isUniform(Value *V); 1536 1537 /// Returns true if \p I is known to be uniform after vectorization. 1538 bool isUniformAfterVectorization(Instruction *I) { return Uniforms.count(I); } 1539 1540 /// Returns true if \p I is known to be scalar after vectorization. 1541 bool isScalarAfterVectorization(Instruction *I) { return Scalars.count(I); } 1542 1543 /// Returns the information that we collected about runtime memory check. 1544 const RuntimePointerChecking *getRuntimePointerChecking() const { 1545 return LAI->getRuntimePointerChecking(); 1546 } 1547 1548 const LoopAccessInfo *getLAI() const { return LAI; } 1549 1550 /// \brief Check if \p Instr belongs to any interleaved access group. 1551 bool isAccessInterleaved(Instruction *Instr) { 1552 return InterleaveInfo.isInterleaved(Instr); 1553 } 1554 1555 /// \brief Return the maximum interleave factor of all interleaved groups. 1556 unsigned getMaxInterleaveFactor() const { 1557 return InterleaveInfo.getMaxInterleaveFactor(); 1558 } 1559 1560 /// \brief Get the interleaved access group that \p Instr belongs to. 1561 const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) { 1562 return InterleaveInfo.getInterleaveGroup(Instr); 1563 } 1564 1565 /// \brief Returns true if an interleaved group requires a scalar iteration 1566 /// to handle accesses with gaps. 1567 bool requiresScalarEpilogue() const { 1568 return InterleaveInfo.requiresScalarEpilogue(); 1569 } 1570 1571 unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); } 1572 1573 bool hasStride(Value *V) { return LAI->hasStride(V); } 1574 1575 /// Returns true if the target machine supports masked store operation 1576 /// for the given \p DataType and kind of access to \p Ptr. 
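  /// A masked store is what makes it possible, e.g., to if-convert and then
  /// vectorize a conditional store such as (illustrative):
  ///   for (int i = 0; i < n; ++i)
  ///     if (c[i]) a[i] = x;
  /// where the vectorized comparison of c[i] provides the mask.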
1577   bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
1578     return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
1579   }
1580   /// Returns true if the target machine supports masked load operation
1581   /// for the given \p DataType and kind of access to \p Ptr.
1582   bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
1583     return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
1584   }
1585   /// Returns true if the target machine supports masked scatter operation
1586   /// for the given \p DataType.
1587   bool isLegalMaskedScatter(Type *DataType) {
1588     return TTI->isLegalMaskedScatter(DataType);
1589   }
1590   /// Returns true if the target machine supports masked gather operation
1591   /// for the given \p DataType.
1592   bool isLegalMaskedGather(Type *DataType) {
1593     return TTI->isLegalMaskedGather(DataType);
1594   }
1595   /// Returns true if the target machine can represent \p V as a masked gather
1596   /// or scatter operation.
1597   bool isLegalGatherOrScatter(Value *V) {
1598     auto *LI = dyn_cast<LoadInst>(V);
1599     auto *SI = dyn_cast<StoreInst>(V);
1600     if (!LI && !SI)
1601       return false;
1602     auto *Ptr = getPointerOperand(V);
1603     auto *Ty = cast<PointerType>(Ptr->getType())->getElementType();
1604     return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
1605   }
1606
1607   /// Returns true if the vector representation of the instruction \p I
1608   /// requires a mask.
1609   bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
1610   unsigned getNumStores() const { return LAI->getNumStores(); }
1611   unsigned getNumLoads() const { return LAI->getNumLoads(); }
1612   unsigned getNumPredStores() const { return NumPredStores; }
1613
1614 private:
1615   /// Check if a single-basic-block loop is vectorizable.
1616   /// At this point we know that this is a loop with a constant trip count
1617   /// and we only need to check individual instructions.
1618   bool canVectorizeInstrs();
1619
1620   /// When we vectorize loops we may change the order in which
1621   /// we read and write from memory. This method checks if it is
1622   /// legal to vectorize the code, considering only memory constraints.
1623   /// Returns true if the loop is vectorizable.
1624   bool canVectorizeMemory();
1625
1626   /// Return true if we can vectorize this loop using the IF-conversion
1627   /// transformation.
1628   bool canVectorizeWithIfConvert();
1629
1630   /// Collect the instructions that are uniform after vectorization. An
1631   /// instruction is uniform if we represent it with a single scalar value in
1632   /// the vectorized loop corresponding to each vector iteration. Examples of
1633   /// uniform instructions include pointer operands of consecutive or
1634   /// interleaved memory accesses. Note that although uniformity implies an
1635   /// instruction will be scalar, the reverse is not true. In general, a
1636   /// scalarized instruction will be represented by VF scalar values in the
1637   /// vectorized loop, each corresponding to an iteration of the original
1638   /// scalar loop.
1639   void collectLoopUniforms();
1640
1641   /// Collect the instructions that are scalar after vectorization. An
1642   /// instruction is scalar if it is known to be uniform or will be scalarized
1643   /// during vectorization. Non-uniform scalarized instructions will be
1644   /// represented by VF values in the vectorized loop, each corresponding to an
1645   /// iteration of the original scalar loop.
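  /// For example, with VF = 4, a non-uniform scalarized add is emitted as
  /// four scalar adds, one per vector lane, instead of a single wide add.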
1646 void collectLoopScalars(); 1647 1648 /// Return true if all of the instructions in the block can be speculatively 1649 /// executed. \p SafePtrs is a list of addresses that are known to be legal 1650 /// and we know that we can read from them without segfault. 1651 bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs); 1652 1653 /// Updates the vectorization state by adding \p Phi to the inductions list. 1654 /// This can set \p Phi as the main induction of the loop if \p Phi is a 1655 /// better choice for the main induction than the existing one. 1656 void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID, 1657 SmallPtrSetImpl<Value *> &AllowedExit); 1658 1659 /// Report an analysis message to assist the user in diagnosing loops that are 1660 /// not vectorized. These are handled as LoopAccessReport rather than 1661 /// VectorizationReport because the << operator of VectorizationReport returns 1662 /// LoopAccessReport. 1663 void emitAnalysis(const LoopAccessReport &Message) const { 1664 emitAnalysisDiag(TheLoop, *Hints, *ORE, Message); 1665 } 1666 1667 /// \brief If an access has a symbolic strides, this maps the pointer value to 1668 /// the stride symbol. 1669 const ValueToValueMap *getSymbolicStrides() { 1670 // FIXME: Currently, the set of symbolic strides is sometimes queried before 1671 // it's collected. This happens from canVectorizeWithIfConvert, when the 1672 // pointer is checked to reference consecutive elements suitable for a 1673 // masked access. 1674 return LAI ? &LAI->getSymbolicStrides() : nullptr; 1675 } 1676 1677 unsigned NumPredStores; 1678 1679 /// The loop that we evaluate. 1680 Loop *TheLoop; 1681 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. 1682 /// Applies dynamic knowledge to simplify SCEV expressions in the context 1683 /// of existing SCEV assumptions. The analysis will also add a minimal set 1684 /// of new predicates if this is required to enable vectorization and 1685 /// unrolling. 1686 PredicatedScalarEvolution &PSE; 1687 /// Target Library Info. 1688 TargetLibraryInfo *TLI; 1689 /// Target Transform Info 1690 const TargetTransformInfo *TTI; 1691 /// Dominator Tree. 1692 DominatorTree *DT; 1693 // LoopAccess analysis. 1694 std::function<const LoopAccessInfo &(Loop &)> *GetLAA; 1695 // And the loop-accesses info corresponding to this loop. This pointer is 1696 // null until canVectorizeMemory sets it up. 1697 const LoopAccessInfo *LAI; 1698 /// Interface to emit optimization remarks. 1699 OptimizationRemarkEmitter *ORE; 1700 1701 /// The interleave access information contains groups of interleaved accesses 1702 /// with the same stride and close to each other. 1703 InterleavedAccessInfo InterleaveInfo; 1704 1705 // --- vectorization state --- // 1706 1707 /// Holds the integer induction variable. This is the counter of the 1708 /// loop. 1709 PHINode *Induction; 1710 /// Holds the reduction variables. 1711 ReductionList Reductions; 1712 /// Holds all of the induction variables that we found in the loop. 1713 /// Notice that inductions don't need to start at zero and that induction 1714 /// variables can be pointers. 1715 InductionList Inductions; 1716 /// Holds the phi nodes that are first-order recurrences. 1717 RecurrenceSet FirstOrderRecurrences; 1718 /// Holds the widest induction type encountered. 1719 Type *WidestIndTy; 1720 1721 /// Allowed outside users. This holds the induction and reduction 1722 /// vars which can be accessed from outside the loop. 
1723 SmallPtrSet<Value *, 4> AllowedExit; 1724 1725 /// Holds the instructions known to be uniform after vectorization. 1726 SmallPtrSet<Instruction *, 4> Uniforms; 1727 1728 /// Holds the instructions known to be scalar after vectorization. 1729 SmallPtrSet<Instruction *, 4> Scalars; 1730 1731 /// Can we assume the absence of NaNs. 1732 bool HasFunNoNaNAttr; 1733 1734 /// Vectorization requirements that will go through late-evaluation. 1735 LoopVectorizationRequirements *Requirements; 1736 1737 /// Used to emit an analysis of any legality issues. 1738 LoopVectorizeHints *Hints; 1739 1740 /// While vectorizing these instructions we have to generate a 1741 /// call to the appropriate masked intrinsic 1742 SmallPtrSet<const Instruction *, 8> MaskedOp; 1743 }; 1744 1745 /// LoopVectorizationCostModel - estimates the expected speedups due to 1746 /// vectorization. 1747 /// In many cases vectorization is not profitable. This can happen because of 1748 /// a number of reasons. In this class we mainly attempt to predict the 1749 /// expected speedup/slowdowns due to the supported instruction set. We use the 1750 /// TargetTransformInfo to query the different backends for the cost of 1751 /// different operations. 1752 class LoopVectorizationCostModel { 1753 public: 1754 LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE, 1755 LoopInfo *LI, LoopVectorizationLegality *Legal, 1756 const TargetTransformInfo &TTI, 1757 const TargetLibraryInfo *TLI, DemandedBits *DB, 1758 AssumptionCache *AC, 1759 OptimizationRemarkEmitter *ORE, const Function *F, 1760 const LoopVectorizeHints *Hints) 1761 : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB), 1762 AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {} 1763 1764 /// Information about vectorization costs 1765 struct VectorizationFactor { 1766 unsigned Width; // Vector width with best cost 1767 unsigned Cost; // Cost of the loop with that width 1768 }; 1769 /// \return The most profitable vectorization factor and the cost of that VF. 1770 /// This method checks every power of two up to VF. If UserVF is not ZERO 1771 /// then this vectorization factor will be selected if vectorization is 1772 /// possible. 1773 VectorizationFactor selectVectorizationFactor(bool OptForSize); 1774 1775 /// \return The size (in bits) of the smallest and widest types in the code 1776 /// that needs to be vectorized. We ignore values that remain scalar such as 1777 /// 64 bit loop indices. 1778 std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); 1779 1780 /// \return The desired interleave count. 1781 /// If interleave count has been specified by metadata it will be returned. 1782 /// Otherwise, the interleave count is computed and returned. VF and LoopCost 1783 /// are the selected vectorization factor and the cost of the selected VF. 1784 unsigned selectInterleaveCount(bool OptForSize, unsigned VF, 1785 unsigned LoopCost); 1786 1787 /// \return The most profitable unroll factor. 1788 /// This method finds the best unroll-factor based on register pressure and 1789 /// other parameters. VF and LoopCost are the selected vectorization factor 1790 /// and the cost of the selected VF. 1791 unsigned computeInterleaveCount(bool OptForSize, unsigned VF, 1792 unsigned LoopCost); 1793 1794 /// \brief A struct that represents some properties of the register usage 1795 /// of a loop. 1796 struct RegisterUsage { 1797 /// Holds the number of loop invariant values that are used in the loop. 
1798     unsigned LoopInvariantRegs;
1799     /// Holds the maximum number of concurrent live intervals in the loop.
1800     unsigned MaxLocalUsers;
1801     /// Holds the number of instructions in the loop.
1802     unsigned NumInstructions;
1803   };
1804
1805   /// \return Returns information about the register usages of the loop for the
1806   /// given vectorization factors.
1807   SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1808
1809   /// Collect values we want to ignore in the cost model.
1810   void collectValuesToIgnore();
1811
1812 private:
1813   /// The vectorization cost is a combination of the cost itself and a
1814   /// boolean indicating whether any of the contributing operations will
1815   /// actually operate on vector values after type legalization in the
1816   /// backend. If this latter value is false, then all operations will be
1817   /// scalarized, i.e. no vectorization has actually taken place, and the
1818   /// reported cost describes a loop that still executes with scalar
1819   /// instructions.
1820   typedef std::pair<unsigned, bool> VectorizationCostTy;
1821
1822   /// Returns the expected execution cost. The unit of the cost does
1823   /// not matter because we use the 'cost' units to compare different
1824   /// vector widths. The cost that is returned is *not* normalized by
1825   /// the vectorization factor.
1826   VectorizationCostTy expectedCost(unsigned VF);
1827
1828   /// Returns the execution time cost of an instruction for a given vector
1829   /// width. Vector width of one means scalar.
1830   VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
1831
1832   /// The cost-computation logic from getInstructionCost which provides
1833   /// the vector type as an output parameter.
1834   unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
1835
1836   /// Returns whether the instruction is a load or store and will be emitted
1837   /// as a vector operation.
1838   bool isConsecutiveLoadOrStore(Instruction *I);
1839
1840   /// Report an analysis message to assist the user in diagnosing loops that are
1841   /// not vectorized. These are handled as LoopAccessReport rather than
1842   /// VectorizationReport because the << operator of VectorizationReport returns
1843   /// LoopAccessReport.
1844   void emitAnalysis(const LoopAccessReport &Message) const {
1845     emitAnalysisDiag(TheLoop, *Hints, *ORE, Message);
1846   }
1847
1848 public:
1849   /// Map of scalar integer values to the smallest bitwidth they can be legally
1850   /// represented as. The vector equivalents of these values should be truncated
1851   /// to this type.
1852   MapVector<Instruction *, uint64_t> MinBWs;
1853
1854   /// The loop that we evaluate.
1855   Loop *TheLoop;
1856   /// Predicated scalar evolution analysis.
1857   PredicatedScalarEvolution &PSE;
1858   /// Loop Info analysis.
1859   LoopInfo *LI;
1860   /// Vectorization legality.
1861   LoopVectorizationLegality *Legal;
1862   /// Vector target information.
1863   const TargetTransformInfo &TTI;
1864   /// Target Library Info.
1865   const TargetLibraryInfo *TLI;
1866   /// Demanded bits analysis.
1867   DemandedBits *DB;
1868   /// Assumption cache.
1869   AssumptionCache *AC;
1870   /// Interface to emit optimization remarks.
1871   OptimizationRemarkEmitter *ORE;
1872
1873   const Function *TheFunction;
1874   /// Loop Vectorize Hint.
1875   const LoopVectorizeHints *Hints;
1876   /// Values to ignore in the cost model.
1877   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1878   /// Values to ignore in the cost model when VF > 1.
1879   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1880 };
1881
1882 /// \brief This holds vectorization requirements that must be verified late in
1883 /// the process. The requirements are set by the legality analysis and the
1884 /// cost model. Once vectorization has been determined to be possible and
1885 /// profitable the requirements can be verified by looking for metadata or
1886 /// compiler options. For example, some loops require FP commutativity which
1887 /// is only allowed if vectorization is explicitly specified or if the
1888 /// fast-math compiler option has been provided.
1889 /// Late evaluation of these requirements allows helpful diagnostics to be
1890 /// composed that tell the user what needs to be done to vectorize the loop,
1891 /// for example, by specifying '#pragma clang loop vectorize' or -ffast-math.
1892 /// Late evaluation should be used only when diagnostics can be generated
1893 /// that can be followed by a non-expert user.
1894 class LoopVectorizationRequirements {
1895 public:
1896   LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE)
1897       : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr), ORE(ORE) {}
1898
1899   void addUnsafeAlgebraInst(Instruction *I) {
1900     // First unsafe algebra instruction.
1901     if (!UnsafeAlgebraInst)
1902       UnsafeAlgebraInst = I;
1903   }
1904
1905   void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }
1906
1907   bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
1908     const char *Name = Hints.vectorizeAnalysisPassName();
1909     bool Failed = false;
1910     if (UnsafeAlgebraInst && !Hints.allowReordering()) {
1911       ORE.emitOptimizationRemarkAnalysisFPCommute(
1912           Name, UnsafeAlgebraInst->getDebugLoc(),
1913           UnsafeAlgebraInst->getParent(),
1914           VectorizationReport() << "cannot prove it is safe to reorder "
1915                                    "floating-point operations");
1916       Failed = true;
1917     }
1918
1919     // Test if runtime memcheck thresholds are exceeded.
1920     bool PragmaThresholdReached =
1921         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
1922     bool ThresholdReached =
1923         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
1924     if ((ThresholdReached && !Hints.allowReordering()) ||
1925         PragmaThresholdReached) {
1926       ORE.emitOptimizationRemarkAnalysisAliasing(
1927           Name, L,
1928           VectorizationReport()
1929               << "cannot prove it is safe to reorder memory operations");
1930       DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
1931       Failed = true;
1932     }
1933
1934     return Failed;
1935   }
1936
1937 private:
1938   unsigned NumRuntimePointerChecks;
1939   Instruction *UnsafeAlgebraInst;
1940
1941   /// Interface to emit optimization remarks.
1942   OptimizationRemarkEmitter &ORE;
1943 };
1944
1945 static void addAcyclicInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) {
1946   if (L.empty()) {
1947     if (!hasCyclesInLoopBody(L))
1948       V.push_back(&L);
1949     return;
1950   }
1951   for (Loop *InnerL : L)
1952     addAcyclicInnerLoop(*InnerL, V);
1953 }
1954
1955 /// The LoopVectorize Pass.
1956 struct LoopVectorize : public FunctionPass { 1957 /// Pass identification, replacement for typeid 1958 static char ID; 1959 1960 explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true) 1961 : FunctionPass(ID) { 1962 Impl.DisableUnrolling = NoUnrolling; 1963 Impl.AlwaysVectorize = AlwaysVectorize; 1964 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1965 } 1966 1967 LoopVectorizePass Impl; 1968 1969 bool runOnFunction(Function &F) override { 1970 if (skipFunction(F)) 1971 return false; 1972 1973 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1974 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1975 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1976 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1977 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1978 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1979 auto *TLI = TLIP ? &TLIP->getTLI() : nullptr; 1980 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1981 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1982 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1983 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1984 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1985 1986 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1987 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1988 1989 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1990 GetLAA, *ORE); 1991 } 1992 1993 void getAnalysisUsage(AnalysisUsage &AU) const override { 1994 AU.addRequired<AssumptionCacheTracker>(); 1995 AU.addRequiredID(LoopSimplifyID); 1996 AU.addRequiredID(LCSSAID); 1997 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1998 AU.addRequired<DominatorTreeWrapperPass>(); 1999 AU.addRequired<LoopInfoWrapperPass>(); 2000 AU.addRequired<ScalarEvolutionWrapperPass>(); 2001 AU.addRequired<TargetTransformInfoWrapperPass>(); 2002 AU.addRequired<AAResultsWrapperPass>(); 2003 AU.addRequired<LoopAccessLegacyAnalysis>(); 2004 AU.addRequired<DemandedBitsWrapperPass>(); 2005 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2006 AU.addPreserved<LoopInfoWrapperPass>(); 2007 AU.addPreserved<DominatorTreeWrapperPass>(); 2008 AU.addPreserved<BasicAAWrapperPass>(); 2009 AU.addPreserved<GlobalsAAWrapperPass>(); 2010 } 2011 }; 2012 2013 } // end anonymous namespace 2014 2015 //===----------------------------------------------------------------------===// 2016 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2017 // LoopVectorizationCostModel. 2018 //===----------------------------------------------------------------------===// 2019 2020 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2021 // We need to place the broadcast of invariant variables outside the loop. 2022 Instruction *Instr = dyn_cast<Instruction>(V); 2023 bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody); 2024 bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr; 2025 2026 // Place the code for broadcasting invariant variables in the new preheader. 2027 IRBuilder<>::InsertPointGuard Guard(Builder); 2028 if (Invariant) 2029 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2030 2031 // Broadcast the scalar into all locations in the vector. 
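  // For VF = 4 and an i32 value %v, CreateVectorSplat below expands to IR
  // along these lines (illustrative):
  //   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %v, i32 0
  //   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
  //                          <4 x i32> undef, <4 x i32> zeroinitializer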
2032 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2033 2034 return Shuf; 2035 } 2036 2037 void InnerLoopVectorizer::createVectorIntInductionPHI( 2038 const InductionDescriptor &II, Instruction *EntryVal) { 2039 Value *Start = II.getStartValue(); 2040 ConstantInt *Step = II.getConstIntStepValue(); 2041 assert(Step && "Can not widen an IV with a non-constant step"); 2042 2043 // Construct the initial value of the vector IV in the vector loop preheader 2044 auto CurrIP = Builder.saveIP(); 2045 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2046 if (isa<TruncInst>(EntryVal)) { 2047 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2048 Step = ConstantInt::getSigned(TruncType, Step->getSExtValue()); 2049 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2050 } 2051 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2052 Value *SteppedStart = getStepVector(SplatStart, 0, Step); 2053 Builder.restoreIP(CurrIP); 2054 2055 Value *SplatVF = 2056 ConstantVector::getSplat(VF, ConstantInt::getSigned(Start->getType(), 2057 VF * Step->getSExtValue())); 2058 // We may need to add the step a number of times, depending on the unroll 2059 // factor. The last of those goes into the PHI. 2060 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2061 &*LoopVectorBody->getFirstInsertionPt()); 2062 Instruction *LastInduction = VecInd; 2063 VectorParts Entry(UF); 2064 for (unsigned Part = 0; Part < UF; ++Part) { 2065 Entry[Part] = LastInduction; 2066 LastInduction = cast<Instruction>( 2067 Builder.CreateAdd(LastInduction, SplatVF, "step.add")); 2068 } 2069 VectorLoopValueMap.initVector(EntryVal, Entry); 2070 if (isa<TruncInst>(EntryVal)) 2071 addMetadata(Entry, EntryVal); 2072 2073 // Move the last step to the end of the latch block. This ensures consistent 2074 // placement of all induction updates. 2075 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2076 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2077 auto *ICmp = cast<Instruction>(Br->getCondition()); 2078 LastInduction->moveBefore(ICmp); 2079 LastInduction->setName("vec.ind.next"); 2080 2081 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2082 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2083 } 2084 2085 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2086 if (Legal->isScalarAfterVectorization(IV)) 2087 return true; 2088 auto isScalarInst = [&](User *U) -> bool { 2089 auto *I = cast<Instruction>(U); 2090 return (OrigLoop->contains(I) && Legal->isScalarAfterVectorization(I)); 2091 }; 2092 return any_of(IV->users(), isScalarInst); 2093 } 2094 2095 void InnerLoopVectorizer::widenIntInduction(PHINode *IV, TruncInst *Trunc) { 2096 2097 auto II = Legal->getInductionVars()->find(IV); 2098 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 2099 2100 auto ID = II->second; 2101 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2102 2103 // The scalar value to broadcast. This will be derived from the canonical 2104 // induction variable. 2105 Value *ScalarIV = nullptr; 2106 2107 // The step of the induction. 2108 Value *Step = nullptr; 2109 2110 // The value from the original loop to which we are mapping the new induction 2111 // variable. 2112 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2113 2114 // True if we have vectorized the induction variable. 
2115 auto VectorizedIV = false; 2116 2117 // Determine if we want a scalar version of the induction variable. This is 2118 // true if the induction variable itself is not widened, or if it has at 2119 // least one user in the loop that is not widened. 2120 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal); 2121 2122 // If the induction variable has a constant integer step value, go ahead and 2123 // get it now. 2124 if (ID.getConstIntStepValue()) 2125 Step = ID.getConstIntStepValue(); 2126 2127 // Try to create a new independent vector induction variable. If we can't 2128 // create the phi node, we will splat the scalar induction variable in each 2129 // loop iteration. 2130 if (VF > 1 && IV->getType() == Induction->getType() && Step && 2131 !Legal->isScalarAfterVectorization(EntryVal)) { 2132 createVectorIntInductionPHI(ID, EntryVal); 2133 VectorizedIV = true; 2134 } 2135 2136 // If we haven't yet vectorized the induction variable, or if we will create 2137 // a scalar one, we need to define the scalar induction variable and step 2138 // values. If we were given a truncation type, truncate the canonical 2139 // induction variable and constant step. Otherwise, derive these values from 2140 // the induction descriptor. 2141 if (!VectorizedIV || NeedsScalarIV) { 2142 if (Trunc) { 2143 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2144 assert(Step && "Truncation requires constant integer step"); 2145 auto StepInt = cast<ConstantInt>(Step)->getSExtValue(); 2146 ScalarIV = Builder.CreateCast(Instruction::Trunc, Induction, TruncType); 2147 Step = ConstantInt::getSigned(TruncType, StepInt); 2148 } else { 2149 ScalarIV = Induction; 2150 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2151 if (IV != OldInduction) { 2152 ScalarIV = Builder.CreateSExtOrTrunc(ScalarIV, IV->getType()); 2153 ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL); 2154 ScalarIV->setName("offset.idx"); 2155 } 2156 if (!Step) { 2157 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2158 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 2159 &*Builder.GetInsertPoint()); 2160 } 2161 } 2162 } 2163 2164 // If we haven't yet vectorized the induction variable, splat the scalar 2165 // induction variable, and build the necessary step vectors. 2166 if (!VectorizedIV) { 2167 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2168 VectorParts Entry(UF); 2169 for (unsigned Part = 0; Part < UF; ++Part) 2170 Entry[Part] = getStepVector(Broadcasted, VF * Part, Step); 2171 VectorLoopValueMap.initVector(EntryVal, Entry); 2172 if (Trunc) 2173 addMetadata(Entry, Trunc); 2174 } 2175 2176 // If an induction variable is only used for counting loop iterations or 2177 // calculating addresses, it doesn't need to be widened. Create scalar steps 2178 // that can be used by instructions we will later scalarize. Note that the 2179 // addition of the scalar steps will not increase the number of instructions 2180 // in the loop in the common case prior to InstCombine. We will be trading 2181 // one vector extract for each scalar step. 2182 if (NeedsScalarIV) 2183 buildScalarSteps(ScalarIV, Step, EntryVal); 2184 } 2185 2186 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2187 Instruction::BinaryOps BinOp) { 2188 // Create and check the types. 
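  // For example, with VF = 4, StartIdx = 0, and Step = 1, the integer path
  // below yields splat(Val) + <0, 1, 2, 3> * splat(Step), i.e. the lanes
  // Val, Val + 1, Val + 2, and Val + 3.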
2189 assert(Val->getType()->isVectorTy() && "Must be a vector"); 2190 int VLen = Val->getType()->getVectorNumElements(); 2191 2192 Type *STy = Val->getType()->getScalarType(); 2193 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2194 "Induction Step must be an integer or FP"); 2195 assert(Step->getType() == STy && "Step has wrong type"); 2196 2197 SmallVector<Constant *, 8> Indices; 2198 2199 if (STy->isIntegerTy()) { 2200 // Create a vector of consecutive numbers from zero to VF. 2201 for (int i = 0; i < VLen; ++i) 2202 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 2203 2204 // Add the consecutive indices to the vector value. 2205 Constant *Cv = ConstantVector::get(Indices); 2206 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 2207 Step = Builder.CreateVectorSplat(VLen, Step); 2208 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2209 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2210 // which can be found from the original scalar operations. 2211 Step = Builder.CreateMul(Cv, Step); 2212 return Builder.CreateAdd(Val, Step, "induction"); 2213 } 2214 2215 // Floating point induction. 2216 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2217 "Binary Opcode should be specified for FP induction"); 2218 // Create a vector of consecutive numbers from zero to VF. 2219 for (int i = 0; i < VLen; ++i) 2220 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 2221 2222 // Add the consecutive indices to the vector value. 2223 Constant *Cv = ConstantVector::get(Indices); 2224 2225 Step = Builder.CreateVectorSplat(VLen, Step); 2226 2227 // Floating point operations had to be 'fast' to enable the induction. 2228 FastMathFlags Flags; 2229 Flags.setUnsafeAlgebra(); 2230 2231 Value *MulOp = Builder.CreateFMul(Cv, Step); 2232 if (isa<Instruction>(MulOp)) 2233 // Have to check, MulOp may be a constant 2234 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 2235 2236 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2237 if (isa<Instruction>(BOp)) 2238 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2239 return BOp; 2240 } 2241 2242 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2243 Value *EntryVal) { 2244 2245 // We shouldn't have to build scalar steps if we aren't vectorizing. 2246 assert(VF > 1 && "VF should be greater than one"); 2247 2248 // Get the value type and ensure it and the step have the same integer type. 2249 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2250 assert(ScalarIVTy->isIntegerTy() && ScalarIVTy == Step->getType() && 2251 "Val and Step should have the same integer type"); 2252 2253 // Compute the scalar steps and save the results in VectorLoopValueMap. 2254 ScalarParts Entry(UF); 2255 for (unsigned Part = 0; Part < UF; ++Part) { 2256 Entry[Part].resize(VF); 2257 for (unsigned Lane = 0; Lane < VF; ++Lane) { 2258 auto *StartIdx = ConstantInt::get(ScalarIVTy, VF * Part + Lane); 2259 auto *Mul = Builder.CreateMul(StartIdx, Step); 2260 auto *Add = Builder.CreateAdd(ScalarIV, Mul); 2261 Entry[Part][Lane] = Add; 2262 } 2263 } 2264 VectorLoopValueMap.initScalar(EntryVal, Entry); 2265 } 2266 2267 int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) { 2268 assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr"); 2269 auto *SE = PSE.getSE(); 2270 // Make sure that the pointer does not point to structs. 
2271 if (Ptr->getType()->getPointerElementType()->isAggregateType()) 2272 return 0; 2273 2274 // If this value is a pointer induction variable, we know it is consecutive. 2275 PHINode *Phi = dyn_cast_or_null<PHINode>(Ptr); 2276 if (Phi && Inductions.count(Phi)) { 2277 InductionDescriptor II = Inductions[Phi]; 2278 return II.getConsecutiveDirection(); 2279 } 2280 2281 GetElementPtrInst *Gep = getGEPInstruction(Ptr); 2282 if (!Gep) 2283 return 0; 2284 2285 unsigned NumOperands = Gep->getNumOperands(); 2286 Value *GpPtr = Gep->getPointerOperand(); 2287 // If this GEP value is a consecutive pointer induction variable and all of 2288 // the indices are constant, then we know it is consecutive. 2289 Phi = dyn_cast<PHINode>(GpPtr); 2290 if (Phi && Inductions.count(Phi)) { 2291 2292 // Make sure that the pointer does not point to structs. 2293 PointerType *GepPtrType = cast<PointerType>(GpPtr->getType()); 2294 if (GepPtrType->getElementType()->isAggregateType()) 2295 return 0; 2296 2297 // Make sure that all of the index operands are loop invariant. 2298 for (unsigned i = 1; i < NumOperands; ++i) 2299 if (!SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop)) 2300 return 0; 2301 2302 InductionDescriptor II = Inductions[Phi]; 2303 return II.getConsecutiveDirection(); 2304 } 2305 2306 unsigned InductionOperand = getGEPInductionOperand(Gep); 2307 2308 // Check that all of the gep indices are uniform except for our induction 2309 // operand. 2310 for (unsigned i = 0; i != NumOperands; ++i) 2311 if (i != InductionOperand && 2312 !SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop)) 2313 return 0; 2314 2315 // We can emit wide load/stores only if the last non-zero index is the 2316 // induction variable. 2317 const SCEV *Last = nullptr; 2318 if (!getSymbolicStrides() || !getSymbolicStrides()->count(Gep)) 2319 Last = PSE.getSCEV(Gep->getOperand(InductionOperand)); 2320 else { 2321 // Because of the multiplication by a stride we can have a s/zext cast. 2322 // We are going to replace this stride by 1 so the cast is safe to ignore. 2323 // 2324 // %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] 2325 // %0 = trunc i64 %indvars.iv to i32 2326 // %mul = mul i32 %0, %Stride1 2327 // %idxprom = zext i32 %mul to i64 << Safe cast. 2328 // %arrayidx = getelementptr inbounds i32* %B, i64 %idxprom 2329 // 2330 Last = replaceSymbolicStrideSCEV(PSE, *getSymbolicStrides(), 2331 Gep->getOperand(InductionOperand), Gep); 2332 if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(Last)) 2333 Last = 2334 (C->getSCEVType() == scSignExtend || C->getSCEVType() == scZeroExtend) 2335 ? C->getOperand() 2336 : Last; 2337 } 2338 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Last)) { 2339 const SCEV *Step = AR->getStepRecurrence(*SE); 2340 2341 // The memory is consecutive because the last index is consecutive 2342 // and all other indices are loop invariant. 2343 if (Step->isOne()) 2344 return 1; 2345 if (Step->isAllOnesValue()) 2346 return -1; 2347 } 2348 2349 return 0; 2350 } 2351 2352 bool LoopVectorizationLegality::isUniform(Value *V) { 2353 return LAI->isUniform(V); 2354 } 2355 2356 const InnerLoopVectorizer::VectorParts & 2357 InnerLoopVectorizer::getVectorValue(Value *V) { 2358 assert(V != Induction && "The new induction variable should not be used."); 2359 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2360 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2361 2362 // If we have a stride that is replaced by one, do it here. 
2363 if (Legal->hasStride(V)) 2364 V = ConstantInt::get(V->getType(), 1); 2365 2366 // If we have this scalar in the map, return it. 2367 if (VectorLoopValueMap.hasVector(V)) 2368 return VectorLoopValueMap.VectorMapStorage[V]; 2369 2370 // If the value has not been vectorized, check if it has been scalarized 2371 // instead. If it has been scalarized, and we actually need the value in 2372 // vector form, we will construct the vector values on demand. 2373 if (VectorLoopValueMap.hasScalar(V)) { 2374 2375 // Initialize a new vector map entry. 2376 VectorParts Entry(UF); 2377 2378 // If we aren't vectorizing, we can just copy the scalar map values over to 2379 // the vector map. 2380 if (VF == 1) { 2381 for (unsigned Part = 0; Part < UF; ++Part) 2382 Entry[Part] = getScalarValue(V, Part, 0); 2383 return VectorLoopValueMap.initVector(V, Entry); 2384 } 2385 2386 // Get the last scalarized instruction. This corresponds to the instruction 2387 // we created for the last vector lane on the last unroll iteration. 2388 auto *LastInst = cast<Instruction>(getScalarValue(V, UF - 1, VF - 1)); 2389 2390 // Set the insert point after the last scalarized instruction. This ensures 2391 // the insertelement sequence will directly follow the scalar definitions. 2392 auto OldIP = Builder.saveIP(); 2393 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 2394 Builder.SetInsertPoint(&*NewIP); 2395 2396 // However, if we are vectorizing, we need to construct the vector values 2397 // using insertelement instructions. Since the resulting vectors are stored 2398 // in VectorLoopValueMap, we will only generate the insertelements once. 2399 for (unsigned Part = 0; Part < UF; ++Part) { 2400 Value *Insert = UndefValue::get(VectorType::get(V->getType(), VF)); 2401 for (unsigned Width = 0; Width < VF; ++Width) 2402 Insert = Builder.CreateInsertElement( 2403 Insert, getScalarValue(V, Part, Width), Builder.getInt32(Width)); 2404 Entry[Part] = Insert; 2405 } 2406 Builder.restoreIP(OldIP); 2407 return VectorLoopValueMap.initVector(V, Entry); 2408 } 2409 2410 // If this scalar is unknown, assume that it is a constant or that it is 2411 // loop invariant. Broadcast V and save the value for future uses. 2412 Value *B = getBroadcastInstrs(V); 2413 return VectorLoopValueMap.initVector(V, VectorParts(UF, B)); 2414 } 2415 2416 Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part, 2417 unsigned Lane) { 2418 2419 // If the value is not an instruction contained in the loop, it should 2420 // already be scalar. 2421 if (OrigLoop->isLoopInvariant(V)) 2422 return V; 2423 2424 // If the value from the original loop has not been vectorized, it is 2425 // represented by UF x VF scalar values in the new loop. Return the requested 2426 // scalar value. 2427 if (VectorLoopValueMap.hasScalar(V)) 2428 return VectorLoopValueMap.ScalarMapStorage[V][Part][Lane]; 2429 2430 // If the value has not been scalarized, get its entry in VectorLoopValueMap 2431 // for the given unroll part. If this entry is not a vector type (i.e., the 2432 // vectorization factor is one), there is no need to generate an 2433 // extractelement instruction. 2434 auto *U = getVectorValue(V)[Part]; 2435 if (!U->getType()->isVectorTy()) { 2436 assert(VF == 1 && "Value not scalarized has non-vector type"); 2437 return U; 2438 } 2439 2440 // Otherwise, the value from the original loop has been vectorized and is 2441 // represented by UF vector values. Extract and return the requested scalar 2442 // value from the appropriate vector lane. 
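  // For example, with VF = 4 the scalar for lane 2 of the requested unroll
  // part is obtained as (illustrative):
  //   %scalar = extractelement <4 x i32> %part.vec, i32 2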
2443   return Builder.CreateExtractElement(U, Builder.getInt32(Lane));
2444 }
2445
2446 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2447   assert(Vec->getType()->isVectorTy() && "Invalid type");
2448   SmallVector<Constant *, 8> ShuffleMask;
2449   for (unsigned i = 0; i < VF; ++i)
2450     ShuffleMask.push_back(Builder.getInt32(VF - i - 1));
2451
2452   return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
2453                                      ConstantVector::get(ShuffleMask),
2454                                      "reverse");
2455 }
2456
2457 // Get a mask to interleave \p NumVec vectors into a wide vector.
2458 // I.e. <0, VF, VF*2, ..., VF*(NumVec-1), 1, VF+1, VF*2+1, ...>
2459 // E.g. For 2 interleaved vectors, if VF is 4, the mask is:
2460 // <0, 4, 1, 5, 2, 6, 3, 7>
2461 static Constant *getInterleavedMask(IRBuilder<> &Builder, unsigned VF,
2462                                     unsigned NumVec) {
2463   SmallVector<Constant *, 16> Mask;
2464   for (unsigned i = 0; i < VF; i++)
2465     for (unsigned j = 0; j < NumVec; j++)
2466       Mask.push_back(Builder.getInt32(j * VF + i));
2467
2468   return ConstantVector::get(Mask);
2469 }
2470
2471 // Get the strided mask starting from index \p Start.
2472 // I.e. <Start, Start + Stride, ..., Start + Stride*(VF-1)>
2473 static Constant *getStridedMask(IRBuilder<> &Builder, unsigned Start,
2474                                 unsigned Stride, unsigned VF) {
2475   SmallVector<Constant *, 16> Mask;
2476   for (unsigned i = 0; i < VF; i++)
2477     Mask.push_back(Builder.getInt32(Start + i * Stride));
2478
2479   return ConstantVector::get(Mask);
2480 }
2481
2482 // Get a mask of two parts: the first part consists of sequential integers
2483 // starting from 0, and the second part consists of UNDEFs.
2484 // I.e. <0, 1, 2, ..., NumInt - 1, undef, ..., undef>
2485 static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned NumInt,
2486                                    unsigned NumUndef) {
2487   SmallVector<Constant *, 16> Mask;
2488   for (unsigned i = 0; i < NumInt; i++)
2489     Mask.push_back(Builder.getInt32(i));
2490
2491   Constant *Undef = UndefValue::get(Builder.getInt32Ty());
2492   for (unsigned i = 0; i < NumUndef; i++)
2493     Mask.push_back(Undef);
2494
2495   return ConstantVector::get(Mask);
2496 }
2497
2498 // Concatenate two vectors with the same element type. The second vector must
2499 // not have more elements than the first; if it has fewer elements, it is
2500 // extended with UNDEFs.
2501 static Value *ConcatenateTwoVectors(IRBuilder<> &Builder, Value *V1,
2502                                     Value *V2) {
2503   VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
2504   VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
2505   assert(VecTy1 && VecTy2 &&
2506          VecTy1->getScalarType() == VecTy2->getScalarType() &&
2507          "Expect two vectors with the same element type");
2508
2509   unsigned NumElts1 = VecTy1->getNumElements();
2510   unsigned NumElts2 = VecTy2->getNumElements();
2511   assert(NumElts1 >= NumElts2 && "The first vector must not have fewer elements");
2512
2513   if (NumElts1 > NumElts2) {
2514     // Extend with UNDEFs.
2515     Constant *ExtMask =
2516         getSequentialMask(Builder, NumElts2, NumElts1 - NumElts2);
2517     V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask);
2518   }
2519
2520   Constant *Mask = getSequentialMask(Builder, NumElts1 + NumElts2, 0);
2521   return Builder.CreateShuffleVector(V1, V2, Mask);
2522 }
2523
2524 // Concatenate vectors in the given list. All vectors have the same type.
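// E.g., two <4 x i32> inputs are merged by a single shufflevector with mask
// <0, 1, 2, 3, 4, 5, 6, 7>; a list of N vectors is reduced pairwise this way
// in roughly log2(N) rounds.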
2525 static Value *ConcatenateVectors(IRBuilder<> &Builder, 2526 ArrayRef<Value *> InputList) { 2527 unsigned NumVec = InputList.size(); 2528 assert(NumVec > 1 && "Should be at least two vectors"); 2529 2530 SmallVector<Value *, 8> ResList; 2531 ResList.append(InputList.begin(), InputList.end()); 2532 do { 2533 SmallVector<Value *, 8> TmpList; 2534 for (unsigned i = 0; i < NumVec - 1; i += 2) { 2535 Value *V0 = ResList[i], *V1 = ResList[i + 1]; 2536 assert((V0->getType() == V1->getType() || i == NumVec - 2) && 2537 "Only the last vector may have a different type"); 2538 2539 TmpList.push_back(ConcatenateTwoVectors(Builder, V0, V1)); 2540 } 2541 2542 // Push the last vector if the total number of vectors is odd. 2543 if (NumVec % 2 != 0) 2544 TmpList.push_back(ResList[NumVec - 1]); 2545 2546 ResList = TmpList; 2547 NumVec = ResList.size(); 2548 } while (NumVec > 1); 2549 2550 return ResList[0]; 2551 } 2552 2553 // Try to vectorize the interleave group that \p Instr belongs to. 2554 // 2555 // E.g. Translate following interleaved load group (factor = 3): 2556 // for (i = 0; i < N; i+=3) { 2557 // R = Pic[i]; // Member of index 0 2558 // G = Pic[i+1]; // Member of index 1 2559 // B = Pic[i+2]; // Member of index 2 2560 // ... // do something to R, G, B 2561 // } 2562 // To: 2563 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2564 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 2565 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 2566 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 2567 // 2568 // Or translate following interleaved store group (factor = 3): 2569 // for (i = 0; i < N; i+=3) { 2570 // ... do something to R, G, B 2571 // Pic[i] = R; // Member of index 0 2572 // Pic[i+1] = G; // Member of index 1 2573 // Pic[i+2] = B; // Member of index 2 2574 // } 2575 // To: 2576 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2577 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 2578 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2579 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2580 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2581 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) { 2582 const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr); 2583 assert(Group && "Fail to get an interleaved access group."); 2584 2585 // Skip if current instruction is not the insert position. 2586 if (Instr != Group->getInsertPos()) 2587 return; 2588 2589 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2590 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2591 Value *Ptr = getPointerOperand(Instr); 2592 2593 // Prepare for the vector type of the interleaved load/store. 2594 Type *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 2595 unsigned InterleaveFactor = Group->getFactor(); 2596 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF); 2597 Type *PtrTy = VecTy->getPointerTo(Ptr->getType()->getPointerAddressSpace()); 2598 2599 // Prepare for the new pointers. 2600 setDebugLocFromInst(Builder, Ptr); 2601 SmallVector<Value *, 2> NewPtrs; 2602 unsigned Index = Group->getIndex(Instr); 2603 2604 // If the group is reverse, adjust the index to refer to the last vector lane 2605 // instead of the first. We adjust the index from the first vector lane, 2606 // rather than directly getting the pointer for lane VF - 1, because the 2607 // pointer operand of the interleaved access is supposed to be uniform. 
For
2608   // uniform instructions, we're only required to generate a value for the
2609   // first vector lane in each unroll iteration.
2610   if (Group->isReverse())
2611     Index += (VF - 1) * Group->getFactor();
2612
2613   for (unsigned Part = 0; Part < UF; Part++) {
2614     Value *NewPtr = getScalarValue(Ptr, Part, 0);
2615
2616     // Note that the current instruction may be a member at any index. We need
2617     // to adjust the address down to the member at index 0.
2618     //
2619     // E.g. a = A[i+1];   // Member of index 1 (Current instruction)
2620     //      b = A[i];     // Member of index 0
2621     // The current pointer points to A[i+1]; adjust it to A[i].
2622     //
2623     // E.g. A[i+1] = a;   // Member of index 1
2624     //      A[i] = b;     // Member of index 0
2625     //      A[i+2] = c;   // Member of index 2 (Current instruction)
2626     // The current pointer points to A[i+2]; adjust it to A[i].
2627     NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
2628
2629     // Cast to the vector pointer type.
2630     NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
2631   }
2632
2633   setDebugLocFromInst(Builder, Instr);
2634   Value *UndefVec = UndefValue::get(VecTy);
2635
2636   // Vectorize the interleaved load group.
2637   if (LI) {
2638
2639     // For each unroll part, create a wide load for the group.
2640     SmallVector<Value *, 2> NewLoads;
2641     for (unsigned Part = 0; Part < UF; Part++) {
2642       auto *NewLoad = Builder.CreateAlignedLoad(
2643           NewPtrs[Part], Group->getAlignment(), "wide.vec");
2644       addMetadata(NewLoad, Instr);
2645       NewLoads.push_back(NewLoad);
2646     }
2647
2648     // For each member in the group, shuffle out the appropriate data from the
2649     // wide loads.
2650     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2651       Instruction *Member = Group->getMember(I);
2652
2653       // Skip the gaps in the group.
2654       if (!Member)
2655         continue;
2656
2657       VectorParts Entry(UF);
2658       Constant *StrideMask = getStridedMask(Builder, I, InterleaveFactor, VF);
2659       for (unsigned Part = 0; Part < UF; Part++) {
2660         Value *StridedVec = Builder.CreateShuffleVector(
2661             NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2662
2663         // If this member has a different type, cast the result to its type.
2664         if (Member->getType() != ScalarTy) {
2665           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2666           StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy);
2667         }
2668
2669         Entry[Part] =
2670             Group->isReverse() ? reverseVector(StridedVec) : StridedVec;
2671       }
2672       VectorLoopValueMap.initVector(Member, Entry);
2673     }
2674     return;
2675   }
2676
2677   // The sub-vector type for the current instruction.
2678   VectorType *SubVT = VectorType::get(ScalarTy, VF);
2679
2680   // Vectorize the interleaved store group.
2681   for (unsigned Part = 0; Part < UF; Part++) {
2682     // Collect the stored vector from each member.
2683     SmallVector<Value *, 4> StoredVecs;
2684     for (unsigned i = 0; i < InterleaveFactor; i++) {
2685       // An interleaved store group doesn't allow gaps, so each index has a member.
2686       Instruction *Member = Group->getMember(i);
2687       assert(Member && "Failed to get a member from an interleaved store group");
2688
2689       Value *StoredVec =
2690           getVectorValue(cast<StoreInst>(Member)->getValueOperand())[Part];
2691       if (Group->isReverse())
2692         StoredVec = reverseVector(StoredVec);
2693
2694       // If this member has a different type, cast it to a unified type.
2695       if (StoredVec->getType() != SubVT)
2696         StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT);
2697
2698       StoredVecs.push_back(StoredVec);
2699     }
2700
2701     // Concatenate all vectors into a wide vector.
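    // E.g., for an interleave factor of 3 and VF = 4, the R, G, and B
    // sub-vectors (<4 x i32> each) become a single <12 x i32> value here
    // before being interleaved by the shuffle below.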
2702 Value *WideVec = ConcatenateVectors(Builder, StoredVecs); 2703 2704 // Interleave the elements in the wide vector. 2705 Constant *IMask = getInterleavedMask(Builder, VF, InterleaveFactor); 2706 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask, 2707 "interleaved.vec"); 2708 2709 Instruction *NewStoreInstr = 2710 Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment()); 2711 addMetadata(NewStoreInstr, Instr); 2712 } 2713 } 2714 2715 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) { 2716 // Attempt to issue a wide load. 2717 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2718 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2719 2720 assert((LI || SI) && "Invalid Load/Store instruction"); 2721 2722 // Try to vectorize the interleave group if this access is interleaved. 2723 if (Legal->isAccessInterleaved(Instr)) 2724 return vectorizeInterleaveGroup(Instr); 2725 2726 Type *ScalarDataTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 2727 Type *DataTy = VectorType::get(ScalarDataTy, VF); 2728 Value *Ptr = getPointerOperand(Instr); 2729 unsigned Alignment = LI ? LI->getAlignment() : SI->getAlignment(); 2730 // An alignment of 0 means target abi alignment. We need to use the scalar's 2731 // target abi alignment in such a case. 2732 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2733 if (!Alignment) 2734 Alignment = DL.getABITypeAlignment(ScalarDataTy); 2735 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2736 uint64_t ScalarAllocatedSize = DL.getTypeAllocSize(ScalarDataTy); 2737 uint64_t VectorElementSize = DL.getTypeStoreSize(DataTy) / VF; 2738 2739 if (SI && Legal->blockNeedsPredication(SI->getParent()) && 2740 !Legal->isMaskRequired(SI)) 2741 return scalarizeInstruction(Instr, true); 2742 2743 if (ScalarAllocatedSize != VectorElementSize) 2744 return scalarizeInstruction(Instr); 2745 2746 // If the pointer is loop invariant scalarize the load. 2747 if (LI && Legal->isUniform(Ptr)) 2748 return scalarizeInstruction(Instr); 2749 2750 // If the pointer is non-consecutive and gather/scatter is not supported 2751 // scalarize the instruction. 2752 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 2753 bool Reverse = ConsecutiveStride < 0; 2754 bool CreateGatherScatter = 2755 !ConsecutiveStride && ((LI && Legal->isLegalMaskedGather(ScalarDataTy)) || 2756 (SI && Legal->isLegalMaskedScatter(ScalarDataTy))); 2757 2758 if (!ConsecutiveStride && !CreateGatherScatter) 2759 return scalarizeInstruction(Instr); 2760 2761 VectorParts VectorGep; 2762 2763 // Handle consecutive loads/stores. 2764 GetElementPtrInst *Gep = getGEPInstruction(Ptr); 2765 if (ConsecutiveStride) { 2766 if (Gep && Legal->isInductionVariable(Gep->getPointerOperand())) { 2767 setDebugLocFromInst(Builder, Gep); 2768 auto *FirstBasePtr = getScalarValue(Gep->getPointerOperand(), 0, 0); 2769 2770 // Create the new GEP with the new induction variable. 2771 GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone()); 2772 Gep2->setOperand(0, FirstBasePtr); 2773 Gep2->setName("gep.indvar.base"); 2774 Ptr = Builder.Insert(Gep2); 2775 } else if (Gep) { 2776 setDebugLocFromInst(Builder, Gep); 2777 assert(PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getPointerOperand()), 2778 OrigLoop) && 2779 "Base ptr must be invariant"); 2780 // The last index does not have to be the induction. It can be 2781 // consecutive and be a function of the index. 
      unsigned NumOperands = Gep->getNumOperands();
      unsigned InductionOperand = getGEPInductionOperand(Gep);
      // Create the new GEP with the new induction variable.
      GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());

      for (unsigned i = 0; i < NumOperands; ++i) {
        Value *GepOperand = Gep->getOperand(i);
        Instruction *GepOperandInst = dyn_cast<Instruction>(GepOperand);

        // Update the last index or a loop-invariant instruction anchored in
        // the loop.
        if (i == InductionOperand ||
            (GepOperandInst && OrigLoop->contains(GepOperandInst))) {
          assert((i == InductionOperand ||
                  PSE.getSE()->isLoopInvariant(PSE.getSCEV(GepOperandInst),
                                               OrigLoop)) &&
                 "Must be last index or loop invariant");

          Gep2->setOperand(i, getScalarValue(GepOperand, 0, 0));
          Gep2->setName("gep.indvar.idx");
        }
      }
      Ptr = Builder.Insert(Gep2);
    } else { // No GEP
      // Use the induction element ptr.
      assert(isa<PHINode>(Ptr) && "Invalid induction ptr");
      setDebugLocFromInst(Builder, Ptr);
      Ptr = getScalarValue(Ptr, 0, 0);
    }
  } else {
    // At this point we should have a vector version of the GEP for gather or
    // scatter.
    assert(CreateGatherScatter && "The instruction should be scalarized");
    if (Gep) {
      // Vectorize the GEP across all UF parts. We want to get a vector value
      // for the base and for each index that's defined inside the loop, even
      // if it is loop-invariant but wasn't hoisted out. Otherwise we want to
      // keep them scalar.
      SmallVector<VectorParts, 4> OpsV;
      for (Value *Op : Gep->operands()) {
        Instruction *SrcInst = dyn_cast<Instruction>(Op);
        if (SrcInst && OrigLoop->contains(SrcInst))
          OpsV.push_back(getVectorValue(Op));
        else
          OpsV.push_back(VectorParts(UF, Op));
      }
      for (unsigned Part = 0; Part < UF; ++Part) {
        SmallVector<Value *, 4> Ops;
        Value *GEPBasePtr = OpsV[0][Part];
        for (unsigned i = 1; i < Gep->getNumOperands(); i++)
          Ops.push_back(OpsV[i][Part]);
        Value *NewGep = Builder.CreateGEP(GEPBasePtr, Ops, "VectorGep");
        cast<GetElementPtrInst>(NewGep)->setIsInBounds(Gep->isInBounds());
        assert(NewGep->getType()->isVectorTy() && "Expected vector GEP");

        NewGep =
            Builder.CreateBitCast(NewGep, VectorType::get(Ptr->getType(), VF));
        VectorGep.push_back(NewGep);
      }
    } else
      VectorGep = getVectorValue(Ptr);
  }

  VectorParts Mask = createBlockInMask(Instr->getParent());
  // Handle Stores:
  if (SI) {
    assert(!Legal->isUniform(SI->getPointerOperand()) &&
           "We do not allow storing to uniform addresses");
    setDebugLocFromInst(Builder, SI);
    // We don't want to update the value in the map as it might be used in
    // another expression. So don't use a reference type for "StoredVal".
    VectorParts StoredVal = getVectorValue(SI->getValueOperand());

    for (unsigned Part = 0; Part < UF; ++Part) {
      Instruction *NewSI = nullptr;
      if (CreateGatherScatter) {
        Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr;
        NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part],
                                            Alignment, MaskPart);
      } else {
        // Calculate the pointer for the specific unroll-part.
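        // For example (illustrative), with VF = 4 the store for Part 0 uses
        // Ptr + 0 and the store for Part 1 uses Ptr + 4, so each unroll part
        // covers the next VF consecutive elements.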
        Value *PartPtr =
            Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));

        if (Reverse) {
          // If we store to reverse consecutive memory locations, then we need
          // to reverse the order of elements in the stored value.
          StoredVal[Part] = reverseVector(StoredVal[Part]);
          // If the address is consecutive but reversed, then the
          // wide store needs to start at the last vector element.
          PartPtr =
              Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
          PartPtr =
              Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
          Mask[Part] = reverseVector(Mask[Part]);
        }

        Value *VecPtr =
            Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));

        if (Legal->isMaskRequired(SI))
          NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment,
                                            Mask[Part]);
        else
          NewSI =
              Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment);
      }
      addMetadata(NewSI, SI);
    }
    return;
  }

  // Handle loads.
  assert(LI && "Must have a load instruction");
  setDebugLocFromInst(Builder, LI);
  VectorParts Entry(UF);
  for (unsigned Part = 0; Part < UF; ++Part) {
    Instruction *NewLI;
    if (CreateGatherScatter) {
      Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr;
      NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, MaskPart,
                                         0, "wide.masked.gather");
      Entry[Part] = NewLI;
    } else {
      // Calculate the pointer for the specific unroll-part.
      Value *PartPtr =
          Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));

      if (Reverse) {
        // If the address is consecutive but reversed, then the
        // wide load needs to start at the last vector element.
        PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
        PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
        Mask[Part] = reverseVector(Mask[Part]);
      }

      Value *VecPtr =
          Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
      if (Legal->isMaskRequired(LI))
        NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
                                         UndefValue::get(DataTy),
                                         "wide.masked.load");
      else
        NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
      Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI;
    }
    addMetadata(NewLI, LI);
  }
  VectorLoopValueMap.initVector(Instr, Entry);
}

void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
                                               bool IfPredicateInstr) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
  DEBUG(dbgs() << "LV: Scalarizing"
               << (IfPredicateInstr ? " and predicating:" : ":") << *Instr
               << '\n');
  // Holds vector parameters or scalars, in case of uniform vals.
  SmallVector<VectorParts, 4> Params;

  setDebugLocFromInst(Builder, Instr);

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  // Initialize a new scalar map entry.
  ScalarParts Entry(UF);

  VectorParts Cond;
  if (IfPredicateInstr)
    Cond = createBlockInMask(Instr->getParent());

  // For each vector unroll 'part':
  for (unsigned Part = 0; Part < UF; ++Part) {
    Entry[Part].resize(VF);
    // For each scalar that we create:
    for (unsigned Width = 0; Width < VF; ++Width) {

      // Start if-block.
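      // In shorthand IR (illustrative), the per-lane guard created below
      // looks like:
      //   %c = extractelement <2 x i1> %mask, i32 <Width>
      //   %cmp = icmp eq i1 %c, true
      // The conditional branch on the guard is materialized later, in
      // predicateInstructions().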
      Value *Cmp = nullptr;
      if (IfPredicateInstr) {
        Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Width));
        Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp,
                                 ConstantInt::get(Cmp->getType(), 1));
      }

      Instruction *Cloned = Instr->clone();
      if (!IsVoidRetTy)
        Cloned->setName(Instr->getName() + ".cloned");

      // Replace the operands of the cloned instructions with their scalar
      // equivalents in the new loop.
      for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
        auto *NewOp = getScalarValue(Instr->getOperand(op), Part, Width);
        Cloned->setOperand(op, NewOp);
      }
      addNewMetadata(Cloned, Instr);

      // Place the cloned scalar in the new loop.
      Builder.Insert(Cloned);

      // Add the cloned scalar to the scalar map entry.
      Entry[Part][Width] = Cloned;

      // If we just cloned a new assumption, add it to the assumption cache.
      if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
        if (II->getIntrinsicID() == Intrinsic::assume)
          AC->registerAssumption(II);

      // End if-block.
      if (IfPredicateInstr)
        PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp));
    }
  }
  VectorLoopValueMap.initScalar(Instr, Entry);
}

PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
                                                      Value *End, Value *Step,
                                                      Instruction *DL) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible no latch exists
  // yet. If so, use the header as this will be a single-block loop.
  if (!Latch)
    Latch = Header;

  IRBuilder<> Builder(&*Header->getFirstInsertionPt());
  setDebugLocFromInst(Builder, getDebugLocFromInstOrOperands(OldInduction));
  auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");

  Builder.SetInsertPoint(Latch->getTerminator());

  // Create i+1 and fill the PHINode.
  Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
  Induction->addIncoming(Start, L->getLoopPreheader());
  Induction->addIncoming(Next, Latch);
  // Create the compare.
  Value *ICmp = Builder.CreateICmpEQ(Next, End);
  Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);

  // Now we have two terminators. Remove the old one from the block.
  Latch->getTerminator()->eraseFromParent();

  return Induction;
}

Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
  if (TripCount)
    return TripCount;

  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  // Find the loop boundaries.
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
         "Invalid loop count");

  Type *IdxTy = Legal->getWidestInductionType();

  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign-extended before the
  // compare. The only way that we can get a backedge-taken count is if the
  // induction variable was signed and as such will not overflow. In such a
  // case truncation is legal.
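  // For example (illustrative), an i32 IV that is sign-extended to i64 for
  // the loop compare yields an i64 backedge-taken count; because the i32 IV
  // itself cannot overflow, truncating that count back to i32 loses nothing.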
  if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
      IdxTy->getPrimitiveSizeInBits())
    BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
  BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);

  // Get the total trip count from the count by adding 1.
  const SCEV *ExitCount = SE->getAddExpr(
      BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));

  const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();

  // Expand the trip count and place the new instructions in the preheader.
  // Notice that the pre-header does not change, only the loop body.
  SCEVExpander Exp(*SE, DL, "induction");

  // Count holds the overall loop count (N).
  TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
                                L->getLoopPreheader()->getTerminator());

  if (TripCount->getType()->isPointerTy())
    TripCount =
        CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
                                    L->getLoopPreheader()->getTerminator());

  return TripCount;
}

Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
  if (VectorTripCount)
    return VectorTripCount;

  Value *TC = getOrCreateTripCount(L);
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());

  // Now we need to generate the expression for the part of the loop that the
  // vectorized body will execute. This is equal to N - (N % Step) if scalar
  // iterations are not required for correctness, or N - Step, otherwise. Step
  // is equal to the vectorization factor (number of SIMD elements) times the
  // unroll factor (number of SIMD instructions).
  Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
  Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");

  // If there is a non-reversed interleaved group that may speculatively access
  // memory out-of-bounds, we need to ensure that there will be at least one
  // iteration of the scalar epilogue loop. Thus, if the step evenly divides
  // the trip count, we set the remainder to be equal to the step. If the step
  // does not evenly divide the trip count, no adjustment is necessary since
  // there will already be scalar iterations. Note that the minimum iterations
  // check ensures that N >= Step.
  if (VF > 1 && Legal->requiresScalarEpilogue()) {
    auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
    R = Builder.CreateSelect(IsZero, Step, R);
  }

  VectorTripCount = Builder.CreateSub(TC, R, "n.vec");

  return VectorTripCount;
}

void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
                                                         BasicBlock *Bypass) {
  Value *Count = getOrCreateTripCount(L);
  BasicBlock *BB = L->getLoopPreheader();
  IRBuilder<> Builder(BB->getTerminator());

  // Generate code to check that the loop's trip count, which we computed by
  // adding one to the backedge-taken count, will not overflow.
  Value *CheckMinIters = Builder.CreateICmpULT(
      Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");

  BasicBlock *NewBB =
      BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, CheckMinIters));
  LoopBypassBlocks.push_back(BB);
}

void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L,
                                                     BasicBlock *Bypass) {
  Value *TC = getOrCreateVectorTripCount(L);
  BasicBlock *BB = L->getLoopPreheader();
  IRBuilder<> Builder(BB->getTerminator());

  // Now, compare the new count to zero. If it is zero, skip the vector loop
  // and jump to the scalar loop.
  Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
                                    "cmp.zero");

  // Split the preheader and, if the vector trip count is zero, bypass the
  // vector loop.
  BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, Cmp));
  LoopBypassBlocks.push_back(BB);
}

void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *BB = L->getLoopPreheader();

  // Generate the code to check the SCEV assumptions that we made.
  // We want the new basic block to start at the first instruction in a
  // sequence of instructions that form a check.
  SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
                   "scev.check");
  Value *SCEVCheck =
      Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());

  if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
    if (C->isZero())
      return;

  // Create a new block containing the stride check.
  BB->setName("vector.scevcheck");
  auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, SCEVCheck));
  LoopBypassBlocks.push_back(BB);
  AddedSafetyChecks = true;
}

void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *BB = L->getLoopPreheader();

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  Instruction *FirstCheckInst;
  Instruction *MemRuntimeCheck;
  std::tie(FirstCheckInst, MemRuntimeCheck) =
      Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
  if (!MemRuntimeCheck)
    return;

  // Create a new block containing the memory check.
  BB->setName("vector.memcheck");
  auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
  LoopBypassBlocks.push_back(BB);
  AddedSafetyChecks = true;

  // We currently don't use LoopVersioning for the actual loop cloning but we
  // still use it to add the noalias metadata.
  LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
                                           PSE.getSE());
  LVer->prepareNoAliasMetadata();
}

void InnerLoopVectorizer::createEmptyLoop() {
  /*
   In this function we generate a new loop. The new loop will contain
   the vectorized instructions while the old loop will continue to run the
   scalar remainder.

       [ ] <-- loop iteration number check.
      /   |
     /    v
    |    [ ] <-- vector loop bypass (may consist of multiple blocks).
    |  /  |
    | /   v
    ||   [ ]     <-- vector pre header.
    |/    |
    |     v
    |    [ ] \
    |    [ ]_|   <-- vector loop.
    |     |
    |     v
    |   -[ ]   <--- middle-block.
    |  /  |
    | /   v
    -|- >[ ]     <--- new preheader.
     |    |
     |    v
     |   [ ] \
     |   [ ]_|   <-- old scalar loop to handle remainder.
      \   |
       \  v
        >[ ]     <-- exit block.
   ...
   */

  BasicBlock *OldBasicBlock = OrigLoop->getHeader();
  BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
  BasicBlock *ExitBlock = OrigLoop->getExitBlock();
  assert(VectorPH && "Invalid loop structure");
  assert(ExitBlock && "Must have an exit block");

  // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators, which often yield multiple pointer
  // induction variables. In the code below we also support a case where we
  // don't have a single induction variable.
  //
  // We try as hard as possible to obtain an induction variable from the
  // original loop. However, if we don't find one that:
  //   - is an integer
  //   - counts from zero, stepping by one
  //   - is the size of the widest induction variable type
  // then we create a new one.
  OldInduction = Legal->getInduction();
  Type *IdxTy = Legal->getWidestInductionType();

  // Split the single-block loop into the two-loop structure described above.
  BasicBlock *VecBody =
      VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
  BasicBlock *MiddleBlock =
      VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
  BasicBlock *ScalarPH =
      MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");

  // Create and register the new vector loop.
  Loop *Lp = new Loop();
  Loop *ParentLoop = OrigLoop->getParentLoop();

  // Insert the new loop into the loop nest and register the new basic blocks
  // before calling any utilities such as SCEV that require valid LoopInfo.
  if (ParentLoop) {
    ParentLoop->addChildLoop(Lp);
    ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
    ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
  } else {
    LI->addTopLevelLoop(Lp);
  }
  Lp->addBasicBlockToLoop(VecBody, *LI);

  // Find the loop boundaries.
  Value *Count = getOrCreateTripCount(Lp);

  Value *StartIdx = ConstantInt::get(IdxTy, 0);

  // We need to test whether the backedge-taken count is uint##_max. Adding one
  // to it will cause overflow and an incorrect loop trip count in the vector
  // body. In case of overflow we want to directly jump to the scalar remainder
  // loop.
  emitMinimumIterationCountCheck(Lp, ScalarPH);
  // Now, compare the new count to zero. If it is zero skip the vector loop and
  // jump to the scalar loop.
  emitVectorLoopEnteredCheck(Lp, ScalarPH);
  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
  emitSCEVChecks(Lp, ScalarPH);

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  emitMemRuntimeChecks(Lp, ScalarPH);

  // Generate the induction variable.
  // The loop step is equal to the vectorization factor (num of SIMD elements)
  // times the unroll factor (num of SIMD instructions).
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Constant *Step = ConstantInt::get(IdxTy, VF * UF);
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // We are going to resume the execution of the scalar loop.
  // Go over all of the induction variables that we found and fix the
  // PHIs that are left in the scalar version of the loop.
  // The starting values of PHI nodes depend on the counter of the last
  // iteration in the vectorized loop.
  // If we come from a bypass edge then we need to start from the original
  // start value.

  // This variable saves the new starting index for the scalar loop. It is used
  // to test if there are any tail iterations left once the vector loop has
  // completed.
  LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
  for (auto &InductionEntry : *List) {
    PHINode *OrigPhi = InductionEntry.first;
    InductionDescriptor II = InductionEntry.second;

    // Create phi nodes to merge from the backedge-taken check block.
    PHINode *BCResumeVal = PHINode::Create(
        OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
    Value *EndValue;
    if (OrigPhi == OldInduction) {
      // We know what the end value is.
      EndValue = CountRoundDown;
    } else {
      IRBuilder<> B(LoopBypassBlocks.back()->getTerminator());
      Type *StepType = II.getStep()->getType();
      Instruction::CastOps CastOp =
          CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
      Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      EndValue = II.transform(B, CRD, PSE.getSE(), DL);
      EndValue->setName("ind.end");
    }

    // The new PHI merges the original incoming value, in case of a bypass,
    // or the value at the end of the vectorized loop.
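    // For example (illustrative), an IV with start value 0 and step 2 resumes
    // at 2 * CountRoundDown when we arrive from the middle block, and at its
    // original start value 0 when we arrive over a bypass edge.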
    BCResumeVal->addIncoming(EndValue, MiddleBlock);

    // Fix up external users of the induction variable.
    fixupIVUsers(OrigPhi, II, CountRoundDown, EndValue, MiddleBlock);

    // Fix the scalar body counter (PHI node).
    unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);

    // The old induction's phi node in the scalar body needs the truncated
    // value.
    for (BasicBlock *BB : LoopBypassBlocks)
      BCResumeVal->addIncoming(II.getStartValue(), BB);
    OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
  }

  // Add a check in the middle block to see if we have completed
  // all of the iterations in the first vector loop.
  // If (N - N%VF) == N, then we *don't* need to run the remainder.
  Value *CmpN =
      CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
                      CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
  ReplaceInstWithInst(MiddleBlock->getTerminator(),
                      BranchInst::Create(ExitBlock, ScalarPH, CmpN));

  // Get ready to start creating new instructions into the vectorized body.
  Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());

  // Save the state.
  LoopVectorPreHeader = Lp->getLoopPreheader();
  LoopScalarPreHeader = ScalarPH;
  LoopMiddleBlock = MiddleBlock;
  LoopExitBlock = ExitBlock;
  LoopVectorBody = VecBody;
  LoopScalarBody = OldBasicBlock;

  // Keep all loop hints from the original loop on the vector loop (we'll
  // replace the vectorizer-specific hints below).
  if (MDNode *LID = OrigLoop->getLoopID())
    Lp->setLoopID(LID);

  LoopVectorizeHints Hints(Lp, true, *ORE);
  Hints.setAlreadyVectorized();
}

// Fix up external users of the induction variable. At this point, we are
// in LCSSA form, with all external PHIs that use the IV having one input
// value, coming from the remainder loop. We need those PHIs to also have a
// correct value for the IV when arriving directly from the middle block.
void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
                                       const InductionDescriptor &II,
                                       Value *CountRoundDown, Value *EndValue,
                                       BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the
  // penultimate value (the value that feeds into the phi from the loop
  // latch). We allow both, but they, obviously, have different values.

  assert(OrigLoop->getExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
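  // For example (illustrative), with Start = 3, Step = 2, and CRD = 8, the
  // last value is 3 + 2 * 8 = 19, and the penultimate value is
  // 3 + 2 * (8 - 1) = 17.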
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());
      Value *CountMinusOne = B.CreateSub(
          CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
      Value *CMO = B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType(),
                                       "cast.cmo");
      Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;
    }
  }

  for (auto &I : MissingVals) {
    PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is %IV2 = phi [...], [ %IV1, %latch ]
    // In this case, if IV1 has an external use, we need to avoid adding both
    // "last value of IV1" and "penultimate value of IV2". So, verify that we
    // don't already have an incoming value for the middle block.
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
      PHI->addIncoming(I.second, MiddleBlock);
  }
}

namespace {
struct CSEDenseMapInfo {
  static bool canHandle(Instruction *I) {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  }
  static inline Instruction *getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }
  static inline Instruction *getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }
  static unsigned getHashValue(Instruction *I) {
    assert(canHandle(I) && "Unknown instruction!");
    return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
                                                           I->value_op_end()));
  }
  static bool isEqual(Instruction *LHS, Instruction *RHS) {
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return LHS == RHS;
    return LHS->isIdenticalTo(RHS);
  }
};
}

///\brief Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
  SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *In = &*I++;

    if (!CSEDenseMapInfo::canHandle(In))
      continue;

    // Check if we can replace this instruction with any of the
    // visited instructions.
    if (Instruction *V = CSEMap.lookup(In)) {
      In->replaceAllUsesWith(V);
      In->eraseFromParent();
      continue;
    }

    CSEMap[In] = In;
  }
}

/// \brief Adds a 'fast' flag to floating point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V)) {
    FastMathFlags Flags;
    Flags.setUnsafeAlgebra();
    cast<Instruction>(V)->setFastMathFlags(Flags);
  }
  return V;
}

/// \brief Estimate the overhead of scalarizing a value based on its type.
/// Insert and Extract are set if the result needs to be inserted and/or
/// extracted from vectors.
/// If the instruction is also to be predicated, add the cost of a PHI
/// node to the insertion cost.
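/// For example (illustrative), scalarizing a <4 x i32> result costs four
/// InsertElement operations (plus a PHI per insert if predicated), and each
/// vector operand that must be unpacked adds four ExtractElement operations.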
static unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract,
                                         bool Predicated,
                                         const TargetTransformInfo &TTI) {
  if (Ty->isVoidTy())
    return 0;

  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (unsigned I = 0, E = Ty->getVectorNumElements(); I < E; ++I) {
    if (Extract)
      Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, I);
    if (Insert) {
      Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, I);
      if (Predicated)
        Cost += TTI.getCFInstrCost(Instruction::PHI);
    }
  }

  // We assume that if-converted blocks have a 50% chance of being executed.
  // Predicated scalarized instructions are avoided due to the control flow
  // that bypasses turned-off lanes. The extracts and inserts will be
  // sunk/hoisted to the predicated basic block and are subjected to the same
  // assumption.
  if (Predicated)
    Cost /= 2;

  return Cost;
}

/// \brief Estimate the overhead of scalarizing an Instruction based on the
/// types of its operands and return value.
static unsigned getScalarizationOverhead(SmallVectorImpl<Type *> &OpTys,
                                         Type *RetTy, bool Predicated,
                                         const TargetTransformInfo &TTI) {
  unsigned ScalarizationCost =
      getScalarizationOverhead(RetTy, true, false, Predicated, TTI);

  for (Type *Ty : OpTys)
    ScalarizationCost +=
        getScalarizationOverhead(Ty, false, true, Predicated, TTI);

  return ScalarizationCost;
}

/// \brief Estimate the overhead of scalarizing an instruction. This is a
/// convenience wrapper for the type-based getScalarizationOverhead API.
static unsigned getScalarizationOverhead(Instruction *I, unsigned VF,
                                         bool Predicated,
                                         const TargetTransformInfo &TTI) {
  if (VF == 1)
    return 0;

  Type *RetTy = ToVectorTy(I->getType(), VF);

  SmallVector<Type *, 4> OpTys;
  unsigned OperandsNum = I->getNumOperands();
  for (unsigned OpInd = 0; OpInd < OperandsNum; ++OpInd)
    OpTys.push_back(ToVectorTy(I->getOperand(OpInd)->getType(), VF));

  return getScalarizationOverhead(OpTys, RetTy, Predicated, TTI);
}

// Estimate the cost of a call instruction CI if it were vectorized with
// factor VF. Return the cost of the instruction, including scalarization
// overhead if it's needed. The flag NeedToScalarize shows whether the call
// needs to be scalarized - i.e., either a vector version isn't available,
// or it is too expensive.
static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
                                  const TargetTransformInfo &TTI,
                                  const TargetLibraryInfo *TLI,
                                  bool &NeedToScalarize) {
  Function *F = CI->getCalledFunction();
  StringRef FnName = CI->getCalledFunction()->getName();
  Type *ScalarRetTy = CI->getType();
  SmallVector<Type *, 4> Tys, ScalarTys;
  for (auto &ArgOp : CI->arg_operands())
    ScalarTys.push_back(ArgOp->getType());

  // Estimate the cost of the scalarized vector call. The source operands are
  // assumed to be vectors, so we need to extract individual elements from
  // there, execute VF scalar calls, and then gather the result into the
  // vector return value.
  unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
  if (VF == 1)
    return ScalarCallCost;

  // Compute the corresponding vector type for the return value and arguments.
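  // For example (illustrative), a call to "double f(double, int)" with VF = 4
  // gets return type <4 x double> and argument types <4 x double> and
  // <4 x i32>.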
  Type *RetTy = ToVectorTy(ScalarRetTy, VF);
  for (Type *ScalarTy : ScalarTys)
    Tys.push_back(ToVectorTy(ScalarTy, VF));

  // Compute the costs of unpacking the argument values for the scalar calls
  // and packing the return values into a vector.
  unsigned ScalarizationCost = getScalarizationOverhead(Tys, RetTy, false, TTI);

  unsigned Cost = ScalarCallCost * VF + ScalarizationCost;

  // If we can't emit a vector call for this function, then the currently found
  // cost is the cost we need to return.
  NeedToScalarize = true;
  if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin())
    return Cost;

  // If the corresponding vector cost is cheaper, return its cost.
  unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
  if (VectorCallCost < Cost) {
    NeedToScalarize = false;
    return VectorCallCost;
  }
  return Cost;
}

// Estimate the cost of an intrinsic call instruction CI if it were vectorized
// with factor VF. Return the cost of the instruction, including scalarization
// overhead if it's needed.
static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF,
                                       const TargetTransformInfo &TTI,
                                       const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  assert(ID && "Expected intrinsic call!");

  Type *RetTy = ToVectorTy(CI->getType(), VF);
  SmallVector<Type *, 4> Tys;
  for (Value *ArgOperand : CI->arg_operands())
    Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));

  FastMathFlags FMF;
  if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
    FMF = FPMO->getFastMathFlags();

  return TTI.getIntrinsicInstrCost(ID, RetTy, Tys, FMF);
}

static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(T1->getVectorElementType());
  auto *I2 = cast<IntegerType>(T2->getVectorElementType());
  return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
}
static Type *largestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(T1->getVectorElementType());
  auto *I2 = cast<IntegerType>(T2->getVectorElementType());
  return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
}

void InnerLoopVectorizer::truncateToMinimalBitwidths() {
  // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I` and re-extend its result. InstCombine runs
  // later and will remove any ext/trunc pairs.
  //
  SmallPtrSet<Value *, 4> Erased;
  for (const auto &KV : *MinBWs) {
    VectorParts &Parts = VectorLoopValueMap.getVector(KV.first);
    for (Value *&I : Parts) {
      if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
        continue;
      Type *OriginalTy = I->getType();
      Type *ScalarTruncatedTy =
          IntegerType::get(OriginalTy->getContext(), KV.second);
      Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
                                          OriginalTy->getVectorNumElements());
      if (TruncatedTy == OriginalTy)
        continue;

      IRBuilder<> B(cast<Instruction>(I));
      auto ShrinkOperand = [&](Value *V) -> Value * {
        if (auto *ZI = dyn_cast<ZExtInst>(V))
          if (ZI->getSrcTy() == TruncatedTy)
            return ZI->getOperand(0);
        return B.CreateZExtOrTrunc(V, TruncatedTy);
      };

      // The actual instruction modification depends on the instruction type,
      // unfortunately.
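      // For example (shorthand IR, illustrative), an add known to need only
      // 8 bits becomes:
      //   %a8 = trunc <4 x i32> %a to <4 x i8>
      //   %b8 = trunc <4 x i32> %b to <4 x i8>
      //   %s8 = add <4 x i8> %a8, %b8
      //   %s  = zext <4 x i8> %s8 to <4 x i32>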
      Value *NewI = nullptr;
      if (auto *BO = dyn_cast<BinaryOperator>(I)) {
        NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
                             ShrinkOperand(BO->getOperand(1)));
        cast<BinaryOperator>(NewI)->copyIRFlags(I);
      } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
        NewI =
            B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
                         ShrinkOperand(CI->getOperand(1)));
      } else if (auto *SI = dyn_cast<SelectInst>(I)) {
        NewI = B.CreateSelect(SI->getCondition(),
                              ShrinkOperand(SI->getTrueValue()),
                              ShrinkOperand(SI->getFalseValue()));
      } else if (auto *CI = dyn_cast<CastInst>(I)) {
        switch (CI->getOpcode()) {
        default:
          llvm_unreachable("Unhandled cast!");
        case Instruction::Trunc:
          NewI = ShrinkOperand(CI->getOperand(0));
          break;
        case Instruction::SExt:
          NewI = B.CreateSExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        case Instruction::ZExt:
          NewI = B.CreateZExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        }
      } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
        auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
        auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
        auto *O1 = B.CreateZExtOrTrunc(
            SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));

        NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
      } else if (isa<LoadInst>(I)) {
        // Don't do anything with the operands, just extend the result.
        continue;
      } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
        auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
        NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
      } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
        auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        NewI = B.CreateExtractElement(O0, EE->getOperand(2));
      } else {
        llvm_unreachable("Unhandled instruction type!");
      }

      // Lastly, extend the result.
      NewI->takeName(cast<Instruction>(I));
      Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
      I->replaceAllUsesWith(Res);
      cast<Instruction>(I)->eraseFromParent();
      Erased.insert(I);
      I = Res;
    }
  }

  // We'll have created a bunch of ZExts that are now parentless. Clean up.
  for (const auto &KV : *MinBWs) {
    VectorParts &Parts = VectorLoopValueMap.getVector(KV.first);
    for (Value *&I : Parts) {
      ZExtInst *Inst = dyn_cast<ZExtInst>(I);
      if (Inst && Inst->use_empty()) {
        Value *NewI = Inst->getOperand(0);
        Inst->eraseFromParent();
        I = NewI;
      }
    }
  }
}

void InnerLoopVectorizer::vectorizeLoop() {
  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
  //
  //===------------------------------------------------===//
  Constant *Zero = Builder.getInt32(0);

  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. First,
  // we create a new vector PHI node with no incoming edges. We use this value
  // when we vectorize all of the instructions that use the PHI. Next, after
  // all of the instructions in the block are complete we add the new incoming
  // edges to the PHI. At this point all of the instructions in the basic block
  // are vectorized, so we can use them to construct the PHI.
  PhiVector PHIsToFix;

  // Scan the loop in a topological order to ensure that defs are vectorized
  // before users.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  // Vectorize all of the blocks in the original loop.
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    vectorizeBlockInLoop(BB, &PHIsToFix);

  // Insert truncates and extends for any truncated instructions as hints to
  // InstCombine.
  if (VF > 1)
    truncateToMinimalBitwidths();

  // At this point every instruction in the original loop is widened to a
  // vector form. Now we need to fix the recurrences in PHIsToFix. These PHI
  // nodes are currently empty because we did not want to introduce cycles.
  // This is the second stage of vectorizing recurrences.
  for (PHINode *Phi : PHIsToFix) {
    assert(Phi && "Unable to recover vectorized PHI");

    // Handle first-order recurrences that need to be fixed.
    if (Legal->isFirstOrderRecurrence(Phi)) {
      fixFirstOrderRecurrence(Phi);
      continue;
    }

    // If the phi node is not a first-order recurrence, it must be a reduction.
    // Get its reduction variable descriptor.
    assert(Legal->isReductionVariable(Phi) &&
           "Unable to find the reduction variable");
    RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];

    RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
    TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
    Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
    RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
        RdxDesc.getMinMaxRecurrenceKind();
    setDebugLocFromInst(Builder, ReductionStartValue);

    // We need to generate a reduction vector from the incoming scalar.
    // To do so, we need to generate the 'identity' vector and override
    // one of the elements with the incoming scalar reduction. We need
    // to do it in the vector-loop preheader.
    Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());

    // This is the vector-clone of the value that leaves the loop.
    const VectorParts &VectorExit = getVectorValue(LoopExitInst);
    Type *VecTy = VectorExit[0]->getType();

    // Find the reduction identity variable. Zero for addition, or, xor,
    // one for multiplication, -1 for And.
    Value *Identity;
    Value *VectorStart;
    if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
        RK == RecurrenceDescriptor::RK_FloatMinMax) {
      // MinMax reductions have the start value as their identity.
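      // Splatting the start value is safe here: since, for example,
      // max(start, max(start, x)) == max(start, x), the extra lanes cannot
      // change the final result.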
      if (VF == 1) {
        VectorStart = Identity = ReductionStartValue;
      } else {
        VectorStart = Identity =
            Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
      }
    } else {
      // Handle other reduction kinds:
      Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
          RK, VecTy->getScalarType());
      if (VF == 1) {
        Identity = Iden;
        // This vector is the Identity vector where the first element is the
        // incoming scalar reduction.
        VectorStart = ReductionStartValue;
      } else {
        Identity = ConstantVector::getSplat(VF, Iden);

        // This vector is the Identity vector where the first element is the
        // incoming scalar reduction.
        VectorStart =
            Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
      }
    }

    // Fix the vector-loop phi.

    // Reductions do not have to start at zero. They can start with
    // any loop invariant values.
    const VectorParts &VecRdxPhi = getVectorValue(Phi);
    BasicBlock *Latch = OrigLoop->getLoopLatch();
    Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
    const VectorParts &Val = getVectorValue(LoopVal);
    for (unsigned part = 0; part < UF; ++part) {
      // Make sure to add the reduction start value only to the
      // first unroll part.
      Value *StartVal = (part == 0) ? VectorStart : Identity;
      cast<PHINode>(VecRdxPhi[part])
          ->addIncoming(StartVal, LoopVectorPreHeader);
      cast<PHINode>(VecRdxPhi[part])
          ->addIncoming(Val[part], LoopVectorBody);
    }

    // Before each round, move the insertion point right between
    // the PHIs and the values we are going to write.
    // This allows us to write both PHINodes and the extractelement
    // instructions.
    Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());

    VectorParts &RdxParts = VectorLoopValueMap.getVector(LoopExitInst);
    setDebugLocFromInst(Builder, LoopExitInst);

    // If the vector reduction can be performed in a smaller type, we truncate
    // then extend the loop exit value to enable InstCombine to evaluate the
    // entire expression in the smaller type.
    if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
      Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
      Builder.SetInsertPoint(LoopVectorBody->getTerminator());
      for (unsigned part = 0; part < UF; ++part) {
        Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
        Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
                                          : Builder.CreateZExt(Trunc, VecTy);
        for (Value::user_iterator UI = RdxParts[part]->user_begin();
             UI != RdxParts[part]->user_end();)
          if (*UI != Trunc) {
            (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd);
            RdxParts[part] = Extnd;
          } else {
            ++UI;
          }
      }
      Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
      for (unsigned part = 0; part < UF; ++part)
        RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
    }

    // Reduce all of the unrolled parts into a single vector.
    Value *ReducedPartRdx = RdxParts[0];
    unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
    setDebugLocFromInst(Builder, ReducedPartRdx);
    for (unsigned part = 1; part < UF; ++part) {
      if (Op != Instruction::ICmp && Op != Instruction::FCmp)
        // Floating point operations had to be 'fast' to enable the reduction.
        ReducedPartRdx = addFastMathFlag(
            Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part],
                                ReducedPartRdx, "bin.rdx"));
      else
        ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp(
            Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]);
    }

    if (VF > 1) {
      // VF is a power of 2 so we can emit the reduction using log2(VF)
      // shuffles and vector ops, reducing the set of values being computed
      // by half each round.
      assert(isPowerOf2_32(VF) &&
             "Reduction emission only supported for pow2 vectors!");
      Value *TmpVec = ReducedPartRdx;
      SmallVector<Constant *, 32> ShuffleMask(VF, nullptr);
      for (unsigned i = VF; i != 1; i >>= 1) {
        // Move the upper half of the vector to the lower half.
        for (unsigned j = 0; j != i / 2; ++j)
          ShuffleMask[j] = Builder.getInt32(i / 2 + j);

        // Fill the rest of the mask with undef.
        std::fill(&ShuffleMask[i / 2], ShuffleMask.end(),
                  UndefValue::get(Builder.getInt32Ty()));

        Value *Shuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()),
            ConstantVector::get(ShuffleMask), "rdx.shuf");

        if (Op != Instruction::ICmp && Op != Instruction::FCmp)
          // Floating point operations had to be 'fast' to enable the
          // reduction.
          TmpVec = addFastMathFlag(Builder.CreateBinOp(
              (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx"));
        else
          TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind,
                                                        TmpVec, Shuf);
      }

      // The result is in the first element of the vector.
      ReducedPartRdx =
          Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));

      // If the reduction can be performed in a smaller type, we need to extend
      // the reduction to the wider type before we branch to the original loop.
      if (Phi->getType() != RdxDesc.getRecurrenceType())
        ReducedPartRdx =
            RdxDesc.isSigned()
                ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
                : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
    }

    // Create a phi node that merges control-flow from the backedge-taken check
    // block and the middle block.
    PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
                                          LoopScalarPreHeader->getTerminator());
    for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
      BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
    BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);

    // Now, we need to fix the users of the reduction variable
    // inside and outside of the scalar remainder loop.
    // We know that the loop is in LCSSA form. We need to update the
    // PHI nodes in the exit blocks.
    for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
                              LEE = LoopExitBlock->end();
         LEI != LEE; ++LEI) {
      PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
      if (!LCSSAPhi)
        break;

      // All PHINodes need to have a single entry edge, or two if
      // we already fixed them.
      assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI");

      // We found our reduction value exit-PHI. Update it with the
      // incoming bypass edge.
      if (LCSSAPhi->getIncomingValue(0) == LoopExitInst) {
        // Add an edge coming from the bypass.
        LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
        break;
      }
    } // end of the LCSSA phi scan.

    // Fix the scalar loop reduction variable with the incoming reduction sum
    // from the vector body and from the backedge value.
    int IncomingEdgeBlockIdx =
        Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
    assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
    // Pick the other block.
    int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
    Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
    Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
  } // end of for each Phi in PHIsToFix.

  fixLCSSAPHIs();

  // Make sure DomTree is updated.
  updateAnalysis();

  predicateInstructions();

  // Remove redundant induction instructions.
  cse(LoopVectorBody);
}

void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {

  // This is the second phase of vectorizing first-order recurrences. An
  // overview of the transformation is described below. Suppose we have the
  // following loop.
  //
  //   for (int i = 0; i < n; ++i)
  //     b[i] = a[i] - a[i - 1];
  //
  // There is a first-order recurrence on "a". For this loop, the shorthand
  // scalar IR looks like:
  //
  //   scalar.ph:
  //     s_init = a[-1]
  //     br scalar.body
  //
  //   scalar.body:
  //     i = phi [0, scalar.ph], [i+1, scalar.body]
  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //     s2 = a[i]
  //     b[i] = s2 - s1
  //     br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // temporary value for s1. We now complete the vectorization and produce the
  // shorthand vector IR shown below (for VF = 4, UF = 1).
  //
  //   vector.ph:
  //     v_init = vector(..., ..., ..., a[-1])
  //     br vector.body
  //
  //   vector.body
  //     i = phi [0, vector.ph], [i+4, vector.body]
  //     v1 = phi [v_init, vector.ph], [v2, vector.body]
  //     v2 = a[i, i+1, i+2, i+3];
  //     v3 = vector(v1(3), v2(0, 1, 2))
  //     b[i, i+1, i+2, i+3] = v2 - v3
  //     br cond, vector.body, middle.block
  //
  //   middle.block:
  //     x = v2(3)
  //     br scalar.ph
  //
  //   scalar.ph:
  //     s_init = phi [x, middle.block], [a[-1], otherwise]
  //     br scalar.body
  //
  // After the vector loop completes execution, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.

  // Get the original loop preheader and single loop latch.
  auto *Preheader = OrigLoop->getLoopPreheader();
  auto *Latch = OrigLoop->getLoopLatch();

  // Get the initial and previous values of the scalar recurrence.
  auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
  auto *Previous = Phi->getIncomingValueForBlock(Latch);

  // Create a vector from the initial value.
  auto *VectorInit = ScalarInit;
  if (VF > 1) {
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
    VectorInit = Builder.CreateInsertElement(
        UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
        Builder.getInt32(VF - 1), "vector.recur.init");
  }

  // We constructed a temporary phi node in the first phase of vectorization.
  // This phi node will eventually be deleted.
  VectorParts &PhiParts = VectorLoopValueMap.getVector(Phi);
  Builder.SetInsertPoint(cast<Instruction>(PhiParts[0]));

  // Create a phi node for the new recurrence. The current value will either be
  // the initial value inserted into a vector or a loop-varying vector value.
  auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
  VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);

  // Get the vectorized previous value. We ensured the previous value was an
  // instruction when detecting the recurrence.
  auto &PreviousParts = getVectorValue(Previous);

  // Set the insertion point to be after this instruction. We ensured the
  // previous value dominated all uses of the phi when detecting the
  // recurrence.
  Builder.SetInsertPoint(
      &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1])));

  // We will construct a vector for the recurrence by combining the values for
  // the current and previous iterations. This is the required shuffle mask.
  SmallVector<Constant *, 8> ShuffleMask(VF);
  ShuffleMask[0] = Builder.getInt32(VF - 1);
  for (unsigned I = 1; I < VF; ++I)
    ShuffleMask[I] = Builder.getInt32(I + VF - 1);

  // The vector from which to take the initial value for the current iteration
  // (actual or unrolled). Initially, this is the vector phi node.
  Value *Incoming = VecPhi;

  // Shuffle the current and previous vector and update the vector parts.
  for (unsigned Part = 0; Part < UF; ++Part) {
    auto *Shuffle =
        VF > 1
            ? Builder.CreateShuffleVector(Incoming, PreviousParts[Part],
                                          ConstantVector::get(ShuffleMask))
            : Incoming;
    PhiParts[Part]->replaceAllUsesWith(Shuffle);
    cast<Instruction>(PhiParts[Part])->eraseFromParent();
    PhiParts[Part] = Shuffle;
    Incoming = PreviousParts[Part];
  }

  // Fix the latch value of the new recurrence in the vector loop.
  VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());

  // Extract the last vector element in the middle block. This will be the
  // initial value for the recurrence when jumping to the scalar loop.
  auto *Extract = Incoming;
  if (VF > 1) {
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    Extract = Builder.CreateExtractElement(Extract, Builder.getInt32(VF - 1),
                                           "vector.recur.extract");
  }

  // Fix the initial value of the original recurrence in the scalar loop.
  Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
  auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
  for (auto *BB : predecessors(LoopScalarPreHeader)) {
    auto *Incoming = BB == LoopMiddleBlock ? Extract : ScalarInit;
    Start->addIncoming(Incoming, BB);
  }

  Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
  Phi->setName("scalar.recur");

  // Finally, fix users of the recurrence outside the loop. The users will need
  // either the last value of the scalar recurrence or the last value of the
  // vector recurrence we extracted in the middle block. Since the loop is in
  // LCSSA form, we just need to find the phi node for the original scalar
  // recurrence in the exit block, and then add an edge for the middle block.
  for (auto &I : *LoopExitBlock) {
    auto *LCSSAPhi = dyn_cast<PHINode>(&I);
    if (!LCSSAPhi)
      break;
    if (LCSSAPhi->getIncomingValue(0) == Phi) {
      LCSSAPhi->addIncoming(Extract, LoopMiddleBlock);
      break;
    }
  }
}

void InnerLoopVectorizer::fixLCSSAPHIs() {
  for (Instruction &LEI : *LoopExitBlock) {
    auto *LCSSAPhi = dyn_cast<PHINode>(&LEI);
    if (!LCSSAPhi)
      break;
    if (LCSSAPhi->getNumIncomingValues() == 1)
      LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()),
                            LoopMiddleBlock);
  }
}

void InnerLoopVectorizer::predicateInstructions() {

  // For each instruction I marked for predication on value C, split I into its
  // own basic block to form an if-then construct over C. Since I may be fed by
  // an extractelement instruction and/or may be feeding an insertelement
  // instruction generated during scalarization, we try to move such
  // instructions into the predicated basic block as well. For the
  // insertelement this also means that the PHI will be created for the
  // resulting vector rather than for the scalar instruction.
  // So for some predicated instruction, e.g. the conditional sdiv in:
  //
  //   for.body:
  //     ...
  //     %add = add nsw i32 %mul, %0
  //     %cmp5 = icmp sgt i32 %2, 7
  //     br i1 %cmp5, label %if.then, label %if.end
  //
  //   if.then:
  //     %div = sdiv i32 %0, %1
  //     br label %if.end
  //
  //   if.end:
  //     %x.0 = phi i32 [ %div, %if.then ], [ %add, %for.body ]
  //
  // the sdiv at this point is scalarized and if-converted using a select.
  // The inactive elements in the vector are not used, but the predicated
  // instruction is still executed for all vector elements, essentially:
  //
  //   vector.body:
  //     ...
  //     %17 = add nsw <2 x i32> %16, %wide.load
  //     %29 = extractelement <2 x i32> %wide.load, i32 0
  //     %30 = extractelement <2 x i32> %wide.load51, i32 0
  //     %31 = sdiv i32 %29, %30
  //     %32 = insertelement <2 x i32> undef, i32 %31, i32 0
  //     %35 = extractelement <2 x i32> %wide.load, i32 1
  //     %36 = extractelement <2 x i32> %wide.load51, i32 1
  //     %37 = sdiv i32 %35, %36
  //     %38 = insertelement <2 x i32> %32, i32 %37, i32 1
  //     %predphi = select <2 x i1> %26, <2 x i32> %38, <2 x i32> %17
  //
  // Predication will now re-introduce the original control flow to avoid false
  // side-effects by the sdiv instructions on the inactive elements, yielding
  // (after cleanup):
  //
  //   vector.body:
  //     ...
  //     %5 = add nsw <2 x i32> %4, %wide.load
  //     %8 = icmp sgt <2 x i32> %wide.load52, <i32 7, i32 7>
  //     %9 = extractelement <2 x i1> %8, i32 0
  //     br i1 %9, label %pred.sdiv.if, label %pred.sdiv.continue
  //
  //   pred.sdiv.if:
  //     %10 = extractelement <2 x i32> %wide.load, i32 0
  //     %11 = extractelement <2 x i32> %wide.load51, i32 0
  //     %12 = sdiv i32 %10, %11
  //     %13 = insertelement <2 x i32> undef, i32 %12, i32 0
  //     br label %pred.sdiv.continue
  //
  //   pred.sdiv.continue:
  //     %14 = phi <2 x i32> [ undef, %vector.body ], [ %13, %pred.sdiv.if ]
  //     %15 = extractelement <2 x i1> %8, i32 1
  //     br i1 %15, label %pred.sdiv.if54, label %pred.sdiv.continue55
  //
  //   pred.sdiv.if54:
  //     %16 = extractelement <2 x i32> %wide.load, i32 1
  //     %17 = extractelement <2 x i32> %wide.load51, i32 1
  //     %18 = sdiv i32 %16, %17
  //     %19 = insertelement <2 x i32> %14, i32 %18, i32 1
  //     br label %pred.sdiv.continue55
  //
  //   pred.sdiv.continue55:
  //     %20 = phi <2 x i32> [ %14, %pred.sdiv.continue ], [ %19, %pred.sdiv.if54 ]
  //     %predphi = select <2 x i1> %8, <2 x i32> %20, <2 x i32> %5

  for (auto KV : PredicatedInstructions) {
    BasicBlock::iterator I(KV.first);
    BasicBlock *Head = I->getParent();
    auto *BB = SplitBlock(Head, &*std::next(I), DT, LI);
    auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false,
                                        /*BranchWeights=*/nullptr, DT, LI);
    I->moveBefore(T);
    // Try to move any extractelement we may have created for the predicated
    // instruction into the Then block.
    for (Use &Op : I->operands()) {
      auto *OpInst = dyn_cast<ExtractElementInst>(&*Op);
      if (OpInst && OpInst->hasOneUse()) // TODO: more accurately - hasOneUser()
        OpInst->moveBefore(&*I);
    }

    I->getParent()->setName(Twine("pred.") + I->getOpcodeName() + ".if");
    BB->setName(Twine("pred.") + I->getOpcodeName() + ".continue");

    // If the instruction is non-void, create a phi node at the reconvergence
    // point.
    if (!I->getType()->isVoidTy()) {
      Value *IncomingTrue = nullptr;
      Value *IncomingFalse = nullptr;

      if (I->hasOneUse() && isa<InsertElementInst>(*I->user_begin())) {
        // If the predicated instruction is feeding an insert-element, move it
        // into the Then block; the phi node will be created for the vector.
        InsertElementInst *IEI = cast<InsertElementInst>(*I->user_begin());
        IEI->moveBefore(T);
        IncomingTrue = IEI;                 // the new vector with the inserted element
        IncomingFalse = IEI->getOperand(0); // the unmodified vector
      } else {
        // The phi node will be created for the scalar predicated instruction.
        IncomingTrue = &*I;
        IncomingFalse = UndefValue::get(I->getType());
      }

      BasicBlock *PostDom = I->getParent()->getSingleSuccessor();
      assert(PostDom && "Then block has multiple successors");
      PHINode *Phi =
          PHINode::Create(IncomingTrue->getType(), 2, "", &PostDom->front());
      IncomingTrue->replaceAllUsesWith(Phi);
      Phi->addIncoming(IncomingFalse, Head);
      Phi->addIncoming(IncomingTrue, I->getParent());
    }
  }

  DEBUG(DT->verifyDomTree());
}

InnerLoopVectorizer::VectorParts
InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
  assert(is_contained(predecessors(Dst), Src) && "Invalid edge");

  // Look for cached value.
  std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
  EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge);
  if (ECEntryIt != MaskCache.end())
    return ECEntryIt->second;

  VectorParts SrcMask = createBlockInMask(Src);

  // The terminator has to be a branch inst!
  BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
  assert(BI && "Unexpected terminator found");

  if (BI->isConditional()) {
    VectorParts EdgeMask = getVectorValue(BI->getCondition());

    if (BI->getSuccessor(0) != Dst)
      for (unsigned part = 0; part < UF; ++part)
        EdgeMask[part] = Builder.CreateNot(EdgeMask[part]);

    for (unsigned part = 0; part < UF; ++part)
      EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]);

    MaskCache[Edge] = EdgeMask;
    return EdgeMask;
  }

  MaskCache[Edge] = SrcMask;
  return SrcMask;
}

InnerLoopVectorizer::VectorParts
InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
  assert(OrigLoop->contains(BB) && "Block is not a part of a loop");

  // The loop's incoming mask is all-one.
  if (OrigLoop->getHeader() == BB) {
    Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1);
    return getVectorValue(C);
  }

  // This is the block mask. We OR all of the incoming edge masks together,
  // starting from an all-zero mask.
  Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0);
  VectorParts BlockMask = getVectorValue(Zero);

  // For each predecessor:
  for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) {
    VectorParts EM = createEdgeMask(*it, BB);
    for (unsigned part = 0; part < UF; ++part)
      BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]);
  }

  return BlockMask;
}

void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
                                              unsigned VF, PhiVector *PV) {
  PHINode *P = cast<PHINode>(PN);
  // Handle recurrences.
  if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
    VectorParts Entry(UF);
    for (unsigned part = 0; part < UF; ++part) {
      // This is phase one of vectorizing PHIs.
      Type *VecTy =
          (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
      Entry[part] = PHINode::Create(
          VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
    }
    VectorLoopValueMap.initVector(P, Entry);
    PV->push_back(P);
    return;
  }

  setDebugLocFromInst(Builder, P);
  // Check for PHI nodes that are lowered to vector selects.
  if (P->getParent() != OrigLoop->getHeader()) {
    // We know that all PHIs in non-header blocks are converted into
    // selects, so we don't have to worry about the insertion order and we
    // can just use the builder.
    // At this point we generate the predication tree. There may be
    // duplications since this is a simple recursive scan, but future
    // optimizations will clean it up.

    unsigned NumIncoming = P->getNumIncomingValues();

    // Generate a sequence of selects of the form:
    //   SELECT(Mask3, In3,
    //          SELECT(Mask2, In2,
    //                 ( ...)))
    VectorParts Entry(UF);
    for (unsigned In = 0; In < NumIncoming; In++) {
      VectorParts Cond =
          createEdgeMask(P->getIncomingBlock(In), P->getParent());
      const VectorParts &In0 = getVectorValue(P->getIncomingValue(In));

      for (unsigned part = 0; part < UF; ++part) {
        // We might have single edge PHIs (blocks) - use an identity
        // 'select' for the first PHI operand.
        if (In == 0)
          Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In0[part]);
        else
          // Select between the current value and the previous incoming edge
          // based on the incoming mask.
          Entry[part] = Builder.CreateSelect(Cond[part], In0[part], Entry[part],
                                             "predphi");
      }
    }
    VectorLoopValueMap.initVector(P, Entry);
    return;
  }

  // This PHINode must be an induction variable.
  // Make sure that we know about it.
  assert(Legal->getInductionVars()->count(P) && "Not an induction variable");

  InductionDescriptor II = Legal->getInductionVars()->lookup(P);
  const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // FIXME: The newly created binary instructions should contain nsw/nuw flags,
  // which can be found from the original scalar operations.
  switch (II.getKind()) {
  case InductionDescriptor::IK_NoInduction:
    llvm_unreachable("Unknown induction");
  case InductionDescriptor::IK_IntInduction:
    return widenIntInduction(P);
  case InductionDescriptor::IK_PtrInduction: {
    // Handle the pointer induction variable case.
    assert(P->getType()->isPointerTy() && "Unexpected type.");
    // This is the normalized GEP that starts counting at zero.
    Value *PtrInd = Induction;
    PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
    // These are the scalar results. Notice that we don't generate vector GEPs
    // because scalar GEPs result in better code.
    ScalarParts Entry(UF);
    for (unsigned Part = 0; Part < UF; ++Part) {
      Entry[Part].resize(VF);
      for (unsigned Lane = 0; Lane < VF; ++Lane) {
        Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
        Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
        Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
        SclrGep->setName("next.gep");
        Entry[Part][Lane] = SclrGep;
      }
    }
    VectorLoopValueMap.initScalar(P, Entry);
    return;
  }
  case InductionDescriptor::IK_FpInduction: {
    assert(P->getType() == II.getStartValue()->getType() &&
           "Types must match");
    // Handle other induction variables that are now based on the
    // canonical one.
    assert(P != OldInduction && "Primary induction can be integer only");

    Value *V = Builder.CreateCast(Instruction::SIToFP, Induction, P->getType());
    V = II.transform(Builder, V, PSE.getSE(), DL);
    V->setName("fp.offset.idx");

    // Now we have the scalar op: %fp.offset.idx = StartVal +/- Induction*StepVal

    Value *Broadcasted = getBroadcastInstrs(V);
    // After broadcasting the induction variable we need to make the vector
    // consecutive by adding StepVal*0, StepVal*1, StepVal*2, etc.
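    // For example (illustrative, VF = 4, UF = 1): a float IV with start S and
    // step D recorded with an fadd opcode becomes
    //   <S, S, S, S> fadd <0*D, 1*D, 2*D, 3*D> == <S, S+D, S+2*D, S+3*D>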
    Value *StepVal = cast<SCEVUnknown>(II.getStep())->getValue();
    VectorParts Entry(UF);
    for (unsigned part = 0; part < UF; ++part)
      Entry[part] = getStepVector(Broadcasted, VF * part, StepVal,
                                  II.getInductionOpcode());
    VectorLoopValueMap.initVector(P, Entry);
    return;
  }
  }
}

/// A helper function for checking whether an integer division-related
/// instruction may divide by zero (in which case it must be predicated if
/// executed conditionally in the scalar code).
/// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing
/// the division, but can do so without predication.
static bool mayDivideByZero(Instruction &I) {
  assert((I.getOpcode() == Instruction::UDiv ||
          I.getOpcode() == Instruction::SDiv ||
          I.getOpcode() == Instruction::URem ||
          I.getOpcode() == Instruction::SRem) &&
         "Unexpected instruction");
  Value *Divisor = I.getOperand(1);
  auto *CInt = dyn_cast<ConstantInt>(Divisor);
  return !CInt || CInt->isZero();
}

void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
  // For each instruction in the old loop.
  for (Instruction &I : *BB) {
    switch (I.getOpcode()) {
    case Instruction::Br:
      // Nothing to do for PHIs and BR, since we already took care of the
      // loop control flow instructions.
      continue;
    case Instruction::PHI: {
      // Vectorize PHINodes.
      widenPHIInstruction(&I, UF, VF, PV);
      continue;
    } // End of PHI.

    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::URem:
      // Scalarize with predication if this instruction may divide by zero and
      // block execution is conditional, otherwise fall through.
      if (mayDivideByZero(I) && Legal->blockNeedsPredication(I.getParent())) {
        scalarizeInstruction(&I, true);
        continue;
      }
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      // Just widen binops.
      auto *BinOp = cast<BinaryOperator>(&I);
      setDebugLocFromInst(Builder, BinOp);
      const VectorParts &A = getVectorValue(BinOp->getOperand(0));
      const VectorParts &B = getVectorValue(BinOp->getOperand(1));

      // Use this vector value for all users of the original instruction.
      VectorParts Entry(UF);
      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]);

        if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
          VecOp->copyIRFlags(BinOp);

        Entry[Part] = V;
      }

      VectorLoopValueMap.initVector(&I, Entry);
      addMetadata(Entry, BinOp);
      break;
    }
    case Instruction::Select: {
      // Widen selects.
      // If the selector is loop invariant we can create a select
      // instruction with a scalar condition. Otherwise, use vector-select.
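      // E.g. (shorthand, VF = 4, names illustrative): an invariant condition
      // keeps its scalar form,
      //   %r = select i1 %cond, <4 x i32> %a, <4 x i32> %b
      // while a varying condition is widened to a vector select:
      //   %r = select <4 x i1> %cond.vec, <4 x i32> %a, <4 x i32> %b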
      auto *SE = PSE.getSE();
      bool InvariantCond =
          SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
      setDebugLocFromInst(Builder, &I);

      // The condition can be loop invariant but still defined inside the
      // loop. This means that we can't just use the original 'cond' value.
      // We have to take the 'vectorized' value and pick the first lane.
      // Instcombine will make this a no-op.
      const VectorParts &Cond = getVectorValue(I.getOperand(0));
      const VectorParts &Op0 = getVectorValue(I.getOperand(1));
      const VectorParts &Op1 = getVectorValue(I.getOperand(2));

      auto *ScalarCond = getScalarValue(I.getOperand(0), 0, 0);

      VectorParts Entry(UF);
      for (unsigned Part = 0; Part < UF; ++Part) {
        Entry[Part] = Builder.CreateSelect(
            InvariantCond ? ScalarCond : Cond[Part], Op0[Part], Op1[Part]);
      }

      VectorLoopValueMap.initVector(&I, Entry);
      addMetadata(Entry, &I);
      break;
    }

    case Instruction::ICmp:
    case Instruction::FCmp: {
      // Widen compares. Generate vector compares.
      bool FCmp = (I.getOpcode() == Instruction::FCmp);
      auto *Cmp = dyn_cast<CmpInst>(&I);
      setDebugLocFromInst(Builder, Cmp);
      const VectorParts &A = getVectorValue(Cmp->getOperand(0));
      const VectorParts &B = getVectorValue(Cmp->getOperand(1));
      VectorParts Entry(UF);
      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *C = nullptr;
        if (FCmp) {
          C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]);
          cast<FCmpInst>(C)->copyFastMathFlags(Cmp);
        } else {
          C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]);
        }
        Entry[Part] = C;
      }

      VectorLoopValueMap.initVector(&I, Entry);
      addMetadata(Entry, &I);
      break;
    }

    case Instruction::Store:
    case Instruction::Load:
      vectorizeMemoryInstruction(&I);
      break;
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      auto *CI = dyn_cast<CastInst>(&I);
      setDebugLocFromInst(Builder, CI);

      // Optimize the special case where the source is a constant integer
      // induction variable. Notice that we can only optimize the 'trunc' case
      // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
      // (c) other casts depend on pointer size.
      auto ID = Legal->getInductionVars()->lookup(OldInduction);
      if (isa<TruncInst>(CI) && CI->getOperand(0) == OldInduction &&
          ID.getConstIntStepValue()) {
        widenIntInduction(OldInduction, cast<TruncInst>(CI));
        break;
      }

      // Vectorize casts.
      Type *DestTy =
          (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);

      const VectorParts &A = getVectorValue(CI->getOperand(0));
      VectorParts Entry(UF);
      for (unsigned Part = 0; Part < UF; ++Part)
        Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy);
      VectorLoopValueMap.initVector(&I, Entry);
      addMetadata(Entry, &I);
      break;
    }

    case Instruction::Call: {
      // Ignore dbg intrinsics.
      if (isa<DbgInfoIntrinsic>(I))
        break;
      setDebugLocFromInst(Builder, &I);

      Module *M = BB->getParent()->getParent();
      auto *CI = cast<CallInst>(&I);

      StringRef FnName = CI->getCalledFunction()->getName();
      Function *F = CI->getCalledFunction();
      Type *RetTy = ToVectorTy(CI->getType(), VF);
      SmallVector<Type *, 4> Tys;
      for (Value *ArgOperand : CI->arg_operands())
        Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));

      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
                 ID == Intrinsic::lifetime_start)) {
        scalarizeInstruction(&I);
        break;
      }
      // This flag indicates whether we vectorize the call with an intrinsic or
      // with an ordinary library call, i.e. whether the intrinsic call is more
      // profitable than the library call.
      bool NeedToScalarize;
      unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
      bool UseVectorIntrinsic =
          ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
      if (!UseVectorIntrinsic && NeedToScalarize) {
        scalarizeInstruction(&I);
        break;
      }

      VectorParts Entry(UF);
      for (unsigned Part = 0; Part < UF; ++Part) {
        SmallVector<Value *, 4> Args;
        for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
          Value *Arg = CI->getArgOperand(i);
          // Some intrinsics have a scalar argument - don't replace it with a
          // vector.
          if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) {
            const VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i));
            Arg = VectorArg[Part];
          }
          Args.push_back(Arg);
        }

        Function *VectorF;
        if (UseVectorIntrinsic) {
          // Use the vector version of the intrinsic.
          Type *TysForDecl[] = {CI->getType()};
          if (VF > 1)
            TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
          VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
        } else {
          // Use the vector version of the library call.
          StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
          assert(!VFnName.empty() && "Vector function name is empty.");
          VectorF = M->getFunction(VFnName);
          if (!VectorF) {
            // Generate a declaration.
            FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
            VectorF =
                Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
            VectorF->copyAttributesFrom(F);
          }
        }
        assert(VectorF && "Can't create vector function.");

        SmallVector<OperandBundleDef, 1> OpBundles;
        CI->getOperandBundlesAsDefs(OpBundles);
        CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

        if (isa<FPMathOperator>(V))
          V->copyFastMathFlags(CI);

        Entry[Part] = V;
      }

      VectorLoopValueMap.initVector(&I, Entry);
      addMetadata(Entry, &I);
      break;
    }

    default:
      // All other instructions are unsupported. Scalarize them.
      scalarizeInstruction(&I);
      break;
    } // end of switch.
  } // end of for_each instr.
}

void InnerLoopVectorizer::updateAnalysis() {
  // Forget the original basic block.
  PSE.getSE()->forgetLoop(OrigLoop);

  // Update the dominator tree information.
  assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
         "Entry does not dominate exit.");

  // We don't predicate stores by this point, so the vector body should be a
  // single loop.
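  // The immediate dominators recorded below, in order: the vector body is
  // dominated by the vector preheader, the middle block by the vector body,
  // and the scalar preheader and the exit block by the first bypass block,
  // since the bypass checks may branch around the vector loop entirely.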
  DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader);

  DT->addNewBlock(LoopMiddleBlock, LoopVectorBody);
  DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
  DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
  DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);

  DEBUG(DT->verifyDomTree());
}

/// \brief Check whether it is safe to if-convert this phi node.
///
/// Phi nodes with constant expressions that can trap are not safe to
/// if-convert.
static bool canIfConvertPHINodes(BasicBlock *BB) {
  for (Instruction &I : *BB) {
    auto *Phi = dyn_cast<PHINode>(&I);
    if (!Phi)
      return true;
    for (Value *V : Phi->incoming_values())
      if (auto *C = dyn_cast<Constant>(V))
        if (C->canTrap())
          return false;
  }
  return true;
}

bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
  if (!EnableIfConversion) {
    emitAnalysis(VectorizationReport() << "if-conversion is disabled");
    return false;
  }

  assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");

  // A list of pointers that we can safely read and write to.
  SmallPtrSet<Value *, 8> SafePointers;

  // Collect safe addresses.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (blockNeedsPredication(BB))
      continue;

    for (Instruction &I : *BB)
      if (auto *Ptr = getPointerOperand(&I))
        SafePointers.insert(Ptr);
  }

  // Collect the blocks that need predication.
  BasicBlock *Header = TheLoop->getHeader();
  for (BasicBlock *BB : TheLoop->blocks()) {
    // We don't support switch statements inside loops.
    if (!isa<BranchInst>(BB->getTerminator())) {
      emitAnalysis(VectorizationReport(BB->getTerminator())
                   << "loop contains a switch statement");
      return false;
    }

    // We must be able to predicate all blocks that need to be predicated.
    if (blockNeedsPredication(BB)) {
      if (!blockCanBePredicated(BB, SafePointers)) {
        emitAnalysis(VectorizationReport(BB->getTerminator())
                     << "control flow cannot be substituted for a select");
        return false;
      }
    } else if (BB != Header && !canIfConvertPHINodes(BB)) {
      emitAnalysis(VectorizationReport(BB->getTerminator())
                   << "control flow cannot be substituted for a select");
      return false;
    }
  }

  // We can if-convert this loop.
  return true;
}

bool LoopVectorizationLegality::canVectorize() {
  // We must have a loop in canonical form. Loops with indirectbr in them cannot
  // be canonicalized.
  if (!TheLoop->getLoopPreheader()) {
    emitAnalysis(VectorizationReport()
                 << "loop control flow is not understood by vectorizer");
    return false;
  }

  // FIXME: The code below is currently dead, since any loop that gets sent to
  // LoopVectorizationLegality is already an innermost loop.
  //
  // We can only vectorize innermost loops.
  if (!TheLoop->empty()) {
    emitAnalysis(VectorizationReport() << "loop is not the innermost loop");
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    emitAnalysis(VectorizationReport()
                 << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We must have a single exiting block.
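  // (For instance, a loop containing an early `break` has two exiting blocks,
  // so Loop::getExitingBlock() returns null and the loop is rejected.)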
  if (!TheLoop->getExitingBlock()) {
    emitAnalysis(VectorizationReport()
                 << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    emitAnalysis(VectorizationReport()
                 << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We need to have a loop header.
  DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
               << '\n');

  // Check if we can if-convert non-single-bb loops.
  unsigned NumBlocks = TheLoop->getNumBlocks();
  if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
    DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE.getBackedgeTakenCount();
  if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
    emitAnalysis(VectorizationReport()
                 << "could not determine number of loop iterations");
    DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
    return false;
  }

  // Check if we can vectorize the instructions and CFG in this loop.
  if (!canVectorizeInstrs()) {
    DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
    return false;
  }

  // Go over each instruction and look at memory deps.
  if (!canVectorizeMemory()) {
    DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
    return false;
  }

  DEBUG(dbgs() << "LV: We can vectorize this loop"
               << (LAI->getRuntimePointerChecking()->Need
                       ? " (with a runtime bound check)"
                       : "")
               << "!\n");

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved)
    InterleaveInfo.analyzeInterleaving(*getSymbolicStrides());

  // Collect all instructions that are known to be uniform after vectorization.
  collectLoopUniforms();

  // Collect all instructions that are known to be scalar after vectorization.
  collectLoopScalars();

  unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
  if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
    SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;

  if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
    emitAnalysis(VectorizationReport()
                 << "Too many SCEV assumptions need to be made and checked "
                 << "at runtime");
    DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
    return false;
  }

  // Okay! We can vectorize. At this point we don't have any other mem analysis
  // which may limit our maximum vectorization factor, so just return true with
  // no restrictions.
  return true;
}

static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
  if (Ty->isPointerTy())
    return DL.getIntPtrType(Ty);

  // It is possible that chars or shorts overflow when we ask for the loop's
  // trip count; work around this by widening the type.
  if (Ty->getScalarSizeInBits() < 32)
    return Type::getInt32Ty(Ty->getContext());

  return Ty;
}

static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
  Ty0 = convertPointerToIntegerType(DL, Ty0);
  Ty1 = convertPointerToIntegerType(DL, Ty1);
  if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
    return Ty0;
  return Ty1;
}

/// \brief Check that the instruction has outside loop users and is not an
/// identified reduction variable.
static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
                               SmallPtrSetImpl<Value *> &AllowedExit) {
  // Reduction and induction instructions are allowed to have exit users. All
  // other instructions must not have external users.
  if (!AllowedExit.count(Inst))
    // Check that all of the users of the loop are inside the BB.
    for (User *U : Inst->users()) {
      Instruction *UI = cast<Instruction>(U);
      // This user may be a reduction exit value.
      if (!TheLoop->contains(UI)) {
        DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
        return true;
      }
    }
  return false;
}

void LoopVectorizationLegality::addInductionPhi(
    PHINode *Phi, const InductionDescriptor &ID,
    SmallPtrSetImpl<Value *> &AllowedExit) {
  Inductions[Phi] = ID;
  Type *PhiTy = Phi->getType();
  const DataLayout &DL = Phi->getModule()->getDataLayout();

  // Get the widest type.
  if (!PhiTy->isFloatingPointTy()) {
    if (!WidestIndTy)
      WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
    else
      WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
  }

  // Int inductions are special because we only allow one IV.
  if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
      ID.getConstIntStepValue() && ID.getConstIntStepValue()->isOne() &&
      isa<Constant>(ID.getStartValue()) &&
      cast<Constant>(ID.getStartValue())->isNullValue()) {

    // Use the phi node with the widest type as induction. Use the last
    // one if there are multiple (no good reason for doing this other
    // than it is expedient). We've checked that it begins at zero and
    // steps by one, so this is a canonical induction variable.
    if (!Induction || PhiTy == WidestIndTy)
      Induction = Phi;
  }

  // Both the PHI node itself, and the "post-increment" value feeding
  // back into the PHI node may have external users.
  AllowedExit.insert(Phi);
  AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));

  DEBUG(dbgs() << "LV: Found an induction variable.\n");
  return;
}

bool LoopVectorizationLegality::canVectorizeInstrs() {
  BasicBlock *Header = TheLoop->getHeader();

  // Look for the attribute signaling the absence of NaNs.
  Function &F = *Header->getParent();
  HasFunNoNaNAttr =
      F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";

  // For each block in the loop.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the instructions in the block and look for hazards.
    for (Instruction &I : *BB) {
      if (auto *Phi = dyn_cast<PHINode>(&I)) {
        Type *PhiTy = Phi->getType();
        // Check that this PHI type is allowed.
        if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
            !PhiTy->isPointerTy()) {
          emitAnalysis(VectorizationReport(Phi)
                       << "loop control flow is not understood by vectorizer");
          DEBUG(dbgs() << "LV: Found a non-int non-pointer PHI.\n");
          return false;
        }

        // If this PHINode is not in the header block, then we know that we
        // can convert it to a select during if-conversion. No need to check
        // if the PHIs in this block are induction or reduction variables.
        if (BB != Header) {
          // Check that this instruction has no outside users or is an
          // identified reduction value with an outside user.
          if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
            continue;
          emitAnalysis(VectorizationReport(Phi)
                       << "value could not be identified as "
                          "an induction or reduction variable");
          return false;
        }

        // We only allow if-converted PHIs with exactly two incoming values.
        if (Phi->getNumIncomingValues() != 2) {
          emitAnalysis(VectorizationReport(Phi)
                       << "control flow not understood by vectorizer");
          DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
          return false;
        }

        RecurrenceDescriptor RedDes;
        if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
          if (RedDes.hasUnsafeAlgebra())
            Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
          AllowedExit.insert(RedDes.getLoopExitInstr());
          Reductions[Phi] = RedDes;
          continue;
        }

        InductionDescriptor ID;
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
          addInductionPhi(Phi, ID, AllowedExit);
          if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr)
            Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst());
          continue;
        }

        if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) {
          FirstOrderRecurrences.insert(Phi);
          continue;
        }

        // As a last resort, coerce the PHI to an AddRec expression and re-try
        // classifying it as an induction PHI.
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
          addInductionPhi(Phi, ID, AllowedExit);
          continue;
        }

        emitAnalysis(VectorizationReport(Phi)
                     << "value that could not be identified as "
                        "reduction is used outside the loop");
        DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
        return false;
      } // end of PHI handling

      // We handle calls that:
      //   * Are debug info intrinsics.
      //   * Have a mapping to an IR intrinsic.
      //   * Have a vector version available.
      auto *CI = dyn_cast<CallInst>(&I);
      if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
          !isa<DbgInfoIntrinsic>(CI) &&
          !(CI->getCalledFunction() && TLI &&
            TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
        emitAnalysis(VectorizationReport(CI)
                     << "call instruction cannot be vectorized");
        DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
        return false;
      }

      // Intrinsics such as powi, cttz and ctlz are legal to vectorize if the
      // second argument is the same (i.e.
      // loop invariant).
      if (CI && hasVectorInstrinsicScalarOpd(
                    getVectorIntrinsicIDForCall(CI, TLI), 1)) {
        auto *SE = PSE.getSE();
        if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
          emitAnalysis(VectorizationReport(CI)
                       << "intrinsic instruction cannot be vectorized");
          DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
          return false;
        }
      }

      // Check that the instruction return type is vectorizable.
      // Also, we can't vectorize extractelement instructions.
      if ((!VectorType::isValidElementType(I.getType()) &&
           !I.getType()->isVoidTy()) ||
          isa<ExtractElementInst>(I)) {
        emitAnalysis(VectorizationReport(&I)
                     << "instruction return type cannot be vectorized");
        DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
        return false;
      }

      // Check that the stored type is vectorizable.
      if (auto *ST = dyn_cast<StoreInst>(&I)) {
        Type *T = ST->getValueOperand()->getType();
        if (!VectorType::isValidElementType(T)) {
          emitAnalysis(VectorizationReport(ST)
                       << "store instruction cannot be vectorized");
          return false;
        }

        // FP instructions can allow unsafe algebra, thus vectorizable by
        // non-IEEE-754 compliant SIMD units.
        // This applies to floating-point math operations and calls, not memory
        // operations, shuffles, or casts, as they don't change precision or
        // semantics.
      } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
                 !I.hasUnsafeAlgebra()) {
        DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
        Hints->setPotentiallyUnsafe();
      }

      // Reduction instructions are allowed to have exit users.
      // All other instructions must not have external users.
      if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
        emitAnalysis(VectorizationReport(&I)
                     << "value cannot be used outside the loop");
        return false;
      }

    } // next instr.
  }

  if (!Induction) {
    DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
    if (Inductions.empty()) {
      emitAnalysis(VectorizationReport()
                   << "loop induction variable could not be identified");
      return false;
    }
  }

  // Now we know the widest induction type, check if our found induction
  // is the same size. If it's not, unset it here and InnerLoopVectorizer
  // will create another.
  if (Induction && WidestIndTy != Induction->getType())
    Induction = nullptr;

  return true;
}

void LoopVectorizationLegality::collectLoopScalars() {

  // If an instruction is uniform after vectorization, it will remain scalar.
  Scalars.insert(Uniforms.begin(), Uniforms.end());

  // Collect the getelementptr instructions that will not be vectorized. A
  // getelementptr instruction is only vectorized if it is used for a legal
  // gather or scatter operation.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
        Scalars.insert(GEP);
        continue;
      }
      auto *Ptr = getPointerOperand(&I);
      if (!Ptr)
        continue;
      auto *GEP = getGEPInstruction(Ptr);
      if (GEP && isLegalGatherOrScatter(&I))
        Scalars.erase(GEP);
    }

  // An induction variable will remain scalar if all users of the induction
  // variable and induction variable update remain scalar.
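  // For example (illustrative): in
  //
  //   for (int i = 0; i < n; ++i)
  //     a[i] = b[i] + 1;
  //
  // the in-loop users of i and of its update i' = i + 1 are the address
  // computations and the latch compare, which themselves stay scalar, so
  // i and i' remain scalar as well.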
  auto *Latch = TheLoop->getLoopLatch();
  for (auto &Induction : *getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are scalar after
    // vectorization.
    auto ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Scalars.count(I);
    });
    if (!ScalarInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // scalar after vectorization.
    auto ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == Ind || !TheLoop->contains(I) || Scalars.count(I);
    });
    if (!ScalarIndUpdate)
      continue;

    // The induction variable and its update instruction will remain scalar.
    Scalars.insert(Ind);
    Scalars.insert(IndUpdate);
  }
}

void LoopVectorizationLegality::collectLoopUniforms() {
  // We now know that the loop is vectorizable!
  // Collect instructions inside the loop that will remain uniform after
  // vectorization.

  // Global values, params and instructions outside of the current loop are
  // out of scope.
  auto isOutOfScope = [&](Value *V) -> bool {
    Instruction *I = dyn_cast<Instruction>(V);
    return (!I || !TheLoop->contains(I));
  };

  SetVector<Instruction *> Worklist;
  BasicBlock *Latch = TheLoop->getLoopLatch();
  // Start with the conditional branch.
  if (!isOutOfScope(Latch->getTerminator()->getOperand(0))) {
    Instruction *Cmp = cast<Instruction>(Latch->getTerminator()->getOperand(0));
    Worklist.insert(Cmp);
    DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
  }

  // Add all consecutive pointer values; these values will be uniform after
  // vectorization (and subsequent cleanup). Although non-consecutive, we also
  // add the pointer operands of interleaved accesses since they are treated
  // like consecutive pointers during vectorization.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      Instruction *Ptr = nullptr;
      if (I.getType()->isPointerTy() && isConsecutivePtr(&I))
        Ptr = &I;
      else if (isAccessInterleaved(&I))
        Ptr = cast<Instruction>(getPointerOperand(&I));
      else
        continue;
      Worklist.insert(Ptr);
      DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ptr << "\n");
    }

  // Expand the Worklist in topological order: whenever a new instruction
  // is added, its users should be either already inside the Worklist, or
  // out of scope. This ensures that a uniform instruction will only be used
  // by other uniform instructions or by out-of-scope instructions.
  unsigned idx = 0;
  while (idx != Worklist.size()) {
    Instruction *I = Worklist[idx++];

    for (auto OV : I->operand_values()) {
      if (isOutOfScope(OV))
        continue;
      auto *OI = cast<Instruction>(OV);
      if (all_of(OI->users(), [&](User *U) -> bool {
            return isOutOfScope(U) || Worklist.count(cast<Instruction>(U));
          })) {
        Worklist.insert(OI);
        DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
      }
    }
  }

  // For an instruction to be added into the Worklist above, all its users
  // inside the current loop should already have been added into the Worklist.
  // This condition cannot hold for phi instructions, which always form a
  // dependence cycle: every instruction in the cycle depends on another member
  // of the cycle being added into the Worklist first, so no member of the
  // cycle would ever be added. For example, given i = phi [0, ph],
  // [i.next, latch] and i.next = i + 1, neither i nor i.next can enter the
  // Worklist before the other. That is why we process PHIs separately.
  for (auto &Induction : *getInductionVars()) {
    auto *PN = Induction.first;
    auto *UpdateV = PN->getIncomingValueForBlock(TheLoop->getLoopLatch());
    if (all_of(PN->users(),
               [&](User *U) -> bool {
                 return U == UpdateV || isOutOfScope(U) ||
                        Worklist.count(cast<Instruction>(U));
               }) &&
        all_of(UpdateV->users(), [&](User *U) -> bool {
          return U == PN || isOutOfScope(U) ||
                 Worklist.count(cast<Instruction>(U));
        })) {
      Worklist.insert(cast<Instruction>(PN));
      Worklist.insert(cast<Instruction>(UpdateV));
      DEBUG(dbgs() << "LV: Found uniform instruction: " << *PN << "\n");
      DEBUG(dbgs() << "LV: Found uniform instruction: " << *UpdateV << "\n");
    }
  }

  Uniforms.insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationLegality::canVectorizeMemory() {
  LAI = &(*GetLAA)(*TheLoop);
  InterleaveInfo.setLAI(LAI);
  auto &OptionalReport = LAI->getReport();
  if (OptionalReport)
    emitAnalysis(VectorizationReport(*OptionalReport));
  if (!LAI->canVectorizeMemory())
    return false;

  if (LAI->hasStoreToLoopInvariantAddress()) {
    emitAnalysis(
        VectorizationReport()
        << "write to a loop invariant address could not be vectorized");
    DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
    return false;
  }

  Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
  PSE.addPredicate(LAI->getPSE().getUnionPredicate());

  return true;
}

bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
  Value *In0 = const_cast<Value *>(V);
  PHINode *PN = dyn_cast_or_null<PHINode>(In0);
  if (!PN)
    return false;

  return Inductions.count(PN);
}

bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) {
  return FirstOrderRecurrences.count(Phi);
}

bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
  return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
}

bool LoopVectorizationLegality::blockCanBePredicated(
    BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) {
  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  for (Instruction &I : *BB) {
    // Check that we don't have a constant expression that can trap as operand.
    for (Value *Operand : I.operands()) {
      if (auto *C = dyn_cast<Constant>(Operand))
        if (C->canTrap())
          return false;
    }
    // We might be able to hoist the load.
    if (I.mayReadFromMemory()) {
      auto *LI = dyn_cast<LoadInst>(&I);
      if (!LI)
        return false;
      if (!SafePtrs.count(LI->getPointerOperand())) {
        if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) ||
            isLegalMaskedGather(LI->getType())) {
          MaskedOp.insert(LI);
          continue;
        }
        // !llvm.mem.parallel_loop_access implies if-conversion safety.
        if (IsAnnotatedParallel)
          continue;
        return false;
      }
    }

    if (I.mayWriteToMemory()) {
      auto *SI = dyn_cast<StoreInst>(&I);
      // We only support predication of stores in basic blocks with one
      // predecessor.
      if (!SI)
        return false;

      // Build a masked store if it is legal for the target.
      if (isLegalMaskedStore(SI->getValueOperand()->getType(),
                             SI->getPointerOperand()) ||
          isLegalMaskedScatter(SI->getValueOperand()->getType())) {
        MaskedOp.insert(SI);
        continue;
      }

      bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0);
      bool isSinglePredecessor = SI->getParent()->getSinglePredecessor();

      if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr ||
          !isSinglePredecessor)
        return false;
    }
    if (I.mayThrow())
      return false;
  }

  return true;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {

  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      auto *LI = dyn_cast<LoadInst>(&I);
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!LI && !SI)
        continue;

      Value *Ptr = getPointerOperand(&I);
      int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());

      // An alignment of 0 means target ABI alignment.
      unsigned Align = LI ? LI->getAlignment() : SI->getAlignment();
      if (!Align)
        Align = DL.getABITypeAlignment(PtrTy->getElementType());

      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];  // (1)
//                                A[i] = b;  // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    const ValueToValueMap &Strides) {
  DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup *Group = nullptr;
    if (isStrided(DesB.Stride)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //   (1, 2) is a group | A[i]   = a;  // (1)
      //                     | A[i-1] = b;  // (2) |
      //                       A[i-3] = c;  // (3)
      //                       A[i]   = d;  // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {

        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup *StoreGroup = getInterleaveGroup(A);
          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory())
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // Ignore A if either A or B is in a predicated block. Although we
      // currently prevent group formation for predicated accesses, we may be
      // able to relax this limitation in the future once we handle more
      // complicated blocks.
      if (isPredicated(A->getParent()) || isPredicated(B->getParent()))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Align)) {
        DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                     << "    into the interleave group with" << *B << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
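        // (Loads are hoisted to the insert position, so it must track the
        // earliest load seen; in this bottom-up walk each new load A precedes
        // the current position. Stores sink to the last member, which is the
        // access the group was created with.)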
5677 if (A->mayReadFromMemory()) 5678 Group->setInsertPos(A); 5679 } 5680 } // Iteration over A accesses. 5681 } // Iteration over B accesses. 5682 5683 // Remove interleaved store groups with gaps. 5684 for (InterleaveGroup *Group : StoreGroups) 5685 if (Group->getNumMembers() != Group->getFactor()) 5686 releaseGroup(Group); 5687 5688 // If there is a non-reversed interleaved load group with gaps, we will need 5689 // to execute at least one scalar epilogue iteration. This will ensure that 5690 // we don't speculatively access memory out-of-bounds. Note that we only need 5691 // to look for a member at index factor - 1, since every group must have a 5692 // member at index zero. 5693 for (InterleaveGroup *Group : LoadGroups) 5694 if (!Group->getMember(Group->getFactor() - 1)) { 5695 if (Group->isReverse()) { 5696 releaseGroup(Group); 5697 } else { 5698 DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n"); 5699 RequiresScalarEpilogue = true; 5700 } 5701 } 5702 } 5703 5704 LoopVectorizationCostModel::VectorizationFactor 5705 LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) { 5706 // Width 1 means no vectorization. 5707 VectorizationFactor Factor = {1U, 0U}; 5708 if (OptForSize && Legal->getRuntimePointerChecking()->Need) { 5709 emitAnalysis( 5710 VectorizationReport() 5711 << "runtime pointer checks needed. Enable vectorization of this " 5712 "loop with '#pragma clang loop vectorize(enable)' when " 5713 "compiling with -Os/-Oz"); 5714 DEBUG(dbgs() 5715 << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); 5716 return Factor; 5717 } 5718 5719 if (!EnableCondStoresVectorization && Legal->getNumPredStores()) { 5720 emitAnalysis( 5721 VectorizationReport() 5722 << "store that is conditionally executed prevents vectorization"); 5723 DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n"); 5724 return Factor; 5725 } 5726 5727 // Find the trip count. 5728 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5729 DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 5730 5731 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5732 unsigned SmallestType, WidestType; 5733 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5734 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 5735 unsigned MaxSafeDepDist = -1U; 5736 5737 // Get the maximum safe dependence distance in bits computed by LAA. If the 5738 // loop contains any interleaved accesses, we divide the dependence distance 5739 // by the maximum interleave factor of all interleaved groups. Note that 5740 // although the division ensures correctness, this is a fairly conservative 5741 // computation because the maximum distance computed by LAA may not involve 5742 // any of the interleaved accesses. 5743 if (Legal->getMaxSafeDepDistBytes() != -1U) 5744 MaxSafeDepDist = 5745 Legal->getMaxSafeDepDistBytes() * 8 / Legal->getMaxInterleaveFactor(); 5746 5747 WidestRegister = 5748 ((WidestRegister < MaxSafeDepDist) ?
WidestRegister : MaxSafeDepDist); 5749 unsigned MaxVectorSize = WidestRegister / WidestType; 5750 5751 DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / " 5752 << WidestType << " bits.\n"); 5753 DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister 5754 << " bits.\n"); 5755 5756 if (MaxVectorSize == 0) { 5757 DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 5758 MaxVectorSize = 1; 5759 } 5760 5761 assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements" 5762 " into one vector!"); 5763 5764 unsigned VF = MaxVectorSize; 5765 if (MaximizeBandwidth && !OptForSize) { 5766 // Collect all viable vectorization factors. 5767 SmallVector<unsigned, 8> VFs; 5768 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 5769 for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2) 5770 VFs.push_back(VS); 5771 5772 // For each VF calculate its register usage. 5773 auto RUs = calculateRegisterUsage(VFs); 5774 5775 // Select the largest VF which doesn't require more registers than existing 5776 // ones. 5777 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true); 5778 for (int i = RUs.size() - 1; i >= 0; --i) { 5779 if (RUs[i].MaxLocalUsers <= TargetNumRegisters) { 5780 VF = VFs[i]; 5781 break; 5782 } 5783 } 5784 } 5785 5786 // If we optimize the program for size, avoid creating the tail loop. 5787 if (OptForSize) { 5788 // If we are unable to calculate the trip count then don't try to vectorize. 5789 if (TC < 2) { 5790 emitAnalysis( 5791 VectorizationReport() 5792 << "unable to calculate the loop count due to complex control flow"); 5793 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); 5794 return Factor; 5795 } 5796 5797 // Find the maximum SIMD width that can fit within the trip count. 5798 VF = TC % MaxVectorSize; 5799 5800 if (VF == 0) 5801 VF = MaxVectorSize; 5802 else { 5803 // If the trip count that we found modulo the vectorization factor is not 5804 // zero then we require a tail. 5805 emitAnalysis(VectorizationReport() 5806 << "cannot optimize for size and vectorize at the " 5807 "same time. Enable vectorization of this loop " 5808 "with '#pragma clang loop vectorize(enable)' " 5809 "when compiling with -Os/-Oz"); 5810 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); 5811 return Factor; 5812 } 5813 } 5814 5815 int UserVF = Hints->getWidth(); 5816 if (UserVF != 0) { 5817 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 5818 DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 5819 5820 Factor.Width = UserVF; 5821 return Factor; 5822 } 5823 5824 float Cost = expectedCost(1).first; 5825 #ifndef NDEBUG 5826 const float ScalarCost = Cost; 5827 #endif /* NDEBUG */ 5828 unsigned Width = 1; 5829 DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 5830 5831 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5832 // Ignore scalar width, because the user explicitly wants vectorization. 5833 if (ForceVectorization && VF > 1) { 5834 Width = 2; 5835 Cost = expectedCost(Width).first / (float)Width; 5836 } 5837 5838 for (unsigned i = 2; i <= VF; i *= 2) { 5839 // Notice that the vector loop needs to be executed fewer times, so 5840 // we need to divide the cost of the vector loops by the width of 5841 // the vector elements.
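    // (Worked example with hypothetical numbers, for illustration only: if
    // the scalar loop costs 8 per iteration and the VF = 4 loop costs 20 per
    // vector iteration, the per-lane cost is 20 / 4 = 5 < 8, so width 4 is
    // preferred unless a wider factor is cheaper still.)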
5842 VectorizationCostTy C = expectedCost(i); 5843 float VectorCost = C.first / (float)i; 5844 DEBUG(dbgs() << "LV: Vector loop of width " << i 5845 << " costs: " << (int)VectorCost << ".\n"); 5846 if (!C.second && !ForceVectorization) { 5847 DEBUG( 5848 dbgs() << "LV: Not considering vector loop of width " << i 5849 << " because it will not generate any vector instructions.\n"); 5850 continue; 5851 } 5852 if (VectorCost < Cost) { 5853 Cost = VectorCost; 5854 Width = i; 5855 } 5856 } 5857 5858 DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 5859 << "LV: Vectorization does not seem to be beneficial, " 5860 << "but was forced by a user.\n"); 5861 DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 5862 Factor.Width = Width; 5863 Factor.Cost = Width * Cost; 5864 return Factor; 5865 } 5866 5867 std::pair<unsigned, unsigned> 5868 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5869 unsigned MinWidth = -1U; 5870 unsigned MaxWidth = 8; 5871 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5872 5873 // For each block. 5874 for (BasicBlock *BB : TheLoop->blocks()) { 5875 // For each instruction in the loop. 5876 for (Instruction &I : *BB) { 5877 Type *T = I.getType(); 5878 5879 // Skip ignored values. 5880 if (ValuesToIgnore.count(&I)) 5881 continue; 5882 5883 // Only examine Loads, Stores and PHINodes. 5884 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5885 continue; 5886 5887 // Examine PHI nodes that are reduction variables. Update the type to 5888 // account for the recurrence type. 5889 if (auto *PN = dyn_cast<PHINode>(&I)) { 5890 if (!Legal->isReductionVariable(PN)) 5891 continue; 5892 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN]; 5893 T = RdxDesc.getRecurrenceType(); 5894 } 5895 5896 // Examine the stored values. 5897 if (auto *ST = dyn_cast<StoreInst>(&I)) 5898 T = ST->getValueOperand()->getType(); 5899 5900 // Ignore loaded pointer types and stored pointer types that are not 5901 // consecutive. However, we do want to take consecutive stores/loads of 5902 // pointer vectors into account. 5903 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I)) 5904 continue; 5905 5906 MinWidth = std::min(MinWidth, 5907 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5908 MaxWidth = std::max(MaxWidth, 5909 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5910 } 5911 } 5912 5913 return {MinWidth, MaxWidth}; 5914 } 5915 5916 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, 5917 unsigned VF, 5918 unsigned LoopCost) { 5919 5920 // -- The interleave heuristics -- 5921 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5922 // There are many micro-architectural considerations that we can't predict 5923 // at this level. For example, frontend pressure (on decode or fetch) due to 5924 // code size, or the number and capabilities of the execution ports. 5925 // 5926 // We use the following heuristics to select the interleave count: 5927 // 1. If the code has reductions, then we interleave to break the cross 5928 // iteration dependency. 5929 // 2. If the loop is really small, then we interleave to reduce the loop 5930 // overhead. 5931 // 3. We don't interleave if we think that we will spill registers to memory 5932 // due to the increased register pressure. 5933 5934 // When we optimize for size, we don't interleave. 5935 if (OptForSize) 5936 return 1; 5937 5938 // Do not interleave if there is a finite maximum safe dependence distance, since it already limits how many iterations may safely execute together.
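  // (Illustration, not from the original source: for a loop containing
  //
  //   A[i + 4] = A[i] + 1;
  //
  // iterations four apart conflict, so LAA reports a finite maximum safe
  // dependence distance. The vectorization factor is already clamped by that
  // distance, and interleaving on top of it could move the combined access
  // footprint past the safe window, so we conservatively return 1 here.)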
5939 if (Legal->getMaxSafeDepDistBytes() != -1U) 5940 return 1; 5941 5942 // Do not interleave loops with a relatively small trip count. 5943 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5944 if (TC > 1 && TC < TinyTripCountInterleaveThreshold) 5945 return 1; 5946 5947 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1); 5948 DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 5949 << " registers\n"); 5950 5951 if (VF == 1) { 5952 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 5953 TargetNumRegisters = ForceTargetNumScalarRegs; 5954 } else { 5955 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 5956 TargetNumRegisters = ForceTargetNumVectorRegs; 5957 } 5958 5959 RegisterUsage R = calculateRegisterUsage({VF})[0]; 5960 // We divide by these constants, so assume that we have at least one 5961 // instruction that uses at least one register. 5962 R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U); 5963 R.NumInstructions = std::max(R.NumInstructions, 1U); 5964 5965 // We calculate the interleave count using the following formula. 5966 // Subtract the number of loop invariants from the number of available 5967 // registers. These registers are used by all of the interleaved instances. 5968 // Next, divide the remaining registers by the number of registers that is 5969 // required by the loop, in order to estimate how many parallel instances 5970 // fit without causing spills. All of this is rounded down if necessary to be 5971 // a power of two. We want a power-of-two interleave count to simplify any 5972 // addressing operations or alignment considerations. 5973 unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) / 5974 R.MaxLocalUsers); 5975 5976 // Don't count the induction variable as interleaved. 5977 if (EnableIndVarRegisterHeur) 5978 IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) / 5979 std::max(1U, (R.MaxLocalUsers - 1))); 5980 5981 // Clamp the interleave ranges to reasonable counts. 5982 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF); 5983 5984 // Check if the user has overridden the max. 5985 if (VF == 1) { 5986 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5987 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5988 } else { 5989 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5990 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5991 } 5992 5993 // If we did not calculate the cost for VF (because the user selected the VF) 5994 // then we calculate the cost of VF here. 5995 if (LoopCost == 0) 5996 LoopCost = expectedCost(VF).first; 5997 5998 // Clamp the calculated IC to be between 1 and the max interleave count 5999 // that the target allows. 6000 if (IC > MaxInterleaveCount) 6001 IC = MaxInterleaveCount; 6002 else if (IC < 1) 6003 IC = 1; 6004 6005 // Interleave if we vectorized this loop and there is a reduction that could 6006 // benefit from interleaving. 6007 if (VF > 1 && Legal->getReductionVars()->size()) { 6008 DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6009 return IC; 6010 } 6011 6012 // Note that if we've already vectorized the loop we will have done the 6013 // runtime check and so interleaving won't require further checks. 6014 bool InterleavingRequiresRuntimePointerCheck = 6015 (VF == 1 && Legal->getRuntimePointerChecking()->Need); 6016 6017 // We want to interleave small loops in order to reduce the loop overhead and 6018 // potentially expose ILP opportunities.
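  // (Worked example for the register-pressure formula above, with
  // hypothetical numbers: with 16 target registers, 2 loop-invariant values
  // and at most 3 simultaneously live values, we get
  // PowerOf2Floor((16 - 2) / 3) = PowerOf2Floor(4) = 4 parallel instances
  // before spilling is expected.)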
6019 DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'); 6020 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 6021 // We assume that the cost overhead is 1 and we use the cost model 6022 // to estimate the cost of the loop and interleave until the cost of the 6023 // loop overhead is about 5% of the cost of the loop. 6024 unsigned SmallIC = 6025 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 6026 6027 // Interleave until store/load ports (estimated by max interleave count) are 6028 // saturated. 6029 unsigned NumStores = Legal->getNumStores(); 6030 unsigned NumLoads = Legal->getNumLoads(); 6031 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 6032 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 6033 6034 // If we have a scalar reduction (vector reductions are already dealt with 6035 // by this point), we can increase the critical path length if the loop 6036 // we're interleaving is inside another loop. Limit, by default to 2, so the 6037 // critical path only gets increased by one reduction operation. 6038 if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) { 6039 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 6040 SmallIC = std::min(SmallIC, F); 6041 StoresIC = std::min(StoresIC, F); 6042 LoadsIC = std::min(LoadsIC, F); 6043 } 6044 6045 if (EnableLoadStoreRuntimeInterleave && 6046 std::max(StoresIC, LoadsIC) > SmallIC) { 6047 DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 6048 return std::max(StoresIC, LoadsIC); 6049 } 6050 6051 DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 6052 return SmallIC; 6053 } 6054 6055 // Interleave if this is a large loop (small loops are already dealt with by 6056 // this point) that could benefit from interleaving. 6057 bool HasReductions = (Legal->getReductionVars()->size() > 0); 6058 if (TTI.enableAggressiveInterleaving(HasReductions)) { 6059 DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6060 return IC; 6061 } 6062 6063 DEBUG(dbgs() << "LV: Not Interleaving.\n"); 6064 return 1; 6065 } 6066 6067 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 6068 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) { 6069 // This function calculates the register usage by measuring the highest number 6070 // of values that are alive at a single location. Obviously, this is a very 6071 // rough estimation. We scan the loop in a topological order in order and 6072 // assign a number to each instruction. We use RPO to ensure that defs are 6073 // met before their users. We assume that each instruction that has in-loop 6074 // users starts an interval. We record every time that an in-loop value is 6075 // used, so we have a list of the first and last occurrences of each 6076 // instruction. Next, we transpose this data structure into a multi map that 6077 // holds the list of intervals that *end* at a specific location. This multi 6078 // map allows us to perform a linear search. We scan the instructions linearly 6079 // and record each time that a new interval starts, by placing it in a set. 6080 // If we find this value in the multi-map then we remove it from the set. 6081 // The max register usage is the maximum size of the set. 6082 // We also search for instructions that are defined outside the loop, but are 6083 // used inside the loop. We need this number separately from the max-interval 6084 // usage number because when we unroll, loop-invariant values do not take 6085 // more register. 
6086 LoopBlocksDFS DFS(TheLoop); 6087 DFS.perform(LI); 6088 6089 RegisterUsage RU; 6090 RU.NumInstructions = 0; 6091 6092 // Each 'key' in the map opens a new interval. The values 6093 // of the map are the index of the 'last seen' usage of the 6094 // instruction that is the key. 6095 typedef DenseMap<Instruction *, unsigned> IntervalMap; 6096 // Maps an index to the instruction at that index. 6097 DenseMap<unsigned, Instruction *> IdxToInstr; 6098 // Marks the end of each interval. 6099 IntervalMap EndPoint; 6100 // Saves the set of instructions that are used in the loop. 6101 SmallSet<Instruction *, 8> Ends; 6102 // Saves the list of values that are used in the loop but are 6103 // defined outside the loop, such as arguments and constants. 6104 SmallPtrSet<Value *, 8> LoopInvariants; 6105 6106 unsigned Index = 0; 6107 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6108 RU.NumInstructions += BB->size(); 6109 for (Instruction &I : *BB) { 6110 IdxToInstr[Index++] = &I; 6111 6112 // Save the end location of each USE. 6113 for (Value *U : I.operands()) { 6114 auto *Instr = dyn_cast<Instruction>(U); 6115 6116 // Ignore non-instruction values such as arguments, constants, etc. 6117 if (!Instr) 6118 continue; 6119 6120 // If this instruction is outside the loop then record it and continue. 6121 if (!TheLoop->contains(Instr)) { 6122 LoopInvariants.insert(Instr); 6123 continue; 6124 } 6125 6126 // Overwrite previous end points. 6127 EndPoint[Instr] = Index; 6128 Ends.insert(Instr); 6129 } 6130 } 6131 } 6132 6133 // Saves the list of intervals that end with the index in 'key'. 6134 typedef SmallVector<Instruction *, 2> InstrList; 6135 DenseMap<unsigned, InstrList> TransposeEnds; 6136 6137 // Transpose the EndPoints to a list of values that end at each index. 6138 for (auto &Interval : EndPoint) 6139 TransposeEnds[Interval.second].push_back(Interval.first); 6140 6141 SmallSet<Instruction *, 8> OpenIntervals; 6142 6143 // Get the size of the widest register. 6144 unsigned MaxSafeDepDist = -1U; 6145 if (Legal->getMaxSafeDepDistBytes() != -1U) 6146 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 6147 unsigned WidestRegister = 6148 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist); 6149 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 6150 6151 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6152 SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0); 6153 6154 DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6155 6156 // A lambda that gets the register usage for the given type and VF. 6157 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 6158 if (Ty->isTokenTy()) 6159 return 0U; 6160 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 6161 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 6162 }; 6163 6164 for (unsigned int i = 0; i < Index; ++i) { 6165 Instruction *I = IdxToInstr[i]; 6166 // Ignore instructions that are never used within the loop. 6167 if (!Ends.count(I)) 6168 continue; 6169 6170 // Remove all of the instructions that end at this location. 6171 InstrList &List = TransposeEnds[i]; 6172 for (Instruction *ToRemove : List) 6173 OpenIntervals.erase(ToRemove); 6174 6175 // Skip ignored values. 6176 if (ValuesToIgnore.count(I)) 6177 continue; 6178 6179 // For each VF find the maximum usage of registers.
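    // (Worked example with hypothetical numbers: for a 32-bit element type,
    // VF = 8 and a 128-bit widest register, GetRegUsage returns
    // max(1, 8 * 32 / 128) = 2, i.e. one <8 x i32> value occupies two vector
    // registers.)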
6180 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6181 if (VFs[j] == 1) { 6182 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 6183 continue; 6184 } 6185 6186 // Count the number of live intervals. 6187 unsigned RegUsage = 0; 6188 for (auto Inst : OpenIntervals) { 6189 // Skip ignored values for VF > 1. 6190 if (VecValuesToIgnore.count(Inst)) 6191 continue; 6192 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 6193 } 6194 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 6195 } 6196 6197 DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6198 << OpenIntervals.size() << '\n'); 6199 6200 // Add the current instruction to the list of open intervals. 6201 OpenIntervals.insert(I); 6202 } 6203 6204 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6205 unsigned Invariant = 0; 6206 if (VFs[i] == 1) 6207 Invariant = LoopInvariants.size(); 6208 else { 6209 for (auto Inst : LoopInvariants) 6210 Invariant += GetRegUsage(Inst->getType(), VFs[i]); 6211 } 6212 6213 DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); 6214 DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); 6215 DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n'); 6216 DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n'); 6217 6218 RU.LoopInvariantRegs = Invariant; 6219 RU.MaxLocalUsers = MaxUsages[i]; 6220 RUs[i] = RU; 6221 } 6222 6223 return RUs; 6224 } 6225 6226 LoopVectorizationCostModel::VectorizationCostTy 6227 LoopVectorizationCostModel::expectedCost(unsigned VF) { 6228 VectorizationCostTy Cost; 6229 6230 // For each block. 6231 for (BasicBlock *BB : TheLoop->blocks()) { 6232 VectorizationCostTy BlockCost; 6233 6234 // For each instruction in the old loop. 6235 for (Instruction &I : *BB) { 6236 // Skip dbg intrinsics. 6237 if (isa<DbgInfoIntrinsic>(I)) 6238 continue; 6239 6240 // Skip ignored values. 6241 if (ValuesToIgnore.count(&I)) 6242 continue; 6243 6244 VectorizationCostTy C = getInstructionCost(&I, VF); 6245 6246 // Check if we should override the cost. 6247 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 6248 C.first = ForceTargetInstructionCost; 6249 6250 BlockCost.first += C.first; 6251 BlockCost.second |= C.second; 6252 DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF " 6253 << VF << " For instruction: " << I << '\n'); 6254 } 6255 6256 // We assume that if-converted blocks have a 50% chance of being executed. 6257 // When the code is scalar then some of the blocks are avoided due to CF. 6258 // When the code is vectorized we execute all code paths. 6259 if (VF == 1 && Legal->blockNeedsPredication(BB)) 6260 BlockCost.first /= 2; 6261 6262 Cost.first += BlockCost.first; 6263 Cost.second |= BlockCost.second; 6264 } 6265 6266 return Cost; 6267 } 6268 6269 /// \brief Check whether the address computation for a non-consecutive memory 6270 /// access looks like an unlikely candidate for being merged into the indexing 6271 /// mode. 6272 /// 6273 /// We look for a GEP which has one index that is an induction variable and all 6274 /// other indices are loop invariant. If the stride of this access is also 6275 /// within a small bound we decide that this address computation can likely be 6276 /// merged into the addressing mode. 6277 /// In all other cases, we identify the address computation as complex. 
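///
/// (Illustrative example, added and hypothetical: for
///
///   for (i = 0; i < n; ++i)
///     s += A[i][j]; // GEP indices: i is the induction, j is invariant
///
/// the address advances by a whole row per iteration; if the row size is
/// within the small bound below, the step can likely be folded into the
/// addressing mode, otherwise the computation is treated as complex.)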
6278 static bool isLikelyComplexAddressComputation(Value *Ptr, 6279 LoopVectorizationLegality *Legal, 6280 ScalarEvolution *SE, 6281 const Loop *TheLoop) { 6282 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6283 if (!Gep) 6284 return true; 6285 6286 // We are looking for a gep with all loop invariant indices except for one 6287 // which should be an induction variable. 6288 unsigned NumOperands = Gep->getNumOperands(); 6289 for (unsigned i = 1; i < NumOperands; ++i) { 6290 Value *Opd = Gep->getOperand(i); 6291 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6292 !Legal->isInductionVariable(Opd)) 6293 return true; 6294 } 6295 6296 // Now we know we have a GEP ptr, %inv, %ind, %inv. Make sure that the step 6297 // can likely be merged into the address computation. 6298 unsigned MaxMergeDistance = 64; 6299 6300 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Ptr)); 6301 if (!AddRec) 6302 return true; 6303 6304 // Check the step is constant. 6305 const SCEV *Step = AddRec->getStepRecurrence(*SE); 6306 // Calculate the pointer stride and check if it is consecutive. 6307 const auto *C = dyn_cast<SCEVConstant>(Step); 6308 if (!C) 6309 return true; 6310 6311 const APInt &APStepVal = C->getAPInt(); 6312 6313 // Huge step value - give up. 6314 if (APStepVal.getBitWidth() > 64) 6315 return true; 6316 6317 int64_t StepVal = APStepVal.getSExtValue(); 6318 6319 return StepVal > MaxMergeDistance; 6320 } 6321 6322 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6323 return Legal->hasStride(I->getOperand(0)) || 6324 Legal->hasStride(I->getOperand(1)); 6325 } 6326 6327 LoopVectorizationCostModel::VectorizationCostTy 6328 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 6329 // If we know that this instruction will remain uniform, check the cost of 6330 // the scalar version. 6331 if (Legal->isUniformAfterVectorization(I)) 6332 VF = 1; 6333 6334 Type *VectorTy; 6335 unsigned C = getInstructionCost(I, VF, VectorTy); 6336 6337 bool TypeNotScalarized = 6338 VF > 1 && !VectorTy->isVoidTy() && TTI.getNumberOfParts(VectorTy) < VF; 6339 return VectorizationCostTy(C, TypeNotScalarized); 6340 } 6341 6342 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6343 unsigned VF, 6344 Type *&VectorTy) { 6345 Type *RetTy = I->getType(); 6346 if (VF > 1 && MinBWs.count(I)) 6347 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 6348 VectorTy = ToVectorTy(RetTy, VF); 6349 auto SE = PSE.getSE(); 6350 6351 // TODO: We need to estimate the cost of intrinsic calls. 6352 switch (I->getOpcode()) { 6353 case Instruction::GetElementPtr: 6354 // We mark this instruction as zero-cost because the cost of GEPs in 6355 // vectorized code depends on whether the corresponding memory instruction 6356 // is scalarized or not. Therefore, we handle GEPs with the memory 6357 // instruction cost. 6358 return 0; 6359 case Instruction::Br: { 6360 return TTI.getCFInstrCost(I->getOpcode()); 6361 } 6362 case Instruction::PHI: { 6363 auto *Phi = cast<PHINode>(I); 6364 6365 // First-order recurrences are replaced by vector shuffles inside the loop. 6366 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 6367 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 6368 VectorTy, VF - 1, VectorTy); 6369 6370 // TODO: IF-converted IFs become selects. 
6371 return 0; 6372 } 6373 case Instruction::UDiv: 6374 case Instruction::SDiv: 6375 case Instruction::URem: 6376 case Instruction::SRem: 6377 // We assume that if-converted blocks have a 50% chance of being executed. 6378 // Predicated scalarized instructions are avoided due to the CF that 6379 // bypasses turned off lanes. If we are not predicating, fallthrough. 6380 if (VF > 1 && mayDivideByZero(*I) && 6381 Legal->blockNeedsPredication(I->getParent())) 6382 return VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy) / 2 + 6383 getScalarizationOverhead(I, VF, true, TTI); 6384 case Instruction::Add: 6385 case Instruction::FAdd: 6386 case Instruction::Sub: 6387 case Instruction::FSub: 6388 case Instruction::Mul: 6389 case Instruction::FMul: 6390 case Instruction::FDiv: 6391 case Instruction::FRem: 6392 case Instruction::Shl: 6393 case Instruction::LShr: 6394 case Instruction::AShr: 6395 case Instruction::And: 6396 case Instruction::Or: 6397 case Instruction::Xor: { 6398 // Since we will replace the stride by 1 the multiplication should go away. 6399 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 6400 return 0; 6401 // Certain instructions can be cheaper to vectorize if they have a constant 6402 // second vector operand. One example of this are shifts on x86. 6403 TargetTransformInfo::OperandValueKind Op1VK = 6404 TargetTransformInfo::OK_AnyValue; 6405 TargetTransformInfo::OperandValueKind Op2VK = 6406 TargetTransformInfo::OK_AnyValue; 6407 TargetTransformInfo::OperandValueProperties Op1VP = 6408 TargetTransformInfo::OP_None; 6409 TargetTransformInfo::OperandValueProperties Op2VP = 6410 TargetTransformInfo::OP_None; 6411 Value *Op2 = I->getOperand(1); 6412 6413 // Check for a splat or for a non uniform vector of constants. 6414 if (isa<ConstantInt>(Op2)) { 6415 ConstantInt *CInt = cast<ConstantInt>(Op2); 6416 if (CInt && CInt->getValue().isPowerOf2()) 6417 Op2VP = TargetTransformInfo::OP_PowerOf2; 6418 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 6419 } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) { 6420 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 6421 Constant *SplatValue = cast<Constant>(Op2)->getSplatValue(); 6422 if (SplatValue) { 6423 ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue); 6424 if (CInt && CInt->getValue().isPowerOf2()) 6425 Op2VP = TargetTransformInfo::OP_PowerOf2; 6426 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 6427 } 6428 } else if (Legal->isUniform(Op2)) { 6429 Op2VK = TargetTransformInfo::OK_UniformValue; 6430 } 6431 6432 return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, Op2VK, 6433 Op1VP, Op2VP); 6434 } 6435 case Instruction::Select: { 6436 SelectInst *SI = cast<SelectInst>(I); 6437 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 6438 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 6439 Type *CondTy = SI->getCondition()->getType(); 6440 if (!ScalarCond) 6441 CondTy = VectorType::get(CondTy, VF); 6442 6443 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy); 6444 } 6445 case Instruction::ICmp: 6446 case Instruction::FCmp: { 6447 Type *ValTy = I->getOperand(0)->getType(); 6448 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 6449 auto It = MinBWs.find(Op0AsInstruction); 6450 if (VF > 1 && It != MinBWs.end()) 6451 ValTy = IntegerType::get(ValTy->getContext(), It->second); 6452 VectorTy = ToVectorTy(ValTy, VF); 6453 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy); 6454 } 6455 case Instruction::Store: 6456 case 
Instruction::Load: { 6457 StoreInst *SI = dyn_cast<StoreInst>(I); 6458 LoadInst *LI = dyn_cast<LoadInst>(I); 6459 Type *ValTy = (SI ? SI->getValueOperand()->getType() : LI->getType()); 6460 VectorTy = ToVectorTy(ValTy, VF); 6461 6462 unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment(); 6463 unsigned AS = 6464 SI ? SI->getPointerAddressSpace() : LI->getPointerAddressSpace(); 6465 Value *Ptr = getPointerOperand(I); 6466 // We add the cost of address computation here instead of with the gep 6467 // instruction because only here we know whether the operation is 6468 // scalarized. 6469 if (VF == 1) 6470 return TTI.getAddressComputationCost(VectorTy) + 6471 TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 6472 6473 if (LI && Legal->isUniform(Ptr)) { 6474 // Scalar load + broadcast 6475 unsigned Cost = TTI.getAddressComputationCost(ValTy->getScalarType()); 6476 Cost += TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), 6477 Alignment, AS); 6478 return Cost + 6479 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, ValTy); 6480 } 6481 6482 // For an interleaved access, calculate the total cost of the whole 6483 // interleave group. 6484 if (Legal->isAccessInterleaved(I)) { 6485 auto Group = Legal->getInterleavedAccessGroup(I); 6486 assert(Group && "Fail to get an interleaved access group."); 6487 6488 // Only calculate the cost once at the insert position. 6489 if (Group->getInsertPos() != I) 6490 return 0; 6491 6492 unsigned InterleaveFactor = Group->getFactor(); 6493 Type *WideVecTy = 6494 VectorType::get(VectorTy->getVectorElementType(), 6495 VectorTy->getVectorNumElements() * InterleaveFactor); 6496 6497 // Holds the indices of existing members in an interleaved load group. 6498 // An interleaved store group doesn't need this as it doesn't allow gaps. 6499 SmallVector<unsigned, 4> Indices; 6500 if (LI) { 6501 for (unsigned i = 0; i < InterleaveFactor; i++) 6502 if (Group->getMember(i)) 6503 Indices.push_back(i); 6504 } 6505 6506 // Calculate the cost of the whole interleaved group. 6507 unsigned Cost = TTI.getInterleavedMemoryOpCost( 6508 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, 6509 Group->getAlignment(), AS); 6510 6511 if (Group->isReverse()) 6512 Cost += 6513 Group->getNumMembers() * 6514 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6515 6516 // FIXME: The interleaved load group with a huge gap could be even more 6517 // expensive than scalar operations. Then we could ignore such group and 6518 // use scalar operations instead. 6519 return Cost; 6520 } 6521 6522 // Scalarized loads/stores. 6523 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 6524 bool UseGatherOrScatter = 6525 (ConsecutiveStride == 0) && Legal->isLegalGatherOrScatter(I); 6526 6527 bool Reverse = ConsecutiveStride < 0; 6528 const DataLayout &DL = I->getModule()->getDataLayout(); 6529 uint64_t ScalarAllocatedSize = DL.getTypeAllocSize(ValTy); 6530 uint64_t VectorElementSize = DL.getTypeStoreSize(VectorTy) / VF; 6531 if ((!ConsecutiveStride && !UseGatherOrScatter) || 6532 ScalarAllocatedSize != VectorElementSize) { 6533 bool IsComplexComputation = 6534 isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop); 6535 unsigned Cost = 0; 6536 // The cost of extracting from the value vector and pointer vector. 6537 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6538 for (unsigned i = 0; i < VF; ++i) { 6539 // The cost of extracting the pointer operand. 
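        // (Added cost summary, inferred from this loop and the code that
        // follows it: scalarization charges, per lane, one extract of the
        // pointer, one extract (store) or insert (load) of the value, one
        // address computation, and one scalar memory operation; e.g. with a
        // hypothetical VF = 4 that is 8 lane ops, 4 addresses and 4 scalar
        // memory ops.)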
6540 Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, PtrTy, i); 6541 // In case of STORE, the cost of ExtractElement from the vector. 6542 // In case of LOAD, the cost of InsertElement into the returned 6543 // vector. 6544 Cost += TTI.getVectorInstrCost(SI ? Instruction::ExtractElement 6545 : Instruction::InsertElement, 6546 VectorTy, i); 6547 } 6548 6549 // The cost of the scalar loads/stores. 6550 Cost += VF * TTI.getAddressComputationCost(PtrTy, IsComplexComputation); 6551 Cost += VF * 6552 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), 6553 Alignment, AS); 6554 return Cost; 6555 } 6556 6557 unsigned Cost = TTI.getAddressComputationCost(VectorTy); 6558 if (UseGatherOrScatter) { 6559 assert(ConsecutiveStride == 0 && 6560 "Gather/Scatter are not used for consecutive stride"); 6561 return Cost + 6562 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 6563 Legal->isMaskRequired(I), Alignment); 6564 } 6565 // Wide load/stores. 6566 if (Legal->isMaskRequired(I)) 6567 Cost += 6568 TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 6569 else 6570 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 6571 6572 if (Reverse) 6573 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6574 return Cost; 6575 } 6576 case Instruction::ZExt: 6577 case Instruction::SExt: 6578 case Instruction::FPToUI: 6579 case Instruction::FPToSI: 6580 case Instruction::FPExt: 6581 case Instruction::PtrToInt: 6582 case Instruction::IntToPtr: 6583 case Instruction::SIToFP: 6584 case Instruction::UIToFP: 6585 case Instruction::Trunc: 6586 case Instruction::FPTrunc: 6587 case Instruction::BitCast: { 6588 // We optimize the truncation of induction variable. 6589 // The cost of these is the same as the scalar operation. 6590 if (I->getOpcode() == Instruction::Trunc && 6591 Legal->isInductionVariable(I->getOperand(0))) 6592 return TTI.getCastInstrCost(I->getOpcode(), I->getType(), 6593 I->getOperand(0)->getType()); 6594 6595 Type *SrcScalarTy = I->getOperand(0)->getType(); 6596 Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF); 6597 if (VF > 1 && MinBWs.count(I)) { 6598 // This cast is going to be shrunk. This may remove the cast or it might 6599 // turn it into slightly different cast. For example, if MinBW == 16, 6600 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 6601 // 6602 // Calculate the modified src and dest types. 6603 Type *MinVecTy = VectorTy; 6604 if (I->getOpcode() == Instruction::Trunc) { 6605 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 6606 VectorTy = 6607 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6608 } else if (I->getOpcode() == Instruction::ZExt || 6609 I->getOpcode() == Instruction::SExt) { 6610 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 6611 VectorTy = 6612 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6613 } 6614 } 6615 6616 return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy); 6617 } 6618 case Instruction::Call: { 6619 bool NeedToScalarize; 6620 CallInst *CI = cast<CallInst>(I); 6621 unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize); 6622 if (getVectorIntrinsicIDForCall(CI, TLI)) 6623 return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI)); 6624 return CallCost; 6625 } 6626 default: 6627 // The cost of executing VF copies of the scalar instruction. This opcode 6628 // is unknown. Assume that it is the same as 'mul'. 
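    // (Added note: e.g. with a hypothetical VF = 4 and a 'mul' cost of 1,
    // an unknown opcode is charged 4 * 1 plus the overhead of inserting and
    // extracting the scalarized operands and results.)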
6629 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) + 6630 getScalarizationOverhead(I, VF, false, TTI); 6631 } // end of switch. 6632 } 6633 6634 char LoopVectorize::ID = 0; 6635 static const char lv_name[] = "Loop Vectorization"; 6636 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 6637 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 6638 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 6639 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 6640 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 6641 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 6642 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 6643 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 6644 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 6645 INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass) 6646 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 6647 INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 6648 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 6649 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 6650 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 6651 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 6652 6653 namespace llvm { 6654 Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) { 6655 return new LoopVectorize(NoUnrolling, AlwaysVectorize); 6656 } 6657 } 6658 6659 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 6660 6661 // Check if the pointer operand of a load or store instruction is 6662 // consecutive. 6663 if (auto *Ptr = getPointerOperand(Inst)) 6664 return Legal->isConsecutivePtr(Ptr); 6665 return false; 6666 } 6667 6668 void LoopVectorizationCostModel::collectValuesToIgnore() { 6669 // Ignore ephemeral values. 6670 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 6671 6672 // Ignore type-promoting instructions we identified during reduction 6673 // detection. 6674 for (auto &Reduction : *Legal->getReductionVars()) { 6675 RecurrenceDescriptor &RedDes = Reduction.second; 6676 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 6677 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6678 } 6679 6680 // Insert values known to be scalar into VecValuesToIgnore. 6681 for (auto *BB : TheLoop->getBlocks()) 6682 for (auto &I : *BB) 6683 if (Legal->isScalarAfterVectorization(&I)) 6684 VecValuesToIgnore.insert(&I); 6685 } 6686 6687 void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr, 6688 bool IfPredicateInstr) { 6689 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 6690 // Holds vector parameters or scalars, in case of uniform vals. 6691 SmallVector<VectorParts, 4> Params; 6692 6693 setDebugLocFromInst(Builder, Instr); 6694 6695 // Does this instruction return a value ? 6696 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 6697 6698 // Initialize a new scalar map entry. 6699 ScalarParts Entry(UF); 6700 6701 VectorParts Cond; 6702 if (IfPredicateInstr) 6703 Cond = createBlockInMask(Instr->getParent()); 6704 6705 // For each vector unroll 'part': 6706 for (unsigned Part = 0; Part < UF; ++Part) { 6707 Entry[Part].resize(1); 6708 // For each scalar that we create: 6709 6710 // Start an "if (pred) a[i] = ..." block. 
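    // (Sketch of the emitted IR, simplified and hypothetical:
    //
    //   %cond = extractelement <N x i1> %mask, i32 0
    //   %cmp  = icmp eq i1 %cond, true
    //
    // the comparison is remembered together with the cloned instruction so
    // that predication support can later sink the clone into a conditional
    // block guarded by %cmp.)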
6711 Value *Cmp = nullptr; 6712 if (IfPredicateInstr) { 6713 if (Cond[Part]->getType()->isVectorTy()) 6714 Cond[Part] = 6715 Builder.CreateExtractElement(Cond[Part], Builder.getInt32(0)); 6716 Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cond[Part], 6717 ConstantInt::get(Cond[Part]->getType(), 1)); 6718 } 6719 6720 Instruction *Cloned = Instr->clone(); 6721 if (!IsVoidRetTy) 6722 Cloned->setName(Instr->getName() + ".cloned"); 6723 6724 // Replace the operands of the cloned instruction with their scalar 6725 // equivalents in the new loop. 6726 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 6727 auto *NewOp = getScalarValue(Instr->getOperand(op), Part, 0); 6728 Cloned->setOperand(op, NewOp); 6729 } 6730 6731 // Place the cloned scalar in the new loop. 6732 Builder.Insert(Cloned); 6733 6734 // Add the cloned scalar to the scalar map entry. 6735 Entry[Part][0] = Cloned; 6736 6737 // If we just cloned a new assumption, add it to the assumption cache. 6738 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 6739 if (II->getIntrinsicID() == Intrinsic::assume) 6740 AC->registerAssumption(II); 6741 6742 // End if-block. 6743 if (IfPredicateInstr) 6744 PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp)); 6745 } 6746 VectorLoopValueMap.initScalar(Instr, Entry); 6747 } 6748 6749 void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) { 6750 auto *SI = dyn_cast<StoreInst>(Instr); 6751 bool IfPredicateInstr = (SI && Legal->blockNeedsPredication(SI->getParent())); 6752 6753 return scalarizeInstruction(Instr, IfPredicateInstr); 6754 } 6755 6756 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 6757 6758 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 6759 6760 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 6761 Instruction::BinaryOps BinOp) { 6762 // When unrolling and the VF is 1, we only need to add a simple scalar. 6763 Type *Ty = Val->getType(); 6764 assert(!Ty->isVectorTy() && "Val must be a scalar"); 6765 6766 if (Ty->isFloatingPointTy()) { 6767 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 6768 6769 // Floating point operations had to be 'fast' to enable the unrolling. 6770 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 6771 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 6772 } 6773 Constant *C = ConstantInt::get(Ty, StartIdx); 6774 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 6775 } 6776 6777 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 6778 SmallVector<Metadata *, 4> MDs; 6779 // Reserve first location for self reference to the LoopID metadata node. 6780 MDs.push_back(nullptr); 6781 bool IsUnrollMetadata = false; 6782 MDNode *LoopID = L->getLoopID(); 6783 if (LoopID) { 6784 // First find existing loop unrolling disable metadata. 6785 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 6786 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 6787 if (MD) { 6788 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 6789 IsUnrollMetadata = 6790 S && S->getString().startswith("llvm.loop.unroll.disable"); 6791 } 6792 MDs.push_back(LoopID->getOperand(i)); 6793 } 6794 } 6795 6796 if (!IsUnrollMetadata) { 6797 // Add runtime unroll disable metadata.
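    // (Resulting metadata shape, sketched for illustration: the loop ends up
    // with a self-referential loop ID carrying the disable string, e.g.
    //
    //   br ... !llvm.loop !0
    //   !0 = !{!0, !1}
    //   !1 = !{!"llvm.loop.unroll.runtime.disable"}
    // )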
6798 LLVMContext &Context = L->getHeader()->getContext(); 6799 SmallVector<Metadata *, 1> DisableOperands; 6800 DisableOperands.push_back( 6801 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 6802 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 6803 MDs.push_back(DisableNode); 6804 MDNode *NewLoopID = MDNode::get(Context, MDs); 6805 // Set operand 0 to refer to the loop id itself. 6806 NewLoopID->replaceOperandWith(0, NewLoopID); 6807 L->setLoopID(NewLoopID); 6808 } 6809 } 6810 6811 bool LoopVectorizePass::processLoop(Loop *L) { 6812 assert(L->empty() && "Only process inner loops."); 6813 6814 #ifndef NDEBUG 6815 const std::string DebugLocStr = getDebugLocString(L); 6816 #endif /* NDEBUG */ 6817 6818 DEBUG(dbgs() << "\nLV: Checking a loop in \"" 6819 << L->getHeader()->getParent()->getName() << "\" from " 6820 << DebugLocStr << "\n"); 6821 6822 LoopVectorizeHints Hints(L, DisableUnrolling, *ORE); 6823 6824 DEBUG(dbgs() << "LV: Loop hints:" 6825 << " force=" 6826 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 6827 ? "disabled" 6828 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 6829 ? "enabled" 6830 : "?")) 6831 << " width=" << Hints.getWidth() 6832 << " unroll=" << Hints.getInterleave() << "\n"); 6833 6834 // Function containing loop 6835 Function *F = L->getHeader()->getParent(); 6836 6837 // Looking at the diagnostic output is the only way to determine if a loop 6838 // was vectorized (other than looking at the IR or machine code), so it 6839 // is important to generate an optimization remark for each loop. Most of 6840 // these messages are generated by emitOptimizationRemarkAnalysis. Remarks 6841 // generated by emitOptimizationRemark and emitOptimizationRemarkMissed are 6842 // less verbose reporting vectorized loops and unvectorized loops that may 6843 // benefit from vectorization, respectively. 6844 6845 if (!Hints.allowVectorization(F, L, AlwaysVectorize)) { 6846 DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 6847 return false; 6848 } 6849 6850 // Check the loop for a trip count threshold: 6851 // do not vectorize loops with a tiny trip count. 6852 const unsigned TC = SE->getSmallConstantTripCount(L); 6853 if (TC > 0u && TC < TinyTripCountVectorThreshold) { 6854 DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 6855 << "This loop is not worth vectorizing."); 6856 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 6857 DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 6858 else { 6859 DEBUG(dbgs() << "\n"); 6860 emitAnalysisDiag(L, Hints, *ORE, VectorizationReport() 6861 << "vectorization is not beneficial " 6862 "and is not explicitly forced"); 6863 return false; 6864 } 6865 } 6866 6867 PredicatedScalarEvolution PSE(*SE, *L); 6868 6869 // Check if it is legal to vectorize the loop. 6870 LoopVectorizationRequirements Requirements(*ORE); 6871 LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE, 6872 &Requirements, &Hints); 6873 if (!LVL.canVectorize()) { 6874 DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 6875 emitMissedWarning(F, L, Hints, ORE); 6876 return false; 6877 } 6878 6879 // Use the cost model. 6880 LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F, 6881 &Hints); 6882 CM.collectValuesToIgnore(); 6883 6884 // Check the function attributes to find out if this function should be 6885 // optimized for size. 
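  // (Added note, inferred from the code below: OptForSize is set when the
  // function itself is marked for size optimization, or, under the
  // block-frequency heuristic, when the loop's entry is cold relative to the
  // function entry; a forced-vectorization hint overrides both sources.)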
6886 bool OptForSize = 6887 Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize(); 6888 6889 // Compute the weighted frequency of this loop being executed and see if it 6890 // is less than 20% of the function entry baseline frequency. Note that we 6891 // always have a canonical loop here because we think we *can* vectorize. 6892 // FIXME: This is hidden behind a flag due to pervasive problems with 6893 // exactly what block frequency models. 6894 if (LoopVectorizeWithBlockFrequency) { 6895 BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader()); 6896 if (Hints.getForce() != LoopVectorizeHints::FK_Enabled && 6897 LoopEntryFreq < ColdEntryFreq) 6898 OptForSize = true; 6899 } 6900 6901 // Check the function attributes to see if implicit floats are allowed. 6902 // FIXME: This check doesn't seem like it could possibly be correct -- what if the loop is 6903 // an integer loop and the vector instructions selected are purely integer 6904 // vector instructions? 6905 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 6906 DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat" 6907 " attribute is used.\n"); 6908 emitAnalysisDiag( 6909 L, Hints, *ORE, 6910 VectorizationReport() 6911 << "loop not vectorized due to NoImplicitFloat attribute"); 6912 emitMissedWarning(F, L, Hints, ORE); 6913 return false; 6914 } 6915 6916 // Check if the target supports potentially unsafe FP vectorization. 6917 // FIXME: Add a check for the type of safety issue (denormal, signaling) 6918 // for the target we're vectorizing for, to make sure none of the 6919 // additional fp-math flags can help. 6920 if (Hints.isPotentiallyUnsafe() && 6921 TTI->isFPVectorizationPotentiallyUnsafe()) { 6922 DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n"); 6923 emitAnalysisDiag(L, Hints, *ORE, 6924 VectorizationReport() 6925 << "loop not vectorized due to unsafe FP support."); 6926 emitMissedWarning(F, L, Hints, ORE); 6927 return false; 6928 } 6929 6930 // Select the optimal vectorization factor. 6931 const LoopVectorizationCostModel::VectorizationFactor VF = 6932 CM.selectVectorizationFactor(OptForSize); 6933 6934 // Select the interleave count. 6935 unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost); 6936 6937 // Get user interleave count. 6938 unsigned UserIC = Hints.getInterleave(); 6939 6940 // Identify the diagnostic messages that should be produced. 6941 std::string VecDiagMsg, IntDiagMsg; 6942 bool VectorizeLoop = true, InterleaveLoop = true; 6943 if (Requirements.doesNotMeet(F, L, Hints)) { 6944 DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization " 6945 "requirements.\n"); 6946 emitMissedWarning(F, L, Hints, ORE); 6947 return false; 6948 } 6949 6950 if (VF.Width == 1) { 6951 DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); 6952 VecDiagMsg = 6953 "the cost-model indicates that vectorization is not beneficial"; 6954 VectorizeLoop = false; 6955 } 6956 6957 if (IC == 1 && UserIC <= 1) { 6958 // Tell the user interleaving is not beneficial. 6959 DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n"); 6960 IntDiagMsg = 6961 "the cost-model indicates that interleaving is not beneficial"; 6962 InterleaveLoop = false; 6963 if (UserIC == 1) 6964 IntDiagMsg += 6965 " and is explicitly disabled or interleave count is set to 1"; 6966 } else if (IC > 1 && UserIC == 1) { 6967 // Tell the user interleaving is beneficial, but it is explicitly disabled.
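    // (Added note: we reach this case when the cost model computed IC > 1
    // but the user pinned the interleave count to 1, e.g. via
    // '#pragma clang loop interleave_count(1)' or 'interleave(disable)'.)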
6968 DEBUG(dbgs() 6969 << "LV: Interleaving is beneficial but is explicitly disabled.\n"); 6970 IntDiagMsg = "the cost-model indicates that interleaving is beneficial " 6971 "but is explicitly disabled or interleave count is set to 1"; 6972 InterleaveLoop = false; 6973 } 6974 6975 // Override IC if user provided an interleave count. 6976 IC = UserIC > 0 ? UserIC : IC; 6977 6978 // Emit diagnostic messages, if any. 6979 const char *VAPassName = Hints.vectorizeAnalysisPassName(); 6980 if (!VectorizeLoop && !InterleaveLoop) { 6981 // Do not vectorize or interleave the loop. 6982 ORE->emitOptimizationRemarkAnalysis(VAPassName, L, VecDiagMsg); 6983 ORE->emitOptimizationRemarkAnalysis(LV_NAME, L, IntDiagMsg); 6984 return false; 6985 } else if (!VectorizeLoop && InterleaveLoop) { 6986 DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 6987 ORE->emitOptimizationRemarkAnalysis(VAPassName, L, VecDiagMsg); 6988 } else if (VectorizeLoop && !InterleaveLoop) { 6989 DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in " 6990 << DebugLocStr << '\n'); 6991 ORE->emitOptimizationRemarkAnalysis(LV_NAME, L, IntDiagMsg); 6992 } else if (VectorizeLoop && InterleaveLoop) { 6993 DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in " 6994 << DebugLocStr << '\n'); 6995 DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 6996 } 6997 6998 if (!VectorizeLoop) { 6999 assert(IC > 1 && "interleave count should not be 1 or 0"); 7000 // If we decided that it is not profitable to vectorize the loop, then 7001 // interleave it. 7002 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC); 7003 Unroller.vectorize(&LVL, CM.MinBWs); 7004 7005 ORE->emitOptimizationRemark(LV_NAME, L, 7006 Twine("interleaved loop (interleaved count: ") + 7007 Twine(IC) + ")"); 7008 } else { 7009 // If we decided that it is *profitable* to vectorize the loop, then do it. 7010 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC); 7011 LB.vectorize(&LVL, CM.MinBWs); 7012 ++LoopsVectorized; 7013 7014 // Add metadata to disable runtime unrolling of the scalar loop when there are 7015 // no runtime checks about strides and memory. A scalar loop that is 7016 // rarely used is not worth unrolling. 7017 if (!LB.areSafetyChecksAdded()) 7018 AddRuntimeUnrollDisableMetaData(L); 7019 7020 // Report the vectorization decision. 7021 ORE->emitOptimizationRemark( 7022 LV_NAME, L, Twine("vectorized loop (vectorization width: ") + 7023 Twine(VF.Width) + ", interleaved count: " + Twine(IC) + 7024 ")"); 7025 } 7026 7027 // Mark the loop as already vectorized to avoid vectorizing again. 7028 Hints.setAlreadyVectorized(); 7029 7030 DEBUG(verifyFunction(*L->getHeader()->getParent())); 7031 return true; 7032 } 7033 7034 bool LoopVectorizePass::runImpl( 7035 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 7036 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 7037 DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_, 7038 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 7039 OptimizationRemarkEmitter &ORE_) { 7040 7041 SE = &SE_; 7042 LI = &LI_; 7043 TTI = &TTI_; 7044 DT = &DT_; 7045 BFI = &BFI_; 7046 TLI = TLI_; 7047 AA = &AA_; 7048 AC = &AC_; 7049 GetLAA = &GetLAA_; 7050 DB = &DB_; 7051 ORE = &ORE_; 7052 7053 // Compute some weights outside of the loop over the loops. Compute this 7054 // using a BranchProbability to re-use its scaling math.
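  // (Added note: BranchProbability(1, 5) scales a block frequency by 1/5,
  // so ColdEntryFreq below is 20% of the function entry frequency; loops
  // entered less often than that are treated as cold and optimized for
  // size.)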
7055 const BranchProbability ColdProb(1, 5); // 20% 7056 ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb; 7057 7058 // Don't attempt if 7059 // 1. the target claims to have no vector registers, and 7060 // 2. interleaving won't help ILP. 7061 // 7062 // The second condition is necessary because, even if the target has no 7063 // vector registers, loop vectorization may still enable scalar 7064 // interleaving. 7065 if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2) 7066 return false; 7067 7068 // Build up a worklist of inner-loops to vectorize. This is necessary as 7069 // the act of vectorizing or partially unrolling a loop creates new loops 7070 // and can invalidate iterators across the loops. 7071 SmallVector<Loop *, 8> Worklist; 7072 7073 for (Loop *L : *LI) 7074 addAcyclicInnerLoop(*L, Worklist); 7075 7076 LoopsAnalyzed += Worklist.size(); 7077 7078 // Now walk the identified inner loops. 7079 bool Changed = false; 7080 while (!Worklist.empty()) 7081 Changed |= processLoop(Worklist.pop_back_val()); 7082 7083 // Process each loop nest in the function. 7084 return Changed; 7085 7086 } 7087 7088 7089 PreservedAnalyses LoopVectorizePass::run(Function &F, 7090 FunctionAnalysisManager &AM) { 7091 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 7092 auto &LI = AM.getResult<LoopAnalysis>(F); 7093 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 7094 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 7095 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 7096 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); 7097 auto &AA = AM.getResult<AAManager>(F); 7098 auto &AC = AM.getResult<AssumptionAnalysis>(F); 7099 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 7100 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 7101 7102 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 7103 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 7104 [&](Loop &L) -> const LoopAccessInfo & { 7105 return LAM.getResult<LoopAccessAnalysis>(L); 7106 }; 7107 bool Changed = 7108 runImpl(F, SE, LI, TTI, DT, BFI, TLI, DB, AA, AC, GetLAA, ORE); 7109 if (!Changed) 7110 return PreservedAnalyses::all(); 7111 PreservedAnalyses PA; 7112 PA.preserve<LoopAnalysis>(); 7113 PA.preserve<DominatorTreeAnalysis>(); 7114 PA.preserve<BasicAA>(); 7115 PA.preserve<GlobalsAA>(); 7116 return PA; 7117 } 7118
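// (Added usage sketch, not part of the original file: the pass can be
// exercised directly with, e.g.,
//
//   opt -loop-vectorize -S input.ll            ; legacy pass manager
//   opt -passes=loop-vectorize -S input.ll     ; new pass manager
//
// both of which run the loop-vectorize entry points defined above.)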