//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
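// As a small illustration (not drawn from the references below), a scalar
// loop such as
//
//   for (int i = 0; i < n; ++i)
//     A[i] = B[i] + 42;
//
// is conceptually rewritten for a vectorization factor of four into
//
//   for (int i = 0; i + 3 < n; i += 4)
//     A[i:i+3] = B[i:i+3] + 42;  // one 'wide' iteration
//
// followed by a scalar epilogue loop that handles the remaining n % 4
// iterations.
//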
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD.
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <map>
#include <tuple>

using namespace llvm;
using namespace llvm::PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// We don't vectorize loops with a known constant trip count below this number.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Don't vectorize loops with a constant "
             "trip count that is smaller than this "
             "value."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));
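
// The override flags above are mainly useful for writing deterministic tests
// of the cost model. For example (illustrative, not a recipe from this file):
//
//   opt -loop-vectorize -force-target-num-vector-regs=16 \
//       -force-target-max-vector-interleave=2 \
//       -force-target-instruction-cost=1 -S < input.ll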
/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(false), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

namespace {

// Forward declarations.
class LoopVectorizeHints;
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;

/// Returns true if the given loop body has a cycle, excluding the loop
/// itself.
static bool hasCyclesInLoopBody(const Loop &L) {
  if (!L.empty())
    return true;

  for (const auto &SCC :
       make_range(scc_iterator<Loop, LoopBodyTraits>::begin(L),
                  scc_iterator<Loop, LoopBodyTraits>::end(L))) {
    if (SCC.size() > 1) {
      DEBUG(dbgs() << "LVL: Detected a cycle in the loop body:\n");
      DEBUG(L.dump());
      return true;
    }
  }
  return false;
}

/// \brief This modifies LoopAccessReport to initialize the message with the
/// loop-vectorizer-specific part.
class VectorizationReport : public LoopAccessReport {
public:
  VectorizationReport(Instruction *I = nullptr)
      : LoopAccessReport("loop not vectorized: ", I) {}

  /// \brief This allows promotion of the loop-access analysis report into the
  /// loop-vectorizer report. It modifies the message to add the
  /// loop-vectorizer-specific part of the message.
  explicit VectorizationReport(const LoopAccessReport &R)
      : LoopAccessReport(Twine("loop not vectorized: ") + R.str(),
                         R.getInstr()) {}
};

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}

/// A helper function that returns a GEP instruction, looking through a
/// 'bitcast' when possible. The 'bitcast' may be skipped if the source and
/// the destination pointee types of the 'bitcast' have the same size.
/// For example:
///   bitcast double** %var to i64* - can be skipped
///   bitcast double** %var to i8*  - cannot
static GetElementPtrInst *getGEPInstruction(Value *Ptr) {
  if (isa<GetElementPtrInst>(Ptr))
    return cast<GetElementPtrInst>(Ptr);

  if (isa<BitCastInst>(Ptr) &&
      isa<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0))) {
    Type *BitcastTy = Ptr->getType();
    Type *GEPTy = cast<BitCastInst>(Ptr)->getSrcTy();
    if (!isa<PointerType>(BitcastTy) || !isa<PointerType>(GEPTy))
      return nullptr;
    Type *Pointee1Ty = cast<PointerType>(BitcastTy)->getPointerElementType();
    Type *Pointee2Ty = cast<PointerType>(GEPTy)->getPointerElementType();
    const DataLayout &DL = cast<BitCastInst>(Ptr)->getModule()->getDataLayout();
    if (DL.getTypeSizeInBits(Pointee1Ty) == DL.getTypeSizeInBits(Pointee2Ty))
      return cast<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0));
  }
  return nullptr;
}

/// A helper function that returns the pointer operand of a load or store
/// instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
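
// For example (illustrative): under a typical data layout, i1 has an
// allocated size of one byte, while an <8 x i1> vector has a store size of
// one byte. At VF = 8, 8 * 1 byte != 1 byte, so i1 is irregular and unsafe
// to access as a vector of that factor.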
/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Induction(nullptr),
        OldInduction(nullptr), VectorLoopValueMap(UnrollFactor, VecWidth),
        TripCount(nullptr), VectorTripCount(nullptr), Legal(nullptr),
        AddedSafetyChecks(false) {}

  // Perform the actual loop widening (vectorization).
  // MinimumBitWidths maps scalar integer values to the smallest bitwidth they
  // can be validly truncated to. The cost model has assumed this truncation
  // will happen when vectorizing. VecValuesToIgnore contains scalar values
  // that the cost model has chosen to ignore because they will not be
  // vectorized.
  void vectorize(LoopVectorizationLegality *L,
                 const MapVector<Instruction *, uint64_t> &MinimumBitWidths) {
    MinBWs = &MinimumBitWidths;
    Legal = L;
    // Create a new empty loop. Unlink the old loop and connect the new one.
    createEmptyLoop();
    // Widen each instruction in the old loop to a new one in the new loop.
    // Use the Legality module to find the induction and reduction variables.
    vectorizeLoop();
  }

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  virtual ~InnerLoopVectorizer() {}

protected:
  /// A small list of PHINodes.
  typedef SmallVector<PHINode *, 4> PhiVector;

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  typedef SmallVector<Value *, 2> VectorParts;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  typedef SmallVector<SmallVector<Value *, 4>, 2> ScalarParts;

  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
      EdgeMaskCache;

  /// Create an empty loop, based on the loop ranges of the old loop.
  void createEmptyLoop();

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Copy and widen the instructions from the old loop.
  virtual void vectorizeLoop();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// \brief The Loop exit block may have single value PHI nodes where the
  /// incoming value is 'Undef'. While vectorizing we only handled real values
  /// that were defined inside the loop.
  /// Here we fix the 'undef case'.
  /// See PR14725.
  void fixLCSSAPHIs();

  /// Predicate conditional instructions that require predication on their
  /// respective conditions.
  void predicateInstructions();

  /// Shrinks vector element sizes based on information in "MinBWs".
  void truncateToMinimalBitwidths();

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);

  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// A helper function to vectorize a single BB within the innermost loop.
  void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF,
                           PhiVector *PV);

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars. If \p IfPredicateInstr is true we need to 'hide' each
  /// scalarized instruction behind an if block predicated on the control
  /// dependence of the instruction.
  virtual void scalarizeInstruction(Instruction *Instr,
                                    bool IfPredicateInstr = false);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it
  /// can be a truncate instruction).
  void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal);

  /// Create a vector induction phi node based on an existing scalar one. This
  /// currently only works for integer induction variables with a constant
  /// step. \p EntryVal is the value from the original loop that maps to the
  /// vector phi node. If \p EntryVal is a truncate instruction, instead of
  /// widening the original IV, we widen a version of the IV truncated to \p
  /// EntryVal's type.
  void createVectorIntInductionPHI(const InductionDescriptor &II,
                                   Instruction *EntryVal);

  /// Widen an integer induction variable \p IV.
  /// If \p Trunc is provided, the
  /// induction variable will first be truncated to the corresponding type.
  void widenIntInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// Return a constant reference to the VectorParts corresponding to \p V from
  /// the original loop. If the value has already been vectorized, the
  /// corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// new vector values on-demand by inserting the scalar values into vectors
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into vectors.
  const VectorParts &getVectorValue(Value *V);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part and vector index \p Lane. If the value has
  /// been vectorized but not scalarized, the necessary extractelement
  /// instruction will be generated.
  Value *getScalarValue(Value *V, unsigned Part, unsigned Lane);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Emit a bypass check to see if the trip count would overflow, or if we
  /// wouldn't have enough iterations to execute one vector loop.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if the vector trip count is nonzero.
  void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);
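
  // For example (illustrative): with UF = 2 and VF = 4, a value %v from the
  // original loop is represented either as two <4 x Ty> vector values (one
  // per unrolled part) or as 2 x 4 scalar values, one per part and lane. The
  // ValueMap below stores both forms.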
  /// This is a helper class for maintaining vectorization state. It's used for
  /// mapping values from the original loop to their corresponding values in
  /// the new loop. Two mappings are maintained: one for vectorized values and
  /// one for scalarized values. Vectorized values are represented with UF
  /// vector values in the new loop, and scalarized values are represented with
  /// UF x VF scalar values in the new loop. UF and VF are the unroll and
  /// vectorization factors, respectively.
  ///
  /// Entries can be added to either map with initVector and initScalar, which
  /// initialize and return a constant reference to the new entry. If a
  /// non-constant reference to a vector entry is required, getVector can be
  /// used to retrieve a mutable entry. We currently directly modify the mapped
  /// values during "fix-up" operations that occur once the first phase of
  /// widening is complete. These operations include type truncation and the
  /// second phase of recurrence widening.
  ///
  /// Otherwise, entries from either map should be accessed using the
  /// getVectorValue or getScalarValue functions from InnerLoopVectorizer.
  /// getVectorValue and getScalarValue coordinate to generate a vector or
  /// scalar value on-demand if one is not yet available. When vectorizing a
  /// loop, we visit the definition of an instruction before its uses. When
  /// visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  struct ValueMap {
    /// Construct an empty map with the given unroll and vectorization factors.
    ValueMap(unsigned UnrollFactor, unsigned VecWidth)
        : UF(UnrollFactor), VF(VecWidth) {
      // The unroll and vectorization factors are only used in asserts builds
      // to verify map entries are sized appropriately.
      (void)UF;
      (void)VF;
    }

    /// \return True if the map has a vector entry for \p Key.
    bool hasVector(Value *Key) const { return VectorMapStorage.count(Key); }

    /// \return True if the map has a scalar entry for \p Key.
    bool hasScalar(Value *Key) const { return ScalarMapStorage.count(Key); }

    /// \brief Map \p Key to the given VectorParts \p Entry, and return a
    /// constant reference to the new vector map entry. The given key should
    /// not already be in the map, and the given VectorParts should be
    /// correctly sized for the current unroll factor.
    const VectorParts &initVector(Value *Key, const VectorParts &Entry) {
      assert(!hasVector(Key) && "Vector entry already initialized");
      assert(Entry.size() == UF && "VectorParts has wrong dimensions");
      VectorMapStorage[Key] = Entry;
      return VectorMapStorage[Key];
    }

    /// \brief Map \p Key to the given ScalarParts \p Entry, and return a
    /// constant reference to the new scalar map entry. The given key should
    /// not already be in the map, and the given ScalarParts should be
    /// correctly sized for the current unroll and vectorization factors.
    const ScalarParts &initScalar(Value *Key, const ScalarParts &Entry) {
      assert(!hasScalar(Key) && "Scalar entry already initialized");
      assert(Entry.size() == UF &&
             all_of(make_range(Entry.begin(), Entry.end()),
                    [&](const SmallVectorImpl<Value *> &Values) -> bool {
                      return Values.size() == VF;
                    }) &&
             "ScalarParts has wrong dimensions");
      ScalarMapStorage[Key] = Entry;
      return ScalarMapStorage[Key];
    }

    /// \return A reference to the vector map entry corresponding to \p Key.
    /// The key should already be in the map. This function should only be used
    /// when it's necessary to update values that have already been vectorized.
    /// This is the case for "fix-up" operations including type truncation and
    /// the second phase of recurrence vectorization. If a non-const reference
    /// isn't required, getVectorValue should be used instead.
    VectorParts &getVector(Value *Key) {
      assert(hasVector(Key) && "Vector entry not initialized");
      return VectorMapStorage.find(Key)->second;
    }

    /// Retrieve an entry from the vector or scalar maps. The preferred way to
    /// access an existing mapped entry is with getVectorValue or
    /// getScalarValue from InnerLoopVectorizer. Until those functions can be
    /// moved inside ValueMap, we have to declare them as friends.
    friend const VectorParts &InnerLoopVectorizer::getVectorValue(Value *V);
    friend Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part,
                                                      unsigned Lane);

  private:
    /// The unroll factor. Each entry in the vector map contains UF vector
    /// values.
    unsigned UF;

    /// The vectorization factor. Each entry in the scalar map contains UF x VF
    /// scalar values.
    unsigned VF;

    /// The vector and scalar map storage. We use std::map and not DenseMap
    /// because insertions to DenseMap invalidate its iterators.
    std::map<Value *, VectorParts> VectorMapStorage;
    std::map<Value *, ScalarParts> ScalarMapStorage;
  };

  /// The original loop.
  Loop *OrigLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;
  /// Loop Info.
  LoopInfo *LI;
  /// Dominator Tree.
  DominatorTree *DT;
  /// Alias Analysis.
  AliasAnalysis *AA;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Assumption Cache.
  AssumptionCache *AC;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

protected:
  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---
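
  // A simplified sketch (assuming bypass checks are emitted) of the block
  // structure that createEmptyLoop produces; the members below name its
  // pieces:
  //
  //   [bypass blocks] --(check fails)--> [scalar preheader]
  //         |
  //   [vector preheader]
  //         |
  //   [vector body] <----+
  //         |       \----+
  //   [middle block] --(remainder iterations)--> [scalar preheader]
  //         |                                          |
  //         |                                    [scalar body]
  //         +-----------> [exit block] <---------------+
  //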
  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;
  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;
  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;
  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;
  /// The vector loop body.
  BasicBlock *LoopVectorBody;
  /// The scalar loop body.
  BasicBlock *LoopScalarBody;
  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction;
  /// The induction variable of the old basic block.
  PHINode *OldInduction;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  ValueMap VectorLoopValueMap;

  /// Store instructions that should be predicated, as a pair
  /// <StoreInst, Predicate>.
  SmallVector<std::pair<Instruction *, Value *>, 4> PredicatedInstructions;
  EdgeMaskCache MaskCache;
  /// Trip count of the original loop.
  Value *TripCount;
  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount;

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  const MapVector<Instruction *, uint64_t> *MinBWs;

  LoopVectorizationLegality *Legal;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor) {}

private:
  void scalarizeInstruction(Instruction *Instr,
                            bool IfPredicateInstr = false) override;
  void vectorizeMemoryInstruction(Instruction *Instr) override;
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

/// \brief Set the debug location in the builder using the debug location in
/// the instruction.
static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr))
    B.SetCurrentDebugLocation(Inst->getDebugLoc());
  else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To, Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

/// \brief A group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///   for (unsigned i = 0; i < 1024; i += 4) {
///     a = A[i];   // Member of index 0
///     b = A[i+1]; // Member of index 1
///     d = A[i+3]; // Member of index 3
///     ...
///   }
///
/// An interleaved store group of factor 4:
///   for (unsigned i = 0; i < 1024; i += 4) {
///     ...
///     A[i]   = a; // Member of index 0
///     A[i+1] = b; // Member of index 1
///     A[i+2] = c; // Member of index 2
///     A[i+3] = d; // Member of index 3
///   }
///
/// Note: an interleaved load group may have gaps (missing members), but an
/// interleaved store group may not.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is relative to the leader and may be
  /// negative if \p Instr becomes the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The largest index is always less than the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if the group contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey;
  int LargestKey;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32      // Insert Position
  //      %add = add i32 %even  // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32        // Def of %odd
  //      store i32 %odd        // Insert Position
  Instruction *InsertPos;
};

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise it's meaningless to do the analysis, as vectorization
/// of interleaved accesses would be unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the members and their groups in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(nullptr),
        RequiresScalarEpilogue(false) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Return the maximum interleave factor of all interleaved groups.
  unsigned getMaxInterleaveFactor() const {
    unsigned MaxFactor = 1;
    for (auto &Entry : InterleaveGroupMap)
      MaxFactor = std::max(MaxFactor, Entry.second->getFactor());
    return MaxFactor;
  }
  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr doesn't belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

  /// \brief Initialize the LoopAccessInfo used for dependence checking.
  void setLAI(const LoopAccessInfo *Info) { LAI = Info; }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;
  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// Holds dependences among the memory accesses in the loop. It maps a source
  /// access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    StrideDescriptor() = default;

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;
    const SCEV *Scev = nullptr; // The scalar expression of this access.
    uint64_t Size = 0;          // The size of the memory object.
    unsigned Align = 0;         // The alignment of this access.
  };

  /// \brief A type for holding instructions and their stride descriptors.
  typedef std::pair<Instruction *, StrideDescriptor> StrideEntry;

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);

  /// \brief Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride) {
    unsigned Factor = std::abs(Stride);
    return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
  }

  /// \brief Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }

  /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return false if reordering is
  /// not necessary or is prevented because \p A and \p B may be dependent.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {
    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B. Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
  }

  /// \brief Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};
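
// As an illustration of the metadata that LoopVectorizeHints (below)
// consumes, a loop annotated in source with
//
//   #pragma clang loop vectorize_width(4) interleave_count(2)
//
// typically reaches this pass carrying IR of the form:
//
//   br i1 %cond, label %loop, label %exit, !llvm.loop !0
//   !0 = distinct !{!0, !1, !2}
//   !1 = !{!"llvm.loop.vectorize.width", i32 4}
//   !2 = !{!"llvm.loop.interleave.count", i32 2}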
/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked so.
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;
  /// Vectorization interleave factor.
  Hint Interleave;
  /// Vectorization forced.
  Hint Force;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
                     OptimizationRemarkEmitter &ORE)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        PotentiallyUnsafe(false), TheLoop(L), ORE(ORE) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized() {
    Width.Value = Interleave.Value = 1;
    Hint Hints[] = {Width, Interleave};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      ORE.emitOptimizationRemarkAnalysis(vectorizeAnalysisPassName(), L,
                                         emitRemark());
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      ORE.emitOptimizationRemarkAnalysis(vectorizeAnalysisPassName(), L,
                                         emitRemark());
      return false;
    }

    if (getWidth() == 1 && getInterleave() == 1) {
      // FIXME: Add a separate metadata to indicate when the loop has already
      // been vectorized instead of setting width and count to 1.
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata.
      // This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      ORE.emitOptimizationRemarkAnalysis(
          vectorizeAnalysisPassName(), L,
          "loop not vectorized: vectorization and interleaving are explicitly "
          "disabled, or vectorize width and interleave count are both set to "
          "1");
      return false;
    }

    return true;
  }

  /// Dumps all the hint information.
  std::string emitRemark() const {
    VectorizationReport R;
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      R << "vectorization is explicitly disabled";
    else {
      R << "use -Rpass-analysis=loop-vectorize for more info";
      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
        R << " (Force=true";
        if (Width.Value != 0)
          R << ", Vector Width=" << Width.Value;
        if (Interleave.Value != 0)
          R << ", Interleave Count=" << Interleave.Value;
        R << ")";
      }
    }

    return R.str();
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }

  /// \brief If hints are provided that force vectorization, use the AlwaysPrint
  /// pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const {
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return DiagnosticInfoOptimizationRemarkAnalysis::AlwaysPrint;
  }

  bool allowReordering() const {
    // When loop hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations that is given by the
    // scalar loop. This is not enabled by default because it can be unsafe
    // or inefficient. For example, reordering floating-point operations will
    // change the way round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either an MDString or an MDNode with the first
      // operand an MDString.
      if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        if (!MD || MD->getNumOperands() == 0)
          continue;
        S = dyn_cast<MDString>(MD->getOperand(0));
        for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
          Args.push_back(MD->getOperand(i));
      } else {
        S = dyn_cast<MDString>(LoopID->getOperand(i));
        assert(Args.size() == 0 && "too many arguments for MDString");
      }

      if (!S)
        continue;

      // Check if the hint starts with the loop metadata prefix.
      StringRef Name = S->getString();
      if (Args.size() == 1)
        setHint(Name, Args[0]);
    }
  }

  /// Checks a string hint with one operand and sets the value if valid.
  void setHint(StringRef Name, Metadata *Arg) {
    if (!Name.startswith(Prefix()))
      return;
    Name = Name.substr(Prefix().size(), StringRef::npos);

    const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
    if (!C)
      return;
    unsigned Val = C->getZExtValue();

    Hint *Hints[] = {&Width, &Interleave, &Force};
    for (auto H : Hints) {
      if (Name == H->Name) {
        if (H->validate(Val))
          H->Value = Val;
        else
          DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
        break;
      }
    }
  }

  /// Create a new hint from a name / value pair.
  MDNode *createHintMetadata(StringRef Name, unsigned V) const {
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    Metadata *MDs[] = {MDString::get(Context, Name),
                       ConstantAsMetadata::get(
                           ConstantInt::get(Type::getInt32Ty(Context), V))};
    return MDNode::get(Context, MDs);
  }

  /// Matches metadata with hint name.
  bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
    MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
    if (!Name)
      return false;

    for (auto H : HintTypes)
      if (Name->getString().endswith(H.Name))
        return true;
    return false;
  }

  /// Sets current hints into loop metadata, keeping other values intact.
  void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
    if (HintTypes.size() == 0)
      return;

    // Reserve the first element to LoopID (see below).
    SmallVector<Metadata *, 4> MDs(1);
    // If the loop already has metadata, then ignore the existing operands.
    MDNode *LoopID = TheLoop->getLoopID();
    if (LoopID) {
      for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
        MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
        // If the node is in the update list, ignore the old value.
        if (!matchesHintMetadataName(Node, HintTypes))
          MDs.push_back(Node);
      }
    }

    // Now, add the missing hints.
    for (auto H : HintTypes)
      MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));

    // Replace the current metadata node with the new one.
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);

    TheLoop->setLoopID(NewLoopID);
  }

  /// The loop these hints belong to.
  const Loop *TheLoop;

  /// Interface to emit optimization remarks.
1441 OptimizationRemarkEmitter &ORE;
1442 };
1443
1444 static void emitAnalysisDiag(const Loop *TheLoop,
1445 const LoopVectorizeHints &Hints,
1446 OptimizationRemarkEmitter &ORE,
1447 const LoopAccessReport &Message) {
1448 const char *Name = Hints.vectorizeAnalysisPassName();
1449 LoopAccessReport::emitAnalysis(Message, TheLoop, Name, ORE);
1450 }
1451
1452 static void emitMissedWarning(Function *F, Loop *L,
1453 const LoopVectorizeHints &LH,
1454 OptimizationRemarkEmitter *ORE) {
1455 ORE->emitOptimizationRemarkMissed(LV_NAME, L, LH.emitRemark());
1456
1457 if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
1458 if (LH.getWidth() != 1)
1459 emitLoopVectorizeWarning(
1460 F->getContext(), *F, L->getStartLoc(),
1461 "failed explicitly specified loop vectorization");
1462 else if (LH.getInterleave() != 1)
1463 emitLoopInterleaveWarning(
1464 F->getContext(), *F, L->getStartLoc(),
1465 "failed explicitly specified loop interleaving");
1466 }
1467 }
1468
1469 /// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
1470 /// to what vectorization factor.
1471 /// This class does not look at the profitability of vectorization, only the
1472 /// legality. This class has two main kinds of checks:
1473 /// * Memory checks - The code in canVectorizeMemory checks if vectorization
1474 /// will change the order of memory accesses in a way that will change the
1475 /// correctness of the program.
1476 /// * Scalar checks - The code in canVectorizeInstrs and canVectorizeMemory
1477 /// checks for a number of different conditions, such as the availability of a
1478 /// single induction variable, that all types are supported and vectorizable,
1479 /// etc. This code reflects the capabilities of InnerLoopVectorizer.
1480 /// This class is also used by InnerLoopVectorizer for identifying the
1481 /// induction variable and the different reduction variables.
1482 class LoopVectorizationLegality {
1483 public:
1484 LoopVectorizationLegality(
1485 Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
1486 TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F,
1487 const TargetTransformInfo *TTI,
1488 std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI,
1489 OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R,
1490 LoopVectorizeHints *H)
1491 : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT),
1492 GetLAA(GetLAA), LAI(nullptr), ORE(ORE), InterleaveInfo(PSE, L, DT, LI),
1493 Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
1494 Requirements(R), Hints(H) {}
1495
1496 /// ReductionList contains the reduction descriptors for all
1497 /// of the reductions that were found in the loop.
1498 typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;
1499
1500 /// InductionList saves induction variables and maps them to the
1501 /// induction descriptor.
1502 typedef MapVector<PHINode *, InductionDescriptor> InductionList;
1503
1504 /// RecurrenceSet contains the phi nodes that are recurrences other than
1505 /// inductions and reductions.
1506 typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet;
1507
1508 /// Returns true if it is legal to vectorize this loop.
1509 /// This does not mean that it is profitable to vectorize this
1510 /// loop, only that it is legal to do so.
1511 bool canVectorize();
1512
1513 /// Returns the Induction variable.
1514 PHINode *getInduction() { return Induction; }
1515
1516 /// Returns the reduction variables found in the loop.
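/// For example (illustrative), in:
///   for (int i = 0; i < n; ++i)
///     sum += a[i];
/// the phi node for "sum" is a reduction, and its RecurrenceDescriptor
/// records an integer-add recurrence.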
1517 ReductionList *getReductionVars() { return &Reductions; }
1518
1519 /// Returns the induction variables found in the loop.
1520 InductionList *getInductionVars() { return &Inductions; }
1521
1522 /// Return the first-order recurrences found in the loop.
1523 RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }
1524
1525 /// Returns the widest induction type.
1526 Type *getWidestInductionType() { return WidestIndTy; }
1527
1528 /// Returns True if V is an induction variable in this loop.
1529 bool isInductionVariable(const Value *V);
1530
1531 /// Returns True if PN is a reduction variable in this loop.
1532 bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }
1533
1534 /// Returns True if Phi is a first-order recurrence in this loop.
1535 bool isFirstOrderRecurrence(const PHINode *Phi);
1536
1537 /// Return true if the block BB needs to be predicated in order for the loop
1538 /// to be vectorized.
1539 bool blockNeedsPredication(BasicBlock *BB);
1540
1541 /// Check if this pointer is consecutive when vectorizing. This happens
1542 /// when the last index of the GEP is the induction variable, or when the
1543 /// pointer itself is an induction variable.
1544 /// This check allows us to vectorize A[idx] into a wide load/store.
1545 /// Returns:
1546 /// 0 - Stride is unknown or non-consecutive.
1547 /// 1 - Address is consecutive.
1548 /// -1 - Address is consecutive, and decreasing.
1549 int isConsecutivePtr(Value *Ptr);
1550
1551 /// Returns true if the value V is uniform within the loop.
1552 bool isUniform(Value *V);
1553
1554 /// Returns true if \p I is known to be uniform after vectorization.
1555 bool isUniformAfterVectorization(Instruction *I) { return Uniforms.count(I); }
1556
1557 /// Returns true if \p I is known to be scalar after vectorization.
1558 bool isScalarAfterVectorization(Instruction *I) { return Scalars.count(I); }
1559
1560 /// Returns the information that we collected about the runtime memory checks.
1561 const RuntimePointerChecking *getRuntimePointerChecking() const {
1562 return LAI->getRuntimePointerChecking();
1563 }
1564
1565 const LoopAccessInfo *getLAI() const { return LAI; }
1566
1567 /// \brief Check if \p Instr belongs to any interleaved access group.
1568 bool isAccessInterleaved(Instruction *Instr) {
1569 return InterleaveInfo.isInterleaved(Instr);
1570 }
1571
1572 /// \brief Return the maximum interleave factor of all interleaved groups.
1573 unsigned getMaxInterleaveFactor() const {
1574 return InterleaveInfo.getMaxInterleaveFactor();
1575 }
1576
1577 /// \brief Get the interleaved access group that \p Instr belongs to.
1578 const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
1579 return InterleaveInfo.getInterleaveGroup(Instr);
1580 }
1581
1582 /// \brief Returns true if an interleaved group requires a scalar iteration
1583 /// to handle accesses with gaps.
1584 bool requiresScalarEpilogue() const {
1585 return InterleaveInfo.requiresScalarEpilogue();
1586 }
1587
1588 unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }
1589
1590 bool hasStride(Value *V) { return LAI->hasStride(V); }
1591
1592 /// Returns true if the target machine supports masked store operation
1593 /// for the given \p DataType and kind of access to \p Ptr.
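/// For example (illustrative), a conditional store such as:
///   for (int i = 0; i < n; ++i)
///     if (c[i]) a[i] = x;
/// can only be widened without scalarization if the target has a masked
/// store for a[i]'s type and the pointer is consecutive, as checked below.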
1594 bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
1595 return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
1596 }
1597 /// Returns true if the target machine supports masked load operation
1598 /// for the given \p DataType and kind of access to \p Ptr.
1599 bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
1600 return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
1601 }
1602 /// Returns true if the target machine supports masked scatter operation
1603 /// for the given \p DataType.
1604 bool isLegalMaskedScatter(Type *DataType) {
1605 return TTI->isLegalMaskedScatter(DataType);
1606 }
1607 /// Returns true if the target machine supports masked gather operation
1608 /// for the given \p DataType.
1609 bool isLegalMaskedGather(Type *DataType) {
1610 return TTI->isLegalMaskedGather(DataType);
1611 }
1612 /// Returns true if the target machine can represent \p V as a masked gather
1613 /// or scatter operation.
1614 bool isLegalGatherOrScatter(Value *V) {
1615 auto *LI = dyn_cast<LoadInst>(V);
1616 auto *SI = dyn_cast<StoreInst>(V);
1617 if (!LI && !SI)
1618 return false;
1619 auto *Ptr = getPointerOperand(V);
1620 auto *Ty = cast<PointerType>(Ptr->getType())->getElementType();
1621 return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
1622 }
1623
1624 /// Returns true if the vector representation of the instruction \p I
1625 /// requires a mask.
1626 bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
1627 unsigned getNumStores() const { return LAI->getNumStores(); }
1628 unsigned getNumLoads() const { return LAI->getNumLoads(); }
1629 unsigned getNumPredStores() const { return NumPredStores; }
1630
1631 /// Returns true if \p I is a store instruction in a predicated block that
1632 /// will be scalarized during vectorization.
1633 bool isPredicatedStore(Instruction *I);
1634
1635 /// Returns true if \p I is a memory instruction that has a consecutive or
1636 /// consecutive-like pointer operand. Consecutive-like pointers are pointers
1637 /// that are treated like consecutive pointers during vectorization. The
1638 /// pointer operands of interleaved accesses are an example.
1639 bool hasConsecutiveLikePtrOperand(Instruction *I);
1640
1641 /// Returns true if \p I is a memory instruction that must be scalarized
1642 /// during vectorization.
1643 bool memoryInstructionMustBeScalarized(Instruction *I, unsigned VF = 1);
1644
1645 private:
1646 /// Check if a single basic block loop is vectorizable.
1647 /// At this point we know that this is a loop with a constant trip count
1648 /// and we only need to check individual instructions.
1649 bool canVectorizeInstrs();
1650
1651 /// When we vectorize loops we may change the order in which
1652 /// we read and write from memory. This method checks if it is
1653 /// legal to vectorize the code, considering only memory constraints.
1654 /// Returns true if the loop is vectorizable.
1655 bool canVectorizeMemory();
1656
1657 /// Return true if we can vectorize this loop using the IF-conversion
1658 /// transformation.
1659 bool canVectorizeWithIfConvert();
1660
1661 /// Collect the instructions that are uniform after vectorization. An
1662 /// instruction is uniform if we represent it with a single scalar value in
1663 /// the vectorized loop corresponding to each vector iteration. Examples of
1664 /// uniform instructions include pointer operands of consecutive or
1665 /// interleaved memory accesses.
Note that although uniformity implies an
1666 /// instruction will be scalar, the reverse is not true. In general, a
1667 /// scalarized instruction will be represented by VF scalar values in the
1668 /// vectorized loop, each corresponding to an iteration of the original
1669 /// scalar loop.
1670 void collectLoopUniforms();
1671
1672 /// Collect the instructions that are scalar after vectorization. An
1673 /// instruction is scalar if it is known to be uniform or will be scalarized
1674 /// during vectorization. Non-uniform scalarized instructions will be
1675 /// represented by VF values in the vectorized loop, each corresponding to an
1676 /// iteration of the original scalar loop.
1677 void collectLoopScalars();
1678
1679 /// Return true if all of the instructions in the block can be speculatively
1680 /// executed. \p SafePtrs is a list of addresses that are known to be legal
1681 /// and we know that we can read from them without segfault.
1682 bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);
1683
1684 /// Updates the vectorization state by adding \p Phi to the inductions list.
1685 /// This can set \p Phi as the main induction of the loop if \p Phi is a
1686 /// better choice for the main induction than the existing one.
1687 void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
1688 SmallPtrSetImpl<Value *> &AllowedExit);
1689
1690 /// Report an analysis message to assist the user in diagnosing loops that are
1691 /// not vectorized. These are handled as LoopAccessReport rather than
1692 /// VectorizationReport because the << operator of VectorizationReport returns
1693 /// LoopAccessReport.
1694 void emitAnalysis(const LoopAccessReport &Message) const {
1695 emitAnalysisDiag(TheLoop, *Hints, *ORE, Message);
1696 }
1697
1698 /// \brief If an access has a symbolic stride, this maps the pointer value to
1699 /// the stride symbol.
1700 const ValueToValueMap *getSymbolicStrides() {
1701 // FIXME: Currently, the set of symbolic strides is sometimes queried before
1702 // it's collected. This happens from canVectorizeWithIfConvert, when the
1703 // pointer is checked to reference consecutive elements suitable for a
1704 // masked access.
1705 return LAI ? &LAI->getSymbolicStrides() : nullptr;
1706 }
1707
1708 unsigned NumPredStores;
1709
1710 /// The loop that we evaluate.
1711 Loop *TheLoop;
1712 /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
1713 /// Applies dynamic knowledge to simplify SCEV expressions in the context
1714 /// of existing SCEV assumptions. The analysis will also add a minimal set
1715 /// of new predicates if this is required to enable vectorization and
1716 /// unrolling.
1717 PredicatedScalarEvolution &PSE;
1718 /// Target Library Info.
1719 TargetLibraryInfo *TLI;
1720 /// Target Transform Info.
1721 const TargetTransformInfo *TTI;
1722 /// Dominator Tree.
1723 DominatorTree *DT;
1724 // LoopAccess analysis.
1725 std::function<const LoopAccessInfo &(Loop &)> *GetLAA;
1726 // And the loop-accesses info corresponding to this loop. This pointer is
1727 // null until canVectorizeMemory sets it up.
1728 const LoopAccessInfo *LAI;
1729 /// Interface to emit optimization remarks.
1730 OptimizationRemarkEmitter *ORE;
1731
1732 /// The interleave access information contains groups of interleaved accesses
1733 /// with the same stride and close to each other.
1734 InterleavedAccessInfo InterleaveInfo;
1735
1736 // --- vectorization state --- //
1737
1738 /// Holds the integer induction variable.
This is the counter of the 1739 /// loop. 1740 PHINode *Induction; 1741 /// Holds the reduction variables. 1742 ReductionList Reductions; 1743 /// Holds all of the induction variables that we found in the loop. 1744 /// Notice that inductions don't need to start at zero and that induction 1745 /// variables can be pointers. 1746 InductionList Inductions; 1747 /// Holds the phi nodes that are first-order recurrences. 1748 RecurrenceSet FirstOrderRecurrences; 1749 /// Holds the widest induction type encountered. 1750 Type *WidestIndTy; 1751 1752 /// Allowed outside users. This holds the induction and reduction 1753 /// vars which can be accessed from outside the loop. 1754 SmallPtrSet<Value *, 4> AllowedExit; 1755 1756 /// Holds the instructions known to be uniform after vectorization. 1757 SmallPtrSet<Instruction *, 4> Uniforms; 1758 1759 /// Holds the instructions known to be scalar after vectorization. 1760 SmallPtrSet<Instruction *, 4> Scalars; 1761 1762 /// Can we assume the absence of NaNs. 1763 bool HasFunNoNaNAttr; 1764 1765 /// Vectorization requirements that will go through late-evaluation. 1766 LoopVectorizationRequirements *Requirements; 1767 1768 /// Used to emit an analysis of any legality issues. 1769 LoopVectorizeHints *Hints; 1770 1771 /// While vectorizing these instructions we have to generate a 1772 /// call to the appropriate masked intrinsic 1773 SmallPtrSet<const Instruction *, 8> MaskedOp; 1774 }; 1775 1776 /// LoopVectorizationCostModel - estimates the expected speedups due to 1777 /// vectorization. 1778 /// In many cases vectorization is not profitable. This can happen because of 1779 /// a number of reasons. In this class we mainly attempt to predict the 1780 /// expected speedup/slowdowns due to the supported instruction set. We use the 1781 /// TargetTransformInfo to query the different backends for the cost of 1782 /// different operations. 1783 class LoopVectorizationCostModel { 1784 public: 1785 LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE, 1786 LoopInfo *LI, LoopVectorizationLegality *Legal, 1787 const TargetTransformInfo &TTI, 1788 const TargetLibraryInfo *TLI, DemandedBits *DB, 1789 AssumptionCache *AC, 1790 OptimizationRemarkEmitter *ORE, const Function *F, 1791 const LoopVectorizeHints *Hints) 1792 : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB), 1793 AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {} 1794 1795 /// Information about vectorization costs 1796 struct VectorizationFactor { 1797 unsigned Width; // Vector width with best cost 1798 unsigned Cost; // Cost of the loop with that width 1799 }; 1800 /// \return The most profitable vectorization factor and the cost of that VF. 1801 /// This method checks every power of two up to VF. If UserVF is not ZERO 1802 /// then this vectorization factor will be selected if vectorization is 1803 /// possible. 1804 VectorizationFactor selectVectorizationFactor(bool OptForSize); 1805 1806 /// \return The size (in bits) of the smallest and widest types in the code 1807 /// that needs to be vectorized. We ignore values that remain scalar such as 1808 /// 64 bit loop indices. 1809 std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); 1810 1811 /// \return The desired interleave count. 1812 /// If interleave count has been specified by metadata it will be returned. 1813 /// Otherwise, the interleave count is computed and returned. VF and LoopCost 1814 /// are the selected vectorization factor and the cost of the selected VF. 
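/// For example (illustrative), with VF = 4 and an interleave count of 2,
/// each iteration of the vector body processes 8 elements using two
/// independent chains of vector instructions, which helps hide latency.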
1815 unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
1816 unsigned LoopCost);
1817
1818 /// \return The most profitable unroll factor.
1819 /// This method finds the best unroll factor based on register pressure and
1820 /// other parameters. VF and LoopCost are the selected vectorization factor
1821 /// and the cost of the selected VF.
1822 unsigned computeInterleaveCount(bool OptForSize, unsigned VF,
1823 unsigned LoopCost);
1824
1825 /// \brief A struct that represents some properties of the register usage
1826 /// of a loop.
1827 struct RegisterUsage {
1828 /// Holds the number of loop invariant values that are used in the loop.
1829 unsigned LoopInvariantRegs;
1830 /// Holds the maximum number of concurrent live intervals in the loop.
1831 unsigned MaxLocalUsers;
1832 /// Holds the number of instructions in the loop.
1833 unsigned NumInstructions;
1834 };
1835
1836 /// \return Returns information about the register usage of the loop for the
1837 /// given vectorization factors.
1838 SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1839
1840 /// Collect values we want to ignore in the cost model.
1841 void collectValuesToIgnore();
1842
1843 private:
1844 /// The vectorization cost is a combination of the cost
1845 /// itself and a boolean indicating whether any of the
1846 /// contributing operations will actually operate on
1847 /// vector values after type legalization in the backend.
1848 /// If this latter value is false, then all operations
1849 /// will be scalarized (i.e. no vectorization has
1850 /// actually taken place).
1851 typedef std::pair<unsigned, bool> VectorizationCostTy;
1852
1853 /// Returns the expected execution cost. The unit of the cost does
1854 /// not matter because we use the 'cost' units to compare different
1855 /// vector widths. The cost that is returned is *not* normalized by
1856 /// the factor width.
1857 VectorizationCostTy expectedCost(unsigned VF);
1858
1859 /// Returns the execution time cost of an instruction for a given vector
1860 /// width. Vector width of one means scalar.
1861 VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
1862
1863 /// The cost-computation logic from getInstructionCost which provides
1864 /// the vector type as an output parameter.
1865 unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
1866
1867 /// Returns whether the instruction is a load or store and will be emitted
1868 /// as a vector operation.
1869 bool isConsecutiveLoadOrStore(Instruction *I);
1870
1871 /// Report an analysis message to assist the user in diagnosing loops that are
1872 /// not vectorized. These are handled as LoopAccessReport rather than
1873 /// VectorizationReport because the << operator of VectorizationReport returns
1874 /// LoopAccessReport.
1875 void emitAnalysis(const LoopAccessReport &Message) const {
1876 emitAnalysisDiag(TheLoop, *Hints, *ORE, Message);
1877 }
1878
1879 public:
1880 /// Map of scalar integer values to the smallest bitwidth they can be legally
1881 /// represented as. The vector equivalents of these values should be truncated
1882 /// to this type.
1883 MapVector<Instruction *, uint64_t> MinBWs;
1884
1885 /// The loop that we evaluate.
1886 Loop *TheLoop;
1887 /// Predicated scalar evolution analysis.
1888 PredicatedScalarEvolution &PSE;
1889 /// Loop Info analysis.
1890 LoopInfo *LI;
1891 /// Vectorization legality.
1892 LoopVectorizationLegality *Legal;
1893 /// Vector target information.
1894 const TargetTransformInfo &TTI;
1895 /// Target Library Info.
1896 const TargetLibraryInfo *TLI;
1897 /// Demanded bits analysis.
1898 DemandedBits *DB;
1899 /// Assumption cache.
1900 AssumptionCache *AC;
1901 /// Interface to emit optimization remarks.
1902 OptimizationRemarkEmitter *ORE;
1903
1904 const Function *TheFunction;
1905 /// Loop Vectorize Hint.
1906 const LoopVectorizeHints *Hints;
1907 /// Values to ignore in the cost model.
1908 SmallPtrSet<const Value *, 16> ValuesToIgnore;
1909 /// Values to ignore in the cost model when VF > 1.
1910 SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1911 };
1912
1913 /// \brief This holds vectorization requirements that must be verified late in
1914 /// the process. The requirements are set by the legality and cost model. Once
1915 /// vectorization has been determined to be possible and profitable the
1916 /// requirements can be verified by looking for metadata or compiler options.
1917 /// For example, some loops require FP commutativity which is only allowed if
1918 /// vectorization is explicitly specified or if the fast-math compiler option
1919 /// has been provided.
1920 /// Late evaluation of these requirements allows helpful diagnostics to be
1921 /// composed that tell the user what needs to be done to vectorize the loop.
1922 /// For example, by specifying #pragma clang loop vectorize or -ffast-math.
1923 /// Late evaluation should be used only when diagnostics can be generated
1924 /// that can be followed by a non-expert user.
1925 class LoopVectorizationRequirements {
1926 public:
1927 LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE)
1928 : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr), ORE(ORE) {}
1929
1930 void addUnsafeAlgebraInst(Instruction *I) {
1931 // First unsafe algebra instruction.
1932 if (!UnsafeAlgebraInst)
1933 UnsafeAlgebraInst = I;
1934 }
1935
1936 void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }
1937
1938 bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
1939 const char *Name = Hints.vectorizeAnalysisPassName();
1940 bool Failed = false;
1941 if (UnsafeAlgebraInst && !Hints.allowReordering()) {
1942 ORE.emitOptimizationRemarkAnalysisFPCommute(
1943 Name, UnsafeAlgebraInst->getDebugLoc(),
1944 UnsafeAlgebraInst->getParent(),
1945 VectorizationReport() << "cannot prove it is safe to reorder "
1946 "floating-point operations");
1947 Failed = true;
1948 }
1949
1950 // Test if runtime memcheck thresholds are exceeded.
1951 bool PragmaThresholdReached =
1952 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
1953 bool ThresholdReached =
1954 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
1955 if ((ThresholdReached && !Hints.allowReordering()) ||
1956 PragmaThresholdReached) {
1957 ORE.emitOptimizationRemarkAnalysisAliasing(
1958 Name, L,
1959 VectorizationReport()
1960 << "cannot prove it is safe to reorder memory operations");
1961 DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
1962 Failed = true;
1963 }
1964
1965 return Failed;
1966 }
1967
1968 private:
1969 unsigned NumRuntimePointerChecks;
1970 Instruction *UnsafeAlgebraInst;
1971
1972 /// Interface to emit optimization remarks.
1973 OptimizationRemarkEmitter &ORE; 1974 }; 1975 1976 static void addAcyclicInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) { 1977 if (L.empty()) { 1978 if (!hasCyclesInLoopBody(L)) 1979 V.push_back(&L); 1980 return; 1981 } 1982 for (Loop *InnerL : L) 1983 addAcyclicInnerLoop(*InnerL, V); 1984 } 1985 1986 /// The LoopVectorize Pass. 1987 struct LoopVectorize : public FunctionPass { 1988 /// Pass identification, replacement for typeid 1989 static char ID; 1990 1991 explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true) 1992 : FunctionPass(ID) { 1993 Impl.DisableUnrolling = NoUnrolling; 1994 Impl.AlwaysVectorize = AlwaysVectorize; 1995 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1996 } 1997 1998 LoopVectorizePass Impl; 1999 2000 bool runOnFunction(Function &F) override { 2001 if (skipFunction(F)) 2002 return false; 2003 2004 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2005 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2006 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2007 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2008 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2009 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2010 auto *TLI = TLIP ? &TLIP->getTLI() : nullptr; 2011 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2012 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2013 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2014 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2015 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2016 2017 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2018 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2019 2020 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2021 GetLAA, *ORE); 2022 } 2023 2024 void getAnalysisUsage(AnalysisUsage &AU) const override { 2025 AU.addRequired<AssumptionCacheTracker>(); 2026 AU.addRequiredID(LoopSimplifyID); 2027 AU.addRequiredID(LCSSAID); 2028 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2029 AU.addRequired<DominatorTreeWrapperPass>(); 2030 AU.addRequired<LoopInfoWrapperPass>(); 2031 AU.addRequired<ScalarEvolutionWrapperPass>(); 2032 AU.addRequired<TargetTransformInfoWrapperPass>(); 2033 AU.addRequired<AAResultsWrapperPass>(); 2034 AU.addRequired<LoopAccessLegacyAnalysis>(); 2035 AU.addRequired<DemandedBitsWrapperPass>(); 2036 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2037 AU.addPreserved<LoopInfoWrapperPass>(); 2038 AU.addPreserved<DominatorTreeWrapperPass>(); 2039 AU.addPreserved<BasicAAWrapperPass>(); 2040 AU.addPreserved<GlobalsAAWrapperPass>(); 2041 } 2042 }; 2043 2044 } // end anonymous namespace 2045 2046 //===----------------------------------------------------------------------===// 2047 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2048 // LoopVectorizationCostModel. 2049 //===----------------------------------------------------------------------===// 2050 2051 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2052 // We need to place the broadcast of invariant variables outside the loop. 2053 Instruction *Instr = dyn_cast<Instruction>(V); 2054 bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody); 2055 bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr; 2056 2057 // Place the code for broadcasting invariant variables in the new preheader. 
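// For VF = 4, CreateVectorSplat below produces IR of roughly this shape
// (illustrative):
//   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %v, i32 0
//   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
//                          <4 x i32> undef, <4 x i32> zeroinitializer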
2058 IRBuilder<>::InsertPointGuard Guard(Builder); 2059 if (Invariant) 2060 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2061 2062 // Broadcast the scalar into all locations in the vector. 2063 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2064 2065 return Shuf; 2066 } 2067 2068 void InnerLoopVectorizer::createVectorIntInductionPHI( 2069 const InductionDescriptor &II, Instruction *EntryVal) { 2070 Value *Start = II.getStartValue(); 2071 ConstantInt *Step = II.getConstIntStepValue(); 2072 assert(Step && "Can not widen an IV with a non-constant step"); 2073 2074 // Construct the initial value of the vector IV in the vector loop preheader 2075 auto CurrIP = Builder.saveIP(); 2076 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2077 if (isa<TruncInst>(EntryVal)) { 2078 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2079 Step = ConstantInt::getSigned(TruncType, Step->getSExtValue()); 2080 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2081 } 2082 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2083 Value *SteppedStart = getStepVector(SplatStart, 0, Step); 2084 Builder.restoreIP(CurrIP); 2085 2086 Value *SplatVF = 2087 ConstantVector::getSplat(VF, ConstantInt::getSigned(Start->getType(), 2088 VF * Step->getSExtValue())); 2089 // We may need to add the step a number of times, depending on the unroll 2090 // factor. The last of those goes into the PHI. 2091 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2092 &*LoopVectorBody->getFirstInsertionPt()); 2093 Instruction *LastInduction = VecInd; 2094 VectorParts Entry(UF); 2095 for (unsigned Part = 0; Part < UF; ++Part) { 2096 Entry[Part] = LastInduction; 2097 LastInduction = cast<Instruction>( 2098 Builder.CreateAdd(LastInduction, SplatVF, "step.add")); 2099 } 2100 VectorLoopValueMap.initVector(EntryVal, Entry); 2101 if (isa<TruncInst>(EntryVal)) 2102 addMetadata(Entry, EntryVal); 2103 2104 // Move the last step to the end of the latch block. This ensures consistent 2105 // placement of all induction updates. 2106 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2107 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2108 auto *ICmp = cast<Instruction>(Br->getCondition()); 2109 LastInduction->moveBefore(ICmp); 2110 LastInduction->setName("vec.ind.next"); 2111 2112 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2113 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2114 } 2115 2116 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2117 if (Legal->isScalarAfterVectorization(IV)) 2118 return true; 2119 auto isScalarInst = [&](User *U) -> bool { 2120 auto *I = cast<Instruction>(U); 2121 return (OrigLoop->contains(I) && Legal->isScalarAfterVectorization(I)); 2122 }; 2123 return any_of(IV->users(), isScalarInst); 2124 } 2125 2126 void InnerLoopVectorizer::widenIntInduction(PHINode *IV, TruncInst *Trunc) { 2127 2128 auto II = Legal->getInductionVars()->find(IV); 2129 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 2130 2131 auto ID = II->second; 2132 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2133 2134 // The scalar value to broadcast. This will be derived from the canonical 2135 // induction variable. 2136 Value *ScalarIV = nullptr; 2137 2138 // The step of the induction. 2139 Value *Step = nullptr; 2140 2141 // The value from the original loop to which we are mapping the new induction 2142 // variable. 
2143 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2144 2145 // True if we have vectorized the induction variable. 2146 auto VectorizedIV = false; 2147 2148 // Determine if we want a scalar version of the induction variable. This is 2149 // true if the induction variable itself is not widened, or if it has at 2150 // least one user in the loop that is not widened. 2151 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal); 2152 2153 // If the induction variable has a constant integer step value, go ahead and 2154 // get it now. 2155 if (ID.getConstIntStepValue()) 2156 Step = ID.getConstIntStepValue(); 2157 2158 // Try to create a new independent vector induction variable. If we can't 2159 // create the phi node, we will splat the scalar induction variable in each 2160 // loop iteration. 2161 if (VF > 1 && IV->getType() == Induction->getType() && Step && 2162 !Legal->isScalarAfterVectorization(EntryVal)) { 2163 createVectorIntInductionPHI(ID, EntryVal); 2164 VectorizedIV = true; 2165 } 2166 2167 // If we haven't yet vectorized the induction variable, or if we will create 2168 // a scalar one, we need to define the scalar induction variable and step 2169 // values. If we were given a truncation type, truncate the canonical 2170 // induction variable and constant step. Otherwise, derive these values from 2171 // the induction descriptor. 2172 if (!VectorizedIV || NeedsScalarIV) { 2173 if (Trunc) { 2174 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2175 assert(Step && "Truncation requires constant integer step"); 2176 auto StepInt = cast<ConstantInt>(Step)->getSExtValue(); 2177 ScalarIV = Builder.CreateCast(Instruction::Trunc, Induction, TruncType); 2178 Step = ConstantInt::getSigned(TruncType, StepInt); 2179 } else { 2180 ScalarIV = Induction; 2181 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2182 if (IV != OldInduction) { 2183 ScalarIV = Builder.CreateSExtOrTrunc(ScalarIV, IV->getType()); 2184 ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL); 2185 ScalarIV->setName("offset.idx"); 2186 } 2187 if (!Step) { 2188 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2189 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 2190 &*Builder.GetInsertPoint()); 2191 } 2192 } 2193 } 2194 2195 // If we haven't yet vectorized the induction variable, splat the scalar 2196 // induction variable, and build the necessary step vectors. 2197 if (!VectorizedIV) { 2198 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2199 VectorParts Entry(UF); 2200 for (unsigned Part = 0; Part < UF; ++Part) 2201 Entry[Part] = getStepVector(Broadcasted, VF * Part, Step); 2202 VectorLoopValueMap.initVector(EntryVal, Entry); 2203 if (Trunc) 2204 addMetadata(Entry, Trunc); 2205 } 2206 2207 // If an induction variable is only used for counting loop iterations or 2208 // calculating addresses, it doesn't need to be widened. Create scalar steps 2209 // that can be used by instructions we will later scalarize. Note that the 2210 // addition of the scalar steps will not increase the number of instructions 2211 // in the loop in the common case prior to InstCombine. We will be trading 2212 // one vector extract for each scalar step. 2213 if (NeedsScalarIV) 2214 buildScalarSteps(ScalarIV, Step, EntryVal); 2215 } 2216 2217 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2218 Instruction::BinaryOps BinOp) { 2219 // Create and check the types. 
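// For example (illustrative), with a splatted value <%i, %i, %i, %i>,
// StartIdx = 0 and Step = 2, the integer path below computes
// <%i, %i, %i, %i> + <0, 1, 2, 3> * 2 = <%i, %i+2, %i+4, %i+6>.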
2220 assert(Val->getType()->isVectorTy() && "Must be a vector"); 2221 int VLen = Val->getType()->getVectorNumElements(); 2222 2223 Type *STy = Val->getType()->getScalarType(); 2224 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2225 "Induction Step must be an integer or FP"); 2226 assert(Step->getType() == STy && "Step has wrong type"); 2227 2228 SmallVector<Constant *, 8> Indices; 2229 2230 if (STy->isIntegerTy()) { 2231 // Create a vector of consecutive numbers from zero to VF. 2232 for (int i = 0; i < VLen; ++i) 2233 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 2234 2235 // Add the consecutive indices to the vector value. 2236 Constant *Cv = ConstantVector::get(Indices); 2237 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 2238 Step = Builder.CreateVectorSplat(VLen, Step); 2239 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2240 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2241 // which can be found from the original scalar operations. 2242 Step = Builder.CreateMul(Cv, Step); 2243 return Builder.CreateAdd(Val, Step, "induction"); 2244 } 2245 2246 // Floating point induction. 2247 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2248 "Binary Opcode should be specified for FP induction"); 2249 // Create a vector of consecutive numbers from zero to VF. 2250 for (int i = 0; i < VLen; ++i) 2251 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 2252 2253 // Add the consecutive indices to the vector value. 2254 Constant *Cv = ConstantVector::get(Indices); 2255 2256 Step = Builder.CreateVectorSplat(VLen, Step); 2257 2258 // Floating point operations had to be 'fast' to enable the induction. 2259 FastMathFlags Flags; 2260 Flags.setUnsafeAlgebra(); 2261 2262 Value *MulOp = Builder.CreateFMul(Cv, Step); 2263 if (isa<Instruction>(MulOp)) 2264 // Have to check, MulOp may be a constant 2265 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 2266 2267 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2268 if (isa<Instruction>(BOp)) 2269 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2270 return BOp; 2271 } 2272 2273 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2274 Value *EntryVal) { 2275 2276 // We shouldn't have to build scalar steps if we aren't vectorizing. 2277 assert(VF > 1 && "VF should be greater than one"); 2278 2279 // Get the value type and ensure it and the step have the same integer type. 2280 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2281 assert(ScalarIVTy->isIntegerTy() && ScalarIVTy == Step->getType() && 2282 "Val and Step should have the same integer type"); 2283 2284 // Compute the scalar steps and save the results in VectorLoopValueMap. 2285 ScalarParts Entry(UF); 2286 for (unsigned Part = 0; Part < UF; ++Part) { 2287 Entry[Part].resize(VF); 2288 for (unsigned Lane = 0; Lane < VF; ++Lane) { 2289 auto *StartIdx = ConstantInt::get(ScalarIVTy, VF * Part + Lane); 2290 auto *Mul = Builder.CreateMul(StartIdx, Step); 2291 auto *Add = Builder.CreateAdd(ScalarIV, Mul); 2292 Entry[Part][Lane] = Add; 2293 } 2294 } 2295 VectorLoopValueMap.initScalar(EntryVal, Entry); 2296 } 2297 2298 int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) { 2299 assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr"); 2300 auto *SE = PSE.getSE(); 2301 // Make sure that the pointer does not point to structs. 
2302 if (Ptr->getType()->getPointerElementType()->isAggregateType()) 2303 return 0; 2304 2305 // If this value is a pointer induction variable, we know it is consecutive. 2306 PHINode *Phi = dyn_cast_or_null<PHINode>(Ptr); 2307 if (Phi && Inductions.count(Phi)) { 2308 InductionDescriptor II = Inductions[Phi]; 2309 return II.getConsecutiveDirection(); 2310 } 2311 2312 GetElementPtrInst *Gep = getGEPInstruction(Ptr); 2313 if (!Gep) 2314 return 0; 2315 2316 unsigned NumOperands = Gep->getNumOperands(); 2317 Value *GpPtr = Gep->getPointerOperand(); 2318 // If this GEP value is a consecutive pointer induction variable and all of 2319 // the indices are constant, then we know it is consecutive. 2320 Phi = dyn_cast<PHINode>(GpPtr); 2321 if (Phi && Inductions.count(Phi)) { 2322 2323 // Make sure that the pointer does not point to structs. 2324 PointerType *GepPtrType = cast<PointerType>(GpPtr->getType()); 2325 if (GepPtrType->getElementType()->isAggregateType()) 2326 return 0; 2327 2328 // Make sure that all of the index operands are loop invariant. 2329 for (unsigned i = 1; i < NumOperands; ++i) 2330 if (!SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop)) 2331 return 0; 2332 2333 InductionDescriptor II = Inductions[Phi]; 2334 return II.getConsecutiveDirection(); 2335 } 2336 2337 unsigned InductionOperand = getGEPInductionOperand(Gep); 2338 2339 // Check that all of the gep indices are uniform except for our induction 2340 // operand. 2341 for (unsigned i = 0; i != NumOperands; ++i) 2342 if (i != InductionOperand && 2343 !SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop)) 2344 return 0; 2345 2346 // We can emit wide load/stores only if the last non-zero index is the 2347 // induction variable. 2348 const SCEV *Last = nullptr; 2349 if (!getSymbolicStrides() || !getSymbolicStrides()->count(Gep)) 2350 Last = PSE.getSCEV(Gep->getOperand(InductionOperand)); 2351 else { 2352 // Because of the multiplication by a stride we can have a s/zext cast. 2353 // We are going to replace this stride by 1 so the cast is safe to ignore. 2354 // 2355 // %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] 2356 // %0 = trunc i64 %indvars.iv to i32 2357 // %mul = mul i32 %0, %Stride1 2358 // %idxprom = zext i32 %mul to i64 << Safe cast. 2359 // %arrayidx = getelementptr inbounds i32* %B, i64 %idxprom 2360 // 2361 Last = replaceSymbolicStrideSCEV(PSE, *getSymbolicStrides(), 2362 Gep->getOperand(InductionOperand), Gep); 2363 if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(Last)) 2364 Last = 2365 (C->getSCEVType() == scSignExtend || C->getSCEVType() == scZeroExtend) 2366 ? C->getOperand() 2367 : Last; 2368 } 2369 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Last)) { 2370 const SCEV *Step = AR->getStepRecurrence(*SE); 2371 2372 // The memory is consecutive because the last index is consecutive 2373 // and all other indices are loop invariant. 2374 if (Step->isOne()) 2375 return 1; 2376 if (Step->isAllOnesValue()) 2377 return -1; 2378 } 2379 2380 return 0; 2381 } 2382 2383 bool LoopVectorizationLegality::isUniform(Value *V) { 2384 return LAI->isUniform(V); 2385 } 2386 2387 const InnerLoopVectorizer::VectorParts & 2388 InnerLoopVectorizer::getVectorValue(Value *V) { 2389 assert(V != Induction && "The new induction variable should not be used."); 2390 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2391 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2392 2393 // If we have a stride that is replaced by one, do it here. 
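// (Illustrative note) This relies on loop versioning: when a runtime guard
// has established that a symbolic stride equals one, uses of that stride
// inside the vectorized loop may safely be replaced by the constant 1.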
2394 if (Legal->hasStride(V)) 2395 V = ConstantInt::get(V->getType(), 1); 2396 2397 // If we have this scalar in the map, return it. 2398 if (VectorLoopValueMap.hasVector(V)) 2399 return VectorLoopValueMap.VectorMapStorage[V]; 2400 2401 // If the value has not been vectorized, check if it has been scalarized 2402 // instead. If it has been scalarized, and we actually need the value in 2403 // vector form, we will construct the vector values on demand. 2404 if (VectorLoopValueMap.hasScalar(V)) { 2405 2406 // Initialize a new vector map entry. 2407 VectorParts Entry(UF); 2408 2409 // If we aren't vectorizing, we can just copy the scalar map values over to 2410 // the vector map. 2411 if (VF == 1) { 2412 for (unsigned Part = 0; Part < UF; ++Part) 2413 Entry[Part] = getScalarValue(V, Part, 0); 2414 return VectorLoopValueMap.initVector(V, Entry); 2415 } 2416 2417 // Get the last scalarized instruction. This corresponds to the instruction 2418 // we created for the last vector lane on the last unroll iteration. 2419 auto *LastInst = cast<Instruction>(getScalarValue(V, UF - 1, VF - 1)); 2420 2421 // Set the insert point after the last scalarized instruction. This ensures 2422 // the insertelement sequence will directly follow the scalar definitions. 2423 auto OldIP = Builder.saveIP(); 2424 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 2425 Builder.SetInsertPoint(&*NewIP); 2426 2427 // However, if we are vectorizing, we need to construct the vector values 2428 // using insertelement instructions. Since the resulting vectors are stored 2429 // in VectorLoopValueMap, we will only generate the insertelements once. 2430 for (unsigned Part = 0; Part < UF; ++Part) { 2431 Value *Insert = UndefValue::get(VectorType::get(V->getType(), VF)); 2432 for (unsigned Width = 0; Width < VF; ++Width) 2433 Insert = Builder.CreateInsertElement( 2434 Insert, getScalarValue(V, Part, Width), Builder.getInt32(Width)); 2435 Entry[Part] = Insert; 2436 } 2437 Builder.restoreIP(OldIP); 2438 return VectorLoopValueMap.initVector(V, Entry); 2439 } 2440 2441 // If this scalar is unknown, assume that it is a constant or that it is 2442 // loop invariant. Broadcast V and save the value for future uses. 2443 Value *B = getBroadcastInstrs(V); 2444 return VectorLoopValueMap.initVector(V, VectorParts(UF, B)); 2445 } 2446 2447 Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part, 2448 unsigned Lane) { 2449 2450 // If the value is not an instruction contained in the loop, it should 2451 // already be scalar. 2452 if (OrigLoop->isLoopInvariant(V)) 2453 return V; 2454 2455 // If the value from the original loop has not been vectorized, it is 2456 // represented by UF x VF scalar values in the new loop. Return the requested 2457 // scalar value. 2458 if (VectorLoopValueMap.hasScalar(V)) 2459 return VectorLoopValueMap.ScalarMapStorage[V][Part][Lane]; 2460 2461 // If the value has not been scalarized, get its entry in VectorLoopValueMap 2462 // for the given unroll part. If this entry is not a vector type (i.e., the 2463 // vectorization factor is one), there is no need to generate an 2464 // extractelement instruction. 2465 auto *U = getVectorValue(V)[Part]; 2466 if (!U->getType()->isVectorTy()) { 2467 assert(VF == 1 && "Value not scalarized has non-vector type"); 2468 return U; 2469 } 2470 2471 // Otherwise, the value from the original loop has been vectorized and is 2472 // represented by UF vector values. Extract and return the requested scalar 2473 // value from the appropriate vector lane. 
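// For example (illustrative), requesting Part = 0, Lane = 2 of a value
// vectorized as <4 x i32> %v yields: extractelement <4 x i32> %v, i32 2.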
2474 return Builder.CreateExtractElement(U, Builder.getInt32(Lane));
2475 }
2476
2477 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2478 assert(Vec->getType()->isVectorTy() && "Invalid type");
2479 SmallVector<Constant *, 8> ShuffleMask;
2480 for (unsigned i = 0; i < VF; ++i)
2481 ShuffleMask.push_back(Builder.getInt32(VF - i - 1));
2482
2483 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
2484 ConstantVector::get(ShuffleMask),
2485 "reverse");
2486 }
2487
2488 // Get a mask to interleave \p NumVec vectors into a wide vector.
2489 // I.e. <0, VF, VF*2, ..., VF*(NumVec-1), 1, VF+1, VF*2+1, ...>
2490 // E.g. For 2 interleaved vectors, if VF is 4, the mask is:
2491 // <0, 4, 1, 5, 2, 6, 3, 7>
2492 static Constant *getInterleavedMask(IRBuilder<> &Builder, unsigned VF,
2493 unsigned NumVec) {
2494 SmallVector<Constant *, 16> Mask;
2495 for (unsigned i = 0; i < VF; i++)
2496 for (unsigned j = 0; j < NumVec; j++)
2497 Mask.push_back(Builder.getInt32(j * VF + i));
2498
2499 return ConstantVector::get(Mask);
2500 }
2501
2502 // Get the strided mask starting from index \p Start.
2503 // I.e. <Start, Start + Stride, ..., Start + Stride*(VF-1)>
2504 static Constant *getStridedMask(IRBuilder<> &Builder, unsigned Start,
2505 unsigned Stride, unsigned VF) {
2506 SmallVector<Constant *, 16> Mask;
2507 for (unsigned i = 0; i < VF; i++)
2508 Mask.push_back(Builder.getInt32(Start + i * Stride));
2509
2510 return ConstantVector::get(Mask);
2511 }
2512
2513 // Get a mask of two parts: The first part consists of sequential integers
2514 // starting from 0, the second part consists of UNDEFs.
2515 // I.e. <0, 1, 2, ..., NumInt - 1, undef, ..., undef>
2516 static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned NumInt,
2517 unsigned NumUndef) {
2518 SmallVector<Constant *, 16> Mask;
2519 for (unsigned i = 0; i < NumInt; i++)
2520 Mask.push_back(Builder.getInt32(i));
2521
2522 Constant *Undef = UndefValue::get(Builder.getInt32Ty());
2523 for (unsigned i = 0; i < NumUndef; i++)
2524 Mask.push_back(Undef);
2525
2526 return ConstantVector::get(Mask);
2527 }
2528
2529 // Concatenate two vectors with the same element type. The 2nd vector should
2530 // not have more elements than the 1st vector. If the 2nd vector has fewer
2531 // elements, extend it with UNDEFs.
2532 static Value *ConcatenateTwoVectors(IRBuilder<> &Builder, Value *V1,
2533 Value *V2) {
2534 VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
2535 VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
2536 assert(VecTy1 && VecTy2 &&
2537 VecTy1->getScalarType() == VecTy2->getScalarType() &&
2538 "Expect two vectors with the same element type");
2539
2540 unsigned NumElts1 = VecTy1->getNumElements();
2541 unsigned NumElts2 = VecTy2->getNumElements();
2542 assert(NumElts1 >= NumElts2 && "Expect the first vector to have no fewer elements");
2543
2544 if (NumElts1 > NumElts2) {
2545 // Extend with UNDEFs.
2546 Constant *ExtMask =
2547 getSequentialMask(Builder, NumElts2, NumElts1 - NumElts2);
2548 V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask);
2549 }
2550
2551 Constant *Mask = getSequentialMask(Builder, NumElts1 + NumElts2, 0);
2552 return Builder.CreateShuffleVector(V1, V2, Mask);
2553 }
2554
2555 // Concatenate vectors in the given list. All vectors have the same type.
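// For example (illustrative), concatenating three <4 x i32> vectors A, B, C
// proceeds pairwise: the first round produces the <8 x i32> AB and carries C
// over; the second round widens C with undefs and yields the <12 x i32> ABC.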
2556 static Value *ConcatenateVectors(IRBuilder<> &Builder, 2557 ArrayRef<Value *> InputList) { 2558 unsigned NumVec = InputList.size(); 2559 assert(NumVec > 1 && "Should be at least two vectors"); 2560 2561 SmallVector<Value *, 8> ResList; 2562 ResList.append(InputList.begin(), InputList.end()); 2563 do { 2564 SmallVector<Value *, 8> TmpList; 2565 for (unsigned i = 0; i < NumVec - 1; i += 2) { 2566 Value *V0 = ResList[i], *V1 = ResList[i + 1]; 2567 assert((V0->getType() == V1->getType() || i == NumVec - 2) && 2568 "Only the last vector may have a different type"); 2569 2570 TmpList.push_back(ConcatenateTwoVectors(Builder, V0, V1)); 2571 } 2572 2573 // Push the last vector if the total number of vectors is odd. 2574 if (NumVec % 2 != 0) 2575 TmpList.push_back(ResList[NumVec - 1]); 2576 2577 ResList = TmpList; 2578 NumVec = ResList.size(); 2579 } while (NumVec > 1); 2580 2581 return ResList[0]; 2582 } 2583 2584 // Try to vectorize the interleave group that \p Instr belongs to. 2585 // 2586 // E.g. Translate following interleaved load group (factor = 3): 2587 // for (i = 0; i < N; i+=3) { 2588 // R = Pic[i]; // Member of index 0 2589 // G = Pic[i+1]; // Member of index 1 2590 // B = Pic[i+2]; // Member of index 2 2591 // ... // do something to R, G, B 2592 // } 2593 // To: 2594 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2595 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 2596 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 2597 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 2598 // 2599 // Or translate following interleaved store group (factor = 3): 2600 // for (i = 0; i < N; i+=3) { 2601 // ... do something to R, G, B 2602 // Pic[i] = R; // Member of index 0 2603 // Pic[i+1] = G; // Member of index 1 2604 // Pic[i+2] = B; // Member of index 2 2605 // } 2606 // To: 2607 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2608 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 2609 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2610 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2611 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2612 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) { 2613 const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr); 2614 assert(Group && "Fail to get an interleaved access group."); 2615 2616 // Skip if current instruction is not the insert position. 2617 if (Instr != Group->getInsertPos()) 2618 return; 2619 2620 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2621 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2622 Value *Ptr = getPointerOperand(Instr); 2623 2624 // Prepare for the vector type of the interleaved load/store. 2625 Type *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 2626 unsigned InterleaveFactor = Group->getFactor(); 2627 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF); 2628 Type *PtrTy = VecTy->getPointerTo(Ptr->getType()->getPointerAddressSpace()); 2629 2630 // Prepare for the new pointers. 2631 setDebugLocFromInst(Builder, Ptr); 2632 SmallVector<Value *, 2> NewPtrs; 2633 unsigned Index = Group->getIndex(Instr); 2634 2635 // If the group is reverse, adjust the index to refer to the last vector lane 2636 // instead of the first. We adjust the index from the first vector lane, 2637 // rather than directly getting the pointer for lane VF - 1, because the 2638 // pointer operand of the interleaved access is supposed to be uniform. 
For
2639 // uniform instructions, we're only required to generate a value for the
2640 // first vector lane in each unroll iteration.
2641 if (Group->isReverse())
2642 Index += (VF - 1) * Group->getFactor();
2643
2644 for (unsigned Part = 0; Part < UF; Part++) {
2645 Value *NewPtr = getScalarValue(Ptr, Part, 0);
2646
2647 // Note that the current instruction could be a member at any index. We
2648 // need to adjust the address down to the member of index 0.
2649 //
2650 // E.g. a = A[i+1]; // Member of index 1 (Current instruction)
2651 // b = A[i]; // Member of index 0
2652 // The current pointer points to A[i+1]; adjust it to A[i].
2653 //
2654 // E.g. A[i+1] = a; // Member of index 1
2655 // A[i] = b; // Member of index 0
2656 // A[i+2] = c; // Member of index 2 (Current instruction)
2657 // The current pointer points to A[i+2]; adjust it to A[i].
2658 NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
2659
2660 // Cast to the vector pointer type.
2661 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
2662 }
2663
2664 setDebugLocFromInst(Builder, Instr);
2665 Value *UndefVec = UndefValue::get(VecTy);
2666
2667 // Vectorize the interleaved load group.
2668 if (LI) {
2669
2670 // For each unroll part, create a wide load for the group.
2671 SmallVector<Value *, 2> NewLoads;
2672 for (unsigned Part = 0; Part < UF; Part++) {
2673 auto *NewLoad = Builder.CreateAlignedLoad(
2674 NewPtrs[Part], Group->getAlignment(), "wide.vec");
2675 addMetadata(NewLoad, Instr);
2676 NewLoads.push_back(NewLoad);
2677 }
2678
2679 // For each member in the group, shuffle out the appropriate data from the
2680 // wide loads.
2681 for (unsigned I = 0; I < InterleaveFactor; ++I) {
2682 Instruction *Member = Group->getMember(I);
2683
2684 // Skip the gaps in the group.
2685 if (!Member)
2686 continue;
2687
2688 VectorParts Entry(UF);
2689 Constant *StrideMask = getStridedMask(Builder, I, InterleaveFactor, VF);
2690 for (unsigned Part = 0; Part < UF; Part++) {
2691 Value *StridedVec = Builder.CreateShuffleVector(
2692 NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2693
2694 // If this member has a different type, cast the result accordingly.
2695 if (Member->getType() != ScalarTy) {
2696 VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2697 StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy);
2698 }
2699
2700 Entry[Part] =
2701 Group->isReverse() ? reverseVector(StridedVec) : StridedVec;
2702 }
2703 VectorLoopValueMap.initVector(Member, Entry);
2704 }
2705 return;
2706 }
2707
2708 // The subvector type for the current instruction.
2709 VectorType *SubVT = VectorType::get(ScalarTy, VF);
2710
2711 // Vectorize the interleaved store group.
2712 for (unsigned Part = 0; Part < UF; Part++) {
2713 // Collect the stored vector from each member.
2714 SmallVector<Value *, 4> StoredVecs;
2715 for (unsigned i = 0; i < InterleaveFactor; i++) {
2716 // An interleaved store group doesn't allow gaps, so each index has a member.
2717 Instruction *Member = Group->getMember(i);
2718 assert(Member && "Fail to get a member from an interleaved store group");
2719
2720 Value *StoredVec =
2721 getVectorValue(cast<StoreInst>(Member)->getValueOperand())[Part];
2722 if (Group->isReverse())
2723 StoredVec = reverseVector(StoredVec);
2724
2725 // If this member has a different type, cast it to a unified type.
2726 if (StoredVec->getType() != SubVT)
2727 StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT);
2728
2729 StoredVecs.push_back(StoredVec);
2730 }
2731
2732 // Concatenate all vectors into a wide vector.
2733 Value *WideVec = ConcatenateVectors(Builder, StoredVecs); 2734 2735 // Interleave the elements in the wide vector. 2736 Constant *IMask = getInterleavedMask(Builder, VF, InterleaveFactor); 2737 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask, 2738 "interleaved.vec"); 2739 2740 Instruction *NewStoreInstr = 2741 Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment()); 2742 addMetadata(NewStoreInstr, Instr); 2743 } 2744 } 2745 2746 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) { 2747 // Attempt to issue a wide load. 2748 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2749 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2750 2751 assert((LI || SI) && "Invalid Load/Store instruction"); 2752 2753 // Try to vectorize the interleave group if this access is interleaved. 2754 if (Legal->isAccessInterleaved(Instr)) 2755 return vectorizeInterleaveGroup(Instr); 2756 2757 Type *ScalarDataTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 2758 Type *DataTy = VectorType::get(ScalarDataTy, VF); 2759 Value *Ptr = getPointerOperand(Instr); 2760 unsigned Alignment = LI ? LI->getAlignment() : SI->getAlignment(); 2761 // An alignment of 0 means target abi alignment. We need to use the scalar's 2762 // target abi alignment in such a case. 2763 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2764 if (!Alignment) 2765 Alignment = DL.getABITypeAlignment(ScalarDataTy); 2766 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2767 2768 // Scalarize the memory instruction if necessary. 2769 if (Legal->memoryInstructionMustBeScalarized(Instr, VF)) 2770 return scalarizeInstruction(Instr, Legal->isPredicatedStore(Instr)); 2771 2772 // Determine if the pointer operand of the access is either consecutive or 2773 // reverse consecutive. 2774 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 2775 bool Reverse = ConsecutiveStride < 0; 2776 2777 // Determine if either a gather or scatter operation is legal. 2778 bool CreateGatherScatter = 2779 !ConsecutiveStride && Legal->isLegalGatherOrScatter(Instr); 2780 2781 VectorParts VectorGep; 2782 2783 // Handle consecutive loads/stores. 2784 GetElementPtrInst *Gep = getGEPInstruction(Ptr); 2785 if (ConsecutiveStride) { 2786 if (Gep && Legal->isInductionVariable(Gep->getPointerOperand())) { 2787 setDebugLocFromInst(Builder, Gep); 2788 auto *FirstBasePtr = getScalarValue(Gep->getPointerOperand(), 0, 0); 2789 2790 // Create the new GEP with the new induction variable. 2791 GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone()); 2792 Gep2->setOperand(0, FirstBasePtr); 2793 Gep2->setName("gep.indvar.base"); 2794 Ptr = Builder.Insert(Gep2); 2795 } else if (Gep) { 2796 setDebugLocFromInst(Builder, Gep); 2797 assert(PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getPointerOperand()), 2798 OrigLoop) && 2799 "Base ptr must be invariant"); 2800 // The last index does not have to be the induction. It can be 2801 // consecutive and be a function of the index. For example A[I+1]; 2802 unsigned NumOperands = Gep->getNumOperands(); 2803 unsigned InductionOperand = getGEPInductionOperand(Gep); 2804 // Create the new GEP with the new induction variable. 2805 GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone()); 2806 2807 for (unsigned i = 0; i < NumOperands; ++i) { 2808 Value *GepOperand = Gep->getOperand(i); 2809 Instruction *GepOperandInst = dyn_cast<Instruction>(GepOperand); 2810 2811 // Update last index or loop invariant instruction anchored in loop. 
        if (i == InductionOperand ||
            (GepOperandInst && OrigLoop->contains(GepOperandInst))) {
          assert((i == InductionOperand ||
                  PSE.getSE()->isLoopInvariant(PSE.getSCEV(GepOperandInst),
                                               OrigLoop)) &&
                 "Must be last index or loop invariant");

          Gep2->setOperand(i, getScalarValue(GepOperand, 0, 0));
          Gep2->setName("gep.indvar.idx");
        }
      }
      Ptr = Builder.Insert(Gep2);
    } else { // No GEP
      // Use the induction element ptr.
      assert(isa<PHINode>(Ptr) && "Invalid induction ptr");
      setDebugLocFromInst(Builder, Ptr);
      Ptr = getScalarValue(Ptr, 0, 0);
    }
  } else {
    // At this point we should have the vector version of the GEP for a gather
    // or scatter.
    assert(CreateGatherScatter && "The instruction should be scalarized");
    if (Gep) {
      // Vectorize the GEP across UF parts. We want to get a vector value for
      // the base and for each index that's defined inside the loop, even if it
      // is loop-invariant but wasn't hoisted out. Otherwise we want to keep
      // them scalar.
      SmallVector<VectorParts, 4> OpsV;
      for (Value *Op : Gep->operands()) {
        Instruction *SrcInst = dyn_cast<Instruction>(Op);
        if (SrcInst && OrigLoop->contains(SrcInst))
          OpsV.push_back(getVectorValue(Op));
        else
          OpsV.push_back(VectorParts(UF, Op));
      }
      for (unsigned Part = 0; Part < UF; ++Part) {
        SmallVector<Value *, 4> Ops;
        Value *GEPBasePtr = OpsV[0][Part];
        for (unsigned i = 1; i < Gep->getNumOperands(); i++)
          Ops.push_back(OpsV[i][Part]);
        Value *NewGep = Builder.CreateGEP(GEPBasePtr, Ops, "VectorGep");
        cast<GetElementPtrInst>(NewGep)->setIsInBounds(Gep->isInBounds());
        assert(NewGep->getType()->isVectorTy() && "Expected vector GEP");

        NewGep =
            Builder.CreateBitCast(NewGep, VectorType::get(Ptr->getType(), VF));
        VectorGep.push_back(NewGep);
      }
    } else
      VectorGep = getVectorValue(Ptr);
  }

  VectorParts Mask = createBlockInMask(Instr->getParent());
  // Handle Stores:
  if (SI) {
    assert(!Legal->isUniform(SI->getPointerOperand()) &&
           "We do not allow storing to uniform addresses");
    setDebugLocFromInst(Builder, SI);
    // We don't want to update the value in the map as it might be used in
    // another expression. So don't use a reference type for "StoredVal".
    VectorParts StoredVal = getVectorValue(SI->getValueOperand());

    for (unsigned Part = 0; Part < UF; ++Part) {
      Instruction *NewSI = nullptr;
      if (CreateGatherScatter) {
        Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr;
        NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part],
                                            Alignment, MaskPart);
      } else {
        // Calculate the pointer for the specific unroll-part.
        Value *PartPtr =
            Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));

        if (Reverse) {
          // If we store to reverse consecutive memory locations, then we need
          // to reverse the order of elements in the stored value.
          StoredVal[Part] = reverseVector(StoredVal[Part]);
          // If the address is consecutive but reversed, then the
          // wide store needs to start at the last vector element.
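          // For example (illustrative), with VF = 4 and Part = 0 the two GEPs
          // below compute Ptr + 0 + (1 - 4) = Ptr - 3, so the wide access
          // covers elements Ptr[-3] .. Ptr[0], and reversing the value being
          // stored restores the original per-element order.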
2890 PartPtr = 2891 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 2892 PartPtr = 2893 Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 2894 Mask[Part] = reverseVector(Mask[Part]); 2895 } 2896 2897 Value *VecPtr = 2898 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2899 2900 if (Legal->isMaskRequired(SI)) 2901 NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment, 2902 Mask[Part]); 2903 else 2904 NewSI = 2905 Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment); 2906 } 2907 addMetadata(NewSI, SI); 2908 } 2909 return; 2910 } 2911 2912 // Handle loads. 2913 assert(LI && "Must have a load instruction"); 2914 setDebugLocFromInst(Builder, LI); 2915 VectorParts Entry(UF); 2916 for (unsigned Part = 0; Part < UF; ++Part) { 2917 Instruction *NewLI; 2918 if (CreateGatherScatter) { 2919 Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr; 2920 NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, MaskPart, 2921 0, "wide.masked.gather"); 2922 Entry[Part] = NewLI; 2923 } else { 2924 // Calculate the pointer for the specific unroll-part. 2925 Value *PartPtr = 2926 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 2927 2928 if (Reverse) { 2929 // If the address is consecutive but reversed, then the 2930 // wide load needs to start at the last vector element. 2931 PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 2932 PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 2933 Mask[Part] = reverseVector(Mask[Part]); 2934 } 2935 2936 Value *VecPtr = 2937 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2938 if (Legal->isMaskRequired(LI)) 2939 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 2940 UndefValue::get(DataTy), 2941 "wide.masked.load"); 2942 else 2943 NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load"); 2944 Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI; 2945 } 2946 addMetadata(NewLI, LI); 2947 } 2948 VectorLoopValueMap.initVector(Instr, Entry); 2949 } 2950 2951 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2952 bool IfPredicateInstr) { 2953 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2954 DEBUG(dbgs() << "LV: Scalarizing" 2955 << (IfPredicateInstr ? " and predicating:" : ":") << *Instr 2956 << '\n'); 2957 // Holds vector parameters or scalars, in case of uniform vals. 2958 SmallVector<VectorParts, 4> Params; 2959 2960 setDebugLocFromInst(Builder, Instr); 2961 2962 // Does this instruction return a value ? 2963 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2964 2965 // Initialize a new scalar map entry. 2966 ScalarParts Entry(UF); 2967 2968 VectorParts Cond; 2969 if (IfPredicateInstr) 2970 Cond = createBlockInMask(Instr->getParent()); 2971 2972 // For each vector unroll 'part': 2973 for (unsigned Part = 0; Part < UF; ++Part) { 2974 Entry[Part].resize(VF); 2975 // For each scalar that we create: 2976 for (unsigned Width = 0; Width < VF; ++Width) { 2977 2978 // Start if-block. 2979 Value *Cmp = nullptr; 2980 if (IfPredicateInstr) { 2981 Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Width)); 2982 Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp, 2983 ConstantInt::get(Cmp->getType(), 1)); 2984 } 2985 2986 Instruction *Cloned = Instr->clone(); 2987 if (!IsVoidRetTy) 2988 Cloned->setName(Instr->getName() + ".cloned"); 2989 2990 // Replace the operands of the cloned instructions with their scalar 2991 // equivalents in the new loop. 
2992 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 2993 auto *NewOp = getScalarValue(Instr->getOperand(op), Part, Width); 2994 Cloned->setOperand(op, NewOp); 2995 } 2996 addNewMetadata(Cloned, Instr); 2997 2998 // Place the cloned scalar in the new loop. 2999 Builder.Insert(Cloned); 3000 3001 // Add the cloned scalar to the scalar map entry. 3002 Entry[Part][Width] = Cloned; 3003 3004 // If we just cloned a new assumption, add it the assumption cache. 3005 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 3006 if (II->getIntrinsicID() == Intrinsic::assume) 3007 AC->registerAssumption(II); 3008 3009 // End if-block. 3010 if (IfPredicateInstr) 3011 PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp)); 3012 } 3013 } 3014 VectorLoopValueMap.initScalar(Instr, Entry); 3015 } 3016 3017 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 3018 Value *End, Value *Step, 3019 Instruction *DL) { 3020 BasicBlock *Header = L->getHeader(); 3021 BasicBlock *Latch = L->getLoopLatch(); 3022 // As we're just creating this loop, it's possible no latch exists 3023 // yet. If so, use the header as this will be a single block loop. 3024 if (!Latch) 3025 Latch = Header; 3026 3027 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 3028 setDebugLocFromInst(Builder, getDebugLocFromInstOrOperands(OldInduction)); 3029 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 3030 3031 Builder.SetInsertPoint(Latch->getTerminator()); 3032 3033 // Create i+1 and fill the PHINode. 3034 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 3035 Induction->addIncoming(Start, L->getLoopPreheader()); 3036 Induction->addIncoming(Next, Latch); 3037 // Create the compare. 3038 Value *ICmp = Builder.CreateICmpEQ(Next, End); 3039 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 3040 3041 // Now we have two terminators. Remove the old one from the block. 3042 Latch->getTerminator()->eraseFromParent(); 3043 3044 return Induction; 3045 } 3046 3047 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 3048 if (TripCount) 3049 return TripCount; 3050 3051 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3052 // Find the loop boundaries. 3053 ScalarEvolution *SE = PSE.getSE(); 3054 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 3055 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 3056 "Invalid loop count"); 3057 3058 Type *IdxTy = Legal->getWidestInductionType(); 3059 3060 // The exit count might have the type of i64 while the phi is i32. This can 3061 // happen if we have an induction variable that is sign extended before the 3062 // compare. The only way that we get a backedge taken count is that the 3063 // induction variable was signed and as such will not overflow. In such a case 3064 // truncation is legal. 3065 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() > 3066 IdxTy->getPrimitiveSizeInBits()) 3067 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3068 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3069 3070 // Get the total trip count from the count by adding 1. 3071 const SCEV *ExitCount = SE->getAddExpr( 3072 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3073 3074 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3075 3076 // Expand the trip count and place the new instructions in the preheader. 3077 // Notice that the pre-header does not change, only the loop body. 
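  // For example (illustrative), for a loop written as
  //   for (i = 0; i < n; ++i) { ... }
  // the backedge-taken count is n - 1, so the trip count expanded here is n.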
3078 SCEVExpander Exp(*SE, DL, "induction"); 3079 3080 // Count holds the overall loop count (N). 3081 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3082 L->getLoopPreheader()->getTerminator()); 3083 3084 if (TripCount->getType()->isPointerTy()) 3085 TripCount = 3086 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3087 L->getLoopPreheader()->getTerminator()); 3088 3089 return TripCount; 3090 } 3091 3092 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3093 if (VectorTripCount) 3094 return VectorTripCount; 3095 3096 Value *TC = getOrCreateTripCount(L); 3097 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3098 3099 // Now we need to generate the expression for the part of the loop that the 3100 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3101 // iterations are not required for correctness, or N - Step, otherwise. Step 3102 // is equal to the vectorization factor (number of SIMD elements) times the 3103 // unroll factor (number of SIMD instructions). 3104 Constant *Step = ConstantInt::get(TC->getType(), VF * UF); 3105 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3106 3107 // If there is a non-reversed interleaved group that may speculatively access 3108 // memory out-of-bounds, we need to ensure that there will be at least one 3109 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 3110 // the trip count, we set the remainder to be equal to the step. If the step 3111 // does not evenly divide the trip count, no adjustment is necessary since 3112 // there will already be scalar iterations. Note that the minimum iterations 3113 // check ensures that N >= Step. 3114 if (VF > 1 && Legal->requiresScalarEpilogue()) { 3115 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3116 R = Builder.CreateSelect(IsZero, Step, R); 3117 } 3118 3119 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3120 3121 return VectorTripCount; 3122 } 3123 3124 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3125 BasicBlock *Bypass) { 3126 Value *Count = getOrCreateTripCount(L); 3127 BasicBlock *BB = L->getLoopPreheader(); 3128 IRBuilder<> Builder(BB->getTerminator()); 3129 3130 // Generate code to check that the loop's trip count that we computed by 3131 // adding one to the backedge-taken count will not overflow. 3132 Value *CheckMinIters = Builder.CreateICmpULT( 3133 Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check"); 3134 3135 BasicBlock *NewBB = 3136 BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked"); 3137 // Update dominator tree immediately if the generated block is a 3138 // LoopBypassBlock because SCEV expansions to generate loop bypass 3139 // checks may query it before the current function is finished. 3140 DT->addNewBlock(NewBB, BB); 3141 if (L->getParentLoop()) 3142 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 3143 ReplaceInstWithInst(BB->getTerminator(), 3144 BranchInst::Create(Bypass, NewBB, CheckMinIters)); 3145 LoopBypassBlocks.push_back(BB); 3146 } 3147 3148 void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L, 3149 BasicBlock *Bypass) { 3150 Value *TC = getOrCreateVectorTripCount(L); 3151 BasicBlock *BB = L->getLoopPreheader(); 3152 IRBuilder<> Builder(BB->getTerminator()); 3153 3154 // Now, compare the new count to zero. If it is zero skip the vector loop and 3155 // jump to the scalar loop. 
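  // For example (illustrative), with VF = 4 and UF = 2 the step is 8, so a
  // trip count of 21 gives a vector trip count of 16 and leaves 5 scalar
  // iterations, while a trip count below 8 gives a vector trip count of 0,
  // in which case the check below skips the vector loop entirely.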
  Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
                                    "cmp.zero");

  // Split the preheader and branch to the scalar loop when the vector trip
  // count is zero.
  BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, Cmp));
  LoopBypassBlocks.push_back(BB);
}

void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *BB = L->getLoopPreheader();

  // Generate the code to check the SCEV assumptions that we made.
  // We want the new basic block to start at the first instruction in a
  // sequence of instructions that form a check.
  SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
                   "scev.check");
  Value *SCEVCheck =
      Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());

  if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
    if (C->isZero())
      return;

  // Create a new block containing the stride check.
  BB->setName("vector.scevcheck");
  auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, SCEVCheck));
  LoopBypassBlocks.push_back(BB);
  AddedSafetyChecks = true;
}

void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *BB = L->getLoopPreheader();

  // Generate the code that checks at runtime whether the arrays overlap. We
  // put the checks into a separate block to make the more common case of few
  // elements faster.
  Instruction *FirstCheckInst;
  Instruction *MemRuntimeCheck;
  std::tie(FirstCheckInst, MemRuntimeCheck) =
      Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
  if (!MemRuntimeCheck)
    return;

  // Create a new block containing the memory check.
  BB->setName("vector.memcheck");
  auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
  LoopBypassBlocks.push_back(BB);
  AddedSafetyChecks = true;

  // We currently don't use LoopVersioning for the actual loop cloning but we
  // still use it to add the noalias metadata.
3232 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 3233 PSE.getSE()); 3234 LVer->prepareNoAliasMetadata(); 3235 } 3236 3237 void InnerLoopVectorizer::createEmptyLoop() { 3238 /* 3239 In this function we generate a new loop. The new loop will contain 3240 the vectorized instructions while the old loop will continue to run the 3241 scalar remainder. 3242 3243 [ ] <-- loop iteration number check. 3244 / | 3245 / v 3246 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3247 | / | 3248 | / v 3249 || [ ] <-- vector pre header. 3250 |/ | 3251 | v 3252 | [ ] \ 3253 | [ ]_| <-- vector loop. 3254 | | 3255 | v 3256 | -[ ] <--- middle-block. 3257 | / | 3258 | / v 3259 -|- >[ ] <--- new preheader. 3260 | | 3261 | v 3262 | [ ] \ 3263 | [ ]_| <-- old scalar loop to handle remainder. 3264 \ | 3265 \ v 3266 >[ ] <-- exit block. 3267 ... 3268 */ 3269 3270 BasicBlock *OldBasicBlock = OrigLoop->getHeader(); 3271 BasicBlock *VectorPH = OrigLoop->getLoopPreheader(); 3272 BasicBlock *ExitBlock = OrigLoop->getExitBlock(); 3273 assert(VectorPH && "Invalid loop structure"); 3274 assert(ExitBlock && "Must have an exit block"); 3275 3276 // Some loops have a single integer induction variable, while other loops 3277 // don't. One example is c++ iterators that often have multiple pointer 3278 // induction variables. In the code below we also support a case where we 3279 // don't have a single induction variable. 3280 // 3281 // We try to obtain an induction variable from the original loop as hard 3282 // as possible. However if we don't find one that: 3283 // - is an integer 3284 // - counts from zero, stepping by one 3285 // - is the size of the widest induction variable type 3286 // then we create a new one. 3287 OldInduction = Legal->getInduction(); 3288 Type *IdxTy = Legal->getWidestInductionType(); 3289 3290 // Split the single block loop into the two loop structure described above. 3291 BasicBlock *VecBody = 3292 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body"); 3293 BasicBlock *MiddleBlock = 3294 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block"); 3295 BasicBlock *ScalarPH = 3296 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph"); 3297 3298 // Create and register the new vector loop. 3299 Loop *Lp = new Loop(); 3300 Loop *ParentLoop = OrigLoop->getParentLoop(); 3301 3302 // Insert the new loop into the loop nest and register the new basic blocks 3303 // before calling any utilities such as SCEV that require valid LoopInfo. 3304 if (ParentLoop) { 3305 ParentLoop->addChildLoop(Lp); 3306 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI); 3307 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI); 3308 } else { 3309 LI->addTopLevelLoop(Lp); 3310 } 3311 Lp->addBasicBlockToLoop(VecBody, *LI); 3312 3313 // Find the loop boundaries. 3314 Value *Count = getOrCreateTripCount(Lp); 3315 3316 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3317 3318 // We need to test whether the backedge-taken count is uint##_max. Adding one 3319 // to it will cause overflow and an incorrect loop trip count in the vector 3320 // body. In case of overflow we want to directly jump to the scalar remainder 3321 // loop. 3322 emitMinimumIterationCountCheck(Lp, ScalarPH); 3323 // Now, compare the new count to zero. If it is zero skip the vector loop and 3324 // jump to the scalar loop. 3325 emitVectorLoopEnteredCheck(Lp, ScalarPH); 3326 // Generate the code to check any assumptions that we've made for SCEV 3327 // expressions. 
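  // For example (illustrative), if vectorization assumed that a narrow
  // induction variable does not wrap when widened, a runtime check of that
  // predicate is emitted here.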
3328 emitSCEVChecks(Lp, ScalarPH); 3329 3330 // Generate the code that checks in runtime if arrays overlap. We put the 3331 // checks into a separate block to make the more common case of few elements 3332 // faster. 3333 emitMemRuntimeChecks(Lp, ScalarPH); 3334 3335 // Generate the induction variable. 3336 // The loop step is equal to the vectorization factor (num of SIMD elements) 3337 // times the unroll factor (num of SIMD instructions). 3338 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3339 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 3340 Induction = 3341 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3342 getDebugLocFromInstOrOperands(OldInduction)); 3343 3344 // We are going to resume the execution of the scalar loop. 3345 // Go over all of the induction variables that we found and fix the 3346 // PHIs that are left in the scalar version of the loop. 3347 // The starting values of PHI nodes depend on the counter of the last 3348 // iteration in the vectorized loop. 3349 // If we come from a bypass edge then we need to start from the original 3350 // start value. 3351 3352 // This variable saves the new starting index for the scalar loop. It is used 3353 // to test if there are any tail iterations left once the vector loop has 3354 // completed. 3355 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars(); 3356 for (auto &InductionEntry : *List) { 3357 PHINode *OrigPhi = InductionEntry.first; 3358 InductionDescriptor II = InductionEntry.second; 3359 3360 // Create phi nodes to merge from the backedge-taken check block. 3361 PHINode *BCResumeVal = PHINode::Create( 3362 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator()); 3363 Value *EndValue; 3364 if (OrigPhi == OldInduction) { 3365 // We know what the end value is. 3366 EndValue = CountRoundDown; 3367 } else { 3368 IRBuilder<> B(LoopBypassBlocks.back()->getTerminator()); 3369 Type *StepType = II.getStep()->getType(); 3370 Instruction::CastOps CastOp = 3371 CastInst::getCastOpcode(CountRoundDown, true, StepType, true); 3372 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd"); 3373 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 3374 EndValue = II.transform(B, CRD, PSE.getSE(), DL); 3375 EndValue->setName("ind.end"); 3376 } 3377 3378 // The new PHI merges the original incoming value, in case of a bypass, 3379 // or the value at the end of the vectorized loop. 3380 BCResumeVal->addIncoming(EndValue, MiddleBlock); 3381 3382 // Fix up external users of the induction variable. 3383 fixupIVUsers(OrigPhi, II, CountRoundDown, EndValue, MiddleBlock); 3384 3385 // Fix the scalar body counter (PHI node). 3386 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH); 3387 3388 // The old induction's phi node in the scalar body needs the truncated 3389 // value. 3390 for (BasicBlock *BB : LoopBypassBlocks) 3391 BCResumeVal->addIncoming(II.getStartValue(), BB); 3392 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal); 3393 } 3394 3395 // Add a check in the middle block to see if we have completed 3396 // all of the iterations in the first vector loop. 3397 // If (N - N%VF) == N, then we *don't* need to run the remainder. 
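  // For example (illustrative), with VF * UF = 4 a trip count of 16 gives
  // CountRoundDown == 16 == N, so the middle block branches straight to the
  // exit, while a trip count of 18 leaves two iterations for the scalar loop.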
3398 Value *CmpN = 3399 CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 3400 CountRoundDown, "cmp.n", MiddleBlock->getTerminator()); 3401 ReplaceInstWithInst(MiddleBlock->getTerminator(), 3402 BranchInst::Create(ExitBlock, ScalarPH, CmpN)); 3403 3404 // Get ready to start creating new instructions into the vectorized body. 3405 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt()); 3406 3407 // Save the state. 3408 LoopVectorPreHeader = Lp->getLoopPreheader(); 3409 LoopScalarPreHeader = ScalarPH; 3410 LoopMiddleBlock = MiddleBlock; 3411 LoopExitBlock = ExitBlock; 3412 LoopVectorBody = VecBody; 3413 LoopScalarBody = OldBasicBlock; 3414 3415 // Keep all loop hints from the original loop on the vector loop (we'll 3416 // replace the vectorizer-specific hints below). 3417 if (MDNode *LID = OrigLoop->getLoopID()) 3418 Lp->setLoopID(LID); 3419 3420 LoopVectorizeHints Hints(Lp, true, *ORE); 3421 Hints.setAlreadyVectorized(); 3422 } 3423 3424 // Fix up external users of the induction variable. At this point, we are 3425 // in LCSSA form, with all external PHIs that use the IV having one input value, 3426 // coming from the remainder loop. We need those PHIs to also have a correct 3427 // value for the IV when arriving directly from the middle block. 3428 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3429 const InductionDescriptor &II, 3430 Value *CountRoundDown, Value *EndValue, 3431 BasicBlock *MiddleBlock) { 3432 // There are two kinds of external IV usages - those that use the value 3433 // computed in the last iteration (the PHI) and those that use the penultimate 3434 // value (the value that feeds into the phi from the loop latch). 3435 // We allow both, but they, obviously, have different values. 3436 3437 assert(OrigLoop->getExitBlock() && "Expected a single exit block"); 3438 3439 DenseMap<Value *, Value *> MissingVals; 3440 3441 // An external user of the last iteration's value should see the value that 3442 // the remainder loop uses to initialize its own IV. 3443 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3444 for (User *U : PostInc->users()) { 3445 Instruction *UI = cast<Instruction>(U); 3446 if (!OrigLoop->contains(UI)) { 3447 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3448 MissingVals[UI] = EndValue; 3449 } 3450 } 3451 3452 // An external user of the penultimate value need to see EndValue - Step. 3453 // The simplest way to get this is to recompute it from the constituent SCEVs, 3454 // that is Start + (Step * (CRD - 1)). 3455 for (User *U : OrigPhi->users()) { 3456 auto *UI = cast<Instruction>(U); 3457 if (!OrigLoop->contains(UI)) { 3458 const DataLayout &DL = 3459 OrigLoop->getHeader()->getModule()->getDataLayout(); 3460 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3461 3462 IRBuilder<> B(MiddleBlock->getTerminator()); 3463 Value *CountMinusOne = B.CreateSub( 3464 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3465 Value *CMO = B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType(), 3466 "cast.cmo"); 3467 Value *Escape = II.transform(B, CMO, PSE.getSE(), DL); 3468 Escape->setName("ind.escape"); 3469 MissingVals[UI] = Escape; 3470 } 3471 } 3472 3473 for (auto &I : MissingVals) { 3474 PHINode *PHI = cast<PHINode>(I.first); 3475 // One corner case we have to handle is two IVs "chasing" each-other, 3476 // that is %IV2 = phi [...], [ %IV1, %latch ] 3477 // In this case, if IV1 has an external use, we need to avoid adding both 3478 // "last value of IV1" and "penultimate value of IV2". 
So, verify that we 3479 // don't already have an incoming value for the middle block. 3480 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3481 PHI->addIncoming(I.second, MiddleBlock); 3482 } 3483 } 3484 3485 namespace { 3486 struct CSEDenseMapInfo { 3487 static bool canHandle(Instruction *I) { 3488 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3489 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3490 } 3491 static inline Instruction *getEmptyKey() { 3492 return DenseMapInfo<Instruction *>::getEmptyKey(); 3493 } 3494 static inline Instruction *getTombstoneKey() { 3495 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3496 } 3497 static unsigned getHashValue(Instruction *I) { 3498 assert(canHandle(I) && "Unknown instruction!"); 3499 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3500 I->value_op_end())); 3501 } 3502 static bool isEqual(Instruction *LHS, Instruction *RHS) { 3503 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3504 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3505 return LHS == RHS; 3506 return LHS->isIdenticalTo(RHS); 3507 } 3508 }; 3509 } 3510 3511 ///\brief Perform cse of induction variable instructions. 3512 static void cse(BasicBlock *BB) { 3513 // Perform simple cse. 3514 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3515 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3516 Instruction *In = &*I++; 3517 3518 if (!CSEDenseMapInfo::canHandle(In)) 3519 continue; 3520 3521 // Check if we can replace this instruction with any of the 3522 // visited instructions. 3523 if (Instruction *V = CSEMap.lookup(In)) { 3524 In->replaceAllUsesWith(V); 3525 In->eraseFromParent(); 3526 continue; 3527 } 3528 3529 CSEMap[In] = In; 3530 } 3531 } 3532 3533 /// \brief Adds a 'fast' flag to floating point operations. 3534 static Value *addFastMathFlag(Value *V) { 3535 if (isa<FPMathOperator>(V)) { 3536 FastMathFlags Flags; 3537 Flags.setUnsafeAlgebra(); 3538 cast<Instruction>(V)->setFastMathFlags(Flags); 3539 } 3540 return V; 3541 } 3542 3543 /// \brief Estimate the overhead of scalarizing a value based on its type. 3544 /// Insert and Extract are set if the result needs to be inserted and/or 3545 /// extracted from vectors. 3546 /// If the instruction is also to be predicated, add the cost of a PHI 3547 /// node to the insertion cost. 3548 static unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract, 3549 bool Predicated, 3550 const TargetTransformInfo &TTI) { 3551 if (Ty->isVoidTy()) 3552 return 0; 3553 3554 assert(Ty->isVectorTy() && "Can only scalarize vectors"); 3555 unsigned Cost = 0; 3556 3557 for (unsigned I = 0, E = Ty->getVectorNumElements(); I < E; ++I) { 3558 if (Extract) 3559 Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, I); 3560 if (Insert) { 3561 Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, I); 3562 if (Predicated) 3563 Cost += TTI.getCFInstrCost(Instruction::PHI); 3564 } 3565 } 3566 3567 // We assume that if-converted blocks have a 50% chance of being executed. 3568 // Predicated scalarized instructions are avoided due to the CF that bypasses 3569 // turned off lanes. The extracts and inserts will be sinked/hoisted to the 3570 // predicated basic-block and are subjected to the same assumption. 3571 if (Predicated) 3572 Cost /= 2; 3573 3574 return Cost; 3575 } 3576 3577 /// \brief Estimate the overhead of scalarizing an Instruction based on the 3578 /// types of its operands and return value. 
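/// For example (illustrative), scalarizing an operation that produces a
/// <4 x i32> from two <4 x i32> operands is charged four insertelement costs
/// for the result plus four extractelement costs per operand, as priced by
/// TTI.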
3579 static unsigned getScalarizationOverhead(SmallVectorImpl<Type *> &OpTys, 3580 Type *RetTy, bool Predicated, 3581 const TargetTransformInfo &TTI) { 3582 unsigned ScalarizationCost = 3583 getScalarizationOverhead(RetTy, true, false, Predicated, TTI); 3584 3585 for (Type *Ty : OpTys) 3586 ScalarizationCost += 3587 getScalarizationOverhead(Ty, false, true, Predicated, TTI); 3588 3589 return ScalarizationCost; 3590 } 3591 3592 /// \brief Estimate the overhead of scalarizing an instruction. This is a 3593 /// convenience wrapper for the type-based getScalarizationOverhead API. 3594 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF, 3595 bool Predicated, 3596 const TargetTransformInfo &TTI) { 3597 if (VF == 1) 3598 return 0; 3599 3600 Type *RetTy = ToVectorTy(I->getType(), VF); 3601 3602 SmallVector<Type *, 4> OpTys; 3603 unsigned OperandsNum = I->getNumOperands(); 3604 for (unsigned OpInd = 0; OpInd < OperandsNum; ++OpInd) 3605 OpTys.push_back(ToVectorTy(I->getOperand(OpInd)->getType(), VF)); 3606 3607 return getScalarizationOverhead(OpTys, RetTy, Predicated, TTI); 3608 } 3609 3610 // Estimate cost of a call instruction CI if it were vectorized with factor VF. 3611 // Return the cost of the instruction, including scalarization overhead if it's 3612 // needed. The flag NeedToScalarize shows if the call needs to be scalarized - 3613 // i.e. either vector version isn't available, or is too expensive. 3614 static unsigned getVectorCallCost(CallInst *CI, unsigned VF, 3615 const TargetTransformInfo &TTI, 3616 const TargetLibraryInfo *TLI, 3617 bool &NeedToScalarize) { 3618 Function *F = CI->getCalledFunction(); 3619 StringRef FnName = CI->getCalledFunction()->getName(); 3620 Type *ScalarRetTy = CI->getType(); 3621 SmallVector<Type *, 4> Tys, ScalarTys; 3622 for (auto &ArgOp : CI->arg_operands()) 3623 ScalarTys.push_back(ArgOp->getType()); 3624 3625 // Estimate cost of scalarized vector call. The source operands are assumed 3626 // to be vectors, so we need to extract individual elements from there, 3627 // execute VF scalar calls, and then gather the result into the vector return 3628 // value. 3629 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3630 if (VF == 1) 3631 return ScalarCallCost; 3632 3633 // Compute corresponding vector type for return value and arguments. 3634 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3635 for (Type *ScalarTy : ScalarTys) 3636 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3637 3638 // Compute costs of unpacking argument values for the scalar calls and 3639 // packing the return values to a vector. 3640 unsigned ScalarizationCost = getScalarizationOverhead(Tys, RetTy, false, TTI); 3641 3642 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3643 3644 // If we can't emit a vector call for this function, then the currently found 3645 // cost is the cost we need to return. 3646 NeedToScalarize = true; 3647 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3648 return Cost; 3649 3650 // If the corresponding vector cost is cheaper, return its cost. 3651 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3652 if (VectorCallCost < Cost) { 3653 NeedToScalarize = false; 3654 return VectorCallCost; 3655 } 3656 return Cost; 3657 } 3658 3659 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3660 // factor VF. Return the cost of the instruction, including scalarization 3661 // overhead if it's needed. 
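// For example (illustrative), a call to llvm.fabs.f32 vectorized at VF = 4 is
// priced as the target's cost for the corresponding llvm.fabs.v4f32
// intrinsic, carrying over the call's fast-math flags.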
3662 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3663 const TargetTransformInfo &TTI, 3664 const TargetLibraryInfo *TLI) { 3665 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3666 assert(ID && "Expected intrinsic call!"); 3667 3668 Type *RetTy = ToVectorTy(CI->getType(), VF); 3669 SmallVector<Type *, 4> Tys; 3670 for (Value *ArgOperand : CI->arg_operands()) 3671 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 3672 3673 FastMathFlags FMF; 3674 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3675 FMF = FPMO->getFastMathFlags(); 3676 3677 return TTI.getIntrinsicInstrCost(ID, RetTy, Tys, FMF); 3678 } 3679 3680 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3681 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3682 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3683 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3684 } 3685 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3686 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3687 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3688 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3689 } 3690 3691 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3692 // For every instruction `I` in MinBWs, truncate the operands, create a 3693 // truncated version of `I` and reextend its result. InstCombine runs 3694 // later and will remove any ext/trunc pairs. 3695 // 3696 SmallPtrSet<Value *, 4> Erased; 3697 for (const auto &KV : *MinBWs) { 3698 VectorParts &Parts = VectorLoopValueMap.getVector(KV.first); 3699 for (Value *&I : Parts) { 3700 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3701 continue; 3702 Type *OriginalTy = I->getType(); 3703 Type *ScalarTruncatedTy = 3704 IntegerType::get(OriginalTy->getContext(), KV.second); 3705 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3706 OriginalTy->getVectorNumElements()); 3707 if (TruncatedTy == OriginalTy) 3708 continue; 3709 3710 IRBuilder<> B(cast<Instruction>(I)); 3711 auto ShrinkOperand = [&](Value *V) -> Value * { 3712 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3713 if (ZI->getSrcTy() == TruncatedTy) 3714 return ZI->getOperand(0); 3715 return B.CreateZExtOrTrunc(V, TruncatedTy); 3716 }; 3717 3718 // The actual instruction modification depends on the instruction type, 3719 // unfortunately. 
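      // For example (illustrative), if MinBWs records that an i32 operation
      // only needs 8 bits, a vector add
      //   %a = add <4 x i32> %x, %y
      // becomes
      //   %xt = trunc <4 x i32> %x to <4 x i8>
      //   %yt = trunc <4 x i32> %y to <4 x i8>
      //   %at = add <4 x i8> %xt, %yt
      //   %a  = zext <4 x i8> %at to <4 x i32>
      // and InstCombine later removes any ext/trunc pairs that cancel out.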
      Value *NewI = nullptr;
      if (auto *BO = dyn_cast<BinaryOperator>(I)) {
        NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
                             ShrinkOperand(BO->getOperand(1)));
        cast<BinaryOperator>(NewI)->copyIRFlags(I);
      } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
        NewI =
            B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
                         ShrinkOperand(CI->getOperand(1)));
      } else if (auto *SI = dyn_cast<SelectInst>(I)) {
        NewI = B.CreateSelect(SI->getCondition(),
                              ShrinkOperand(SI->getTrueValue()),
                              ShrinkOperand(SI->getFalseValue()));
      } else if (auto *CI = dyn_cast<CastInst>(I)) {
        switch (CI->getOpcode()) {
        default:
          llvm_unreachable("Unhandled cast!");
        case Instruction::Trunc:
          NewI = ShrinkOperand(CI->getOperand(0));
          break;
        case Instruction::SExt:
          NewI = B.CreateSExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        case Instruction::ZExt:
          NewI = B.CreateZExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        }
      } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
        auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
        auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
        auto *O1 = B.CreateZExtOrTrunc(
            SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));

        NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
      } else if (isa<LoadInst>(I)) {
        // Don't do anything with the operands, just extend the result.
        continue;
      } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
        auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
        NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
      } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
        auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        NewI = B.CreateExtractElement(O0, EE->getOperand(2));
      } else {
        llvm_unreachable("Unhandled instruction type!");
      }

      // Lastly, extend the result.
      NewI->takeName(cast<Instruction>(I));
      Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
      I->replaceAllUsesWith(Res);
      cast<Instruction>(I)->eraseFromParent();
      Erased.insert(I);
      I = Res;
    }
  }

  // We'll have created a bunch of ZExts that are now parentless. Clean up.
  for (const auto &KV : *MinBWs) {
    VectorParts &Parts = VectorLoopValueMap.getVector(KV.first);
    for (Value *&I : Parts) {
      ZExtInst *Inst = dyn_cast<ZExtInst>(I);
      if (Inst && Inst->use_empty()) {
        Value *NewI = Inst->getOperand(0);
        Inst->eraseFromParent();
        I = NewI;
      }
    }
  }
}

void InnerLoopVectorizer::vectorizeLoop() {
  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
  //
  //===------------------------------------------------===//
  Constant *Zero = Builder.getInt32(0);

  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. First,
  // we create a new vector PHI node with no incoming edges. We use this value
  // when we vectorize all of the instructions that use the PHI. Next, after
  // all of the instructions in the block are complete we add the new incoming
  // edges to the PHI. At this point all of the instructions in the basic block
  // are vectorized, so we can use them to construct the PHI.
  PhiVector PHIsToFix;

  // Scan the loop in a topological order to ensure that defs are vectorized
  // before users.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  // Vectorize all of the blocks in the original loop.
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    vectorizeBlockInLoop(BB, &PHIsToFix);

  // Insert truncates and extends for any truncated instructions as hints to
  // InstCombine.
  if (VF > 1)
    truncateToMinimalBitwidths();

  // At this point every instruction in the original loop is widened to a
  // vector form. Now we need to fix the recurrences in PHIsToFix. These PHI
  // nodes are currently empty because we did not want to introduce cycles.
  // This is the second stage of vectorizing recurrences.
  for (PHINode *Phi : PHIsToFix) {
    assert(Phi && "Unable to recover vectorized PHI");

    // Handle first-order recurrences that need to be fixed.
    if (Legal->isFirstOrderRecurrence(Phi)) {
      fixFirstOrderRecurrence(Phi);
      continue;
    }

    // If the phi node is not a first-order recurrence, it must be a reduction.
    // Get its reduction variable descriptor.
    assert(Legal->isReductionVariable(Phi) &&
           "Unable to find the reduction variable");
    RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];

    RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
    TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
    Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
    RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
        RdxDesc.getMinMaxRecurrenceKind();
    setDebugLocFromInst(Builder, ReductionStartValue);

    // We need to generate a reduction vector from the incoming scalar.
    // To do so, we need to generate the 'identity' vector and override
    // one of the elements with the incoming scalar reduction. We need
    // to do it in the vector-loop preheader.
    Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());

    // This is the vector-clone of the value that leaves the loop.
    const VectorParts &VectorExit = getVectorValue(LoopExitInst);
    Type *VecTy = VectorExit[0]->getType();

    // Find the reduction identity variable. Zero for addition, or and xor;
    // one for multiplication; -1 for and.
    Value *Identity;
    Value *VectorStart;
    if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
        RK == RecurrenceDescriptor::RK_FloatMinMax) {
      // MinMax reductions have the start value as their identity.
      if (VF == 1) {
        VectorStart = Identity = ReductionStartValue;
      } else {
        VectorStart = Identity =
            Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
      }
    } else {
      // Handle other reduction kinds:
      Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
          RK, VecTy->getScalarType());
      if (VF == 1) {
        Identity = Iden;
        // This vector is the Identity vector where the first element is the
        // incoming scalar reduction.
        VectorStart = ReductionStartValue;
      } else {
        Identity = ConstantVector::getSplat(VF, Iden);

        // This vector is the Identity vector where the first element is the
        // incoming scalar reduction.
        VectorStart =
            Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
      }
    }

    // Fix the vector-loop phi.

    // Reductions do not have to start at zero. They can start with
    // any loop invariant values.
    const VectorParts &VecRdxPhi = getVectorValue(Phi);
    BasicBlock *Latch = OrigLoop->getLoopLatch();
    Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
    const VectorParts &Val = getVectorValue(LoopVal);
    for (unsigned part = 0; part < UF; ++part) {
      // Make sure to add the reduction start value only to the
      // first unroll part.
      Value *StartVal = (part == 0) ? VectorStart : Identity;
      cast<PHINode>(VecRdxPhi[part])
          ->addIncoming(StartVal, LoopVectorPreHeader);
      cast<PHINode>(VecRdxPhi[part])
          ->addIncoming(Val[part], LoopVectorBody);
    }

    // Before each round, move the insertion point right between
    // the PHIs and the values we are going to write.
    // This allows us to write both PHINodes and the extractelement
    // instructions.
    Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());

    VectorParts &RdxParts = VectorLoopValueMap.getVector(LoopExitInst);
    setDebugLocFromInst(Builder, LoopExitInst);

    // If the vector reduction can be performed in a smaller type, we truncate
    // then extend the loop exit value to enable InstCombine to evaluate the
    // entire expression in the smaller type.
    if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
      Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
      Builder.SetInsertPoint(LoopVectorBody->getTerminator());
      for (unsigned part = 0; part < UF; ++part) {
        Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
        Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
                                          : Builder.CreateZExt(Trunc, VecTy);
        for (Value::user_iterator UI = RdxParts[part]->user_begin();
             UI != RdxParts[part]->user_end();)
          if (*UI != Trunc) {
            (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd);
            RdxParts[part] = Extnd;
          } else {
            ++UI;
          }
      }
      Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
      for (unsigned part = 0; part < UF; ++part)
        RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
    }

    // Reduce all of the unrolled parts into a single vector.
    Value *ReducedPartRdx = RdxParts[0];
    unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
    setDebugLocFromInst(Builder, ReducedPartRdx);
    for (unsigned part = 1; part < UF; ++part) {
      if (Op != Instruction::ICmp && Op != Instruction::FCmp)
        // Floating point operations had to be 'fast' to enable the reduction.
3961 ReducedPartRdx = addFastMathFlag( 3962 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part], 3963 ReducedPartRdx, "bin.rdx")); 3964 else 3965 ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp( 3966 Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]); 3967 } 3968 3969 if (VF > 1) { 3970 // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles 3971 // and vector ops, reducing the set of values being computed by half each 3972 // round. 3973 assert(isPowerOf2_32(VF) && 3974 "Reduction emission only supported for pow2 vectors!"); 3975 Value *TmpVec = ReducedPartRdx; 3976 SmallVector<Constant *, 32> ShuffleMask(VF, nullptr); 3977 for (unsigned i = VF; i != 1; i >>= 1) { 3978 // Move the upper half of the vector to the lower half. 3979 for (unsigned j = 0; j != i / 2; ++j) 3980 ShuffleMask[j] = Builder.getInt32(i / 2 + j); 3981 3982 // Fill the rest of the mask with undef. 3983 std::fill(&ShuffleMask[i / 2], ShuffleMask.end(), 3984 UndefValue::get(Builder.getInt32Ty())); 3985 3986 Value *Shuf = Builder.CreateShuffleVector( 3987 TmpVec, UndefValue::get(TmpVec->getType()), 3988 ConstantVector::get(ShuffleMask), "rdx.shuf"); 3989 3990 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3991 // Floating point operations had to be 'fast' to enable the reduction. 3992 TmpVec = addFastMathFlag(Builder.CreateBinOp( 3993 (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx")); 3994 else 3995 TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind, 3996 TmpVec, Shuf); 3997 } 3998 3999 // The result is in the first element of the vector. 4000 ReducedPartRdx = 4001 Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 4002 4003 // If the reduction can be performed in a smaller type, we need to extend 4004 // the reduction to the wider type before we branch to the original loop. 4005 if (Phi->getType() != RdxDesc.getRecurrenceType()) 4006 ReducedPartRdx = 4007 RdxDesc.isSigned() 4008 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 4009 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 4010 } 4011 4012 // Create a phi node that merges control-flow from the backedge-taken check 4013 // block and the middle block. 4014 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 4015 LoopScalarPreHeader->getTerminator()); 4016 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 4017 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 4018 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4019 4020 // Now, we need to fix the users of the reduction variable 4021 // inside and outside of the scalar remainder loop. 4022 // We know that the loop is in LCSSA form. We need to update the 4023 // PHI nodes in the exit blocks. 4024 for (BasicBlock::iterator LEI = LoopExitBlock->begin(), 4025 LEE = LoopExitBlock->end(); 4026 LEI != LEE; ++LEI) { 4027 PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI); 4028 if (!LCSSAPhi) 4029 break; 4030 4031 // All PHINodes need to have a single entry edge, or two if 4032 // we already fixed them. 4033 assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 4034 4035 // We found our reduction value exit-PHI. Update it with the 4036 // incoming bypass edge. 4037 if (LCSSAPhi->getIncomingValue(0) == LoopExitInst) { 4038 // Add an edge coming from the bypass. 4039 LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4040 break; 4041 } 4042 } // end of the LCSSA phi scan. 
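    // For example (illustrative), for a sum reduction s += a[i], the scalar
    // remainder loop must resume from the partial sum produced by the vector
    // loop; bc.merge.rdx, created above, selects between that partial sum and
    // the original start value depending on whether the vector loop was
    // entered, and the fix-up below wires it into the scalar loop's phi.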
4043 4044 // Fix the scalar loop reduction variable with the incoming reduction sum 4045 // from the vector body and from the backedge value. 4046 int IncomingEdgeBlockIdx = 4047 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4048 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4049 // Pick the other block. 4050 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 4051 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4052 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4053 } // end of for each Phi in PHIsToFix. 4054 4055 fixLCSSAPHIs(); 4056 4057 // Make sure DomTree is updated. 4058 updateAnalysis(); 4059 4060 predicateInstructions(); 4061 4062 // Remove redundant induction instructions. 4063 cse(LoopVectorBody); 4064 } 4065 4066 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) { 4067 4068 // This is the second phase of vectorizing first-order recurrences. An 4069 // overview of the transformation is described below. Suppose we have the 4070 // following loop. 4071 // 4072 // for (int i = 0; i < n; ++i) 4073 // b[i] = a[i] - a[i - 1]; 4074 // 4075 // There is a first-order recurrence on "a". For this loop, the shorthand 4076 // scalar IR looks like: 4077 // 4078 // scalar.ph: 4079 // s_init = a[-1] 4080 // br scalar.body 4081 // 4082 // scalar.body: 4083 // i = phi [0, scalar.ph], [i+1, scalar.body] 4084 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 4085 // s2 = a[i] 4086 // b[i] = s2 - s1 4087 // br cond, scalar.body, ... 4088 // 4089 // In this example, s1 is a recurrence because it's value depends on the 4090 // previous iteration. In the first phase of vectorization, we created a 4091 // temporary value for s1. We now complete the vectorization and produce the 4092 // shorthand vector IR shown below (for VF = 4, UF = 1). 4093 // 4094 // vector.ph: 4095 // v_init = vector(..., ..., ..., a[-1]) 4096 // br vector.body 4097 // 4098 // vector.body 4099 // i = phi [0, vector.ph], [i+4, vector.body] 4100 // v1 = phi [v_init, vector.ph], [v2, vector.body] 4101 // v2 = a[i, i+1, i+2, i+3]; 4102 // v3 = vector(v1(3), v2(0, 1, 2)) 4103 // b[i, i+1, i+2, i+3] = v2 - v3 4104 // br cond, vector.body, middle.block 4105 // 4106 // middle.block: 4107 // x = v2(3) 4108 // br scalar.ph 4109 // 4110 // scalar.ph: 4111 // s_init = phi [x, middle.block], [a[-1], otherwise] 4112 // br scalar.body 4113 // 4114 // After execution completes the vector loop, we extract the next value of 4115 // the recurrence (x) to use as the initial value in the scalar loop. 4116 4117 // Get the original loop preheader and single loop latch. 4118 auto *Preheader = OrigLoop->getLoopPreheader(); 4119 auto *Latch = OrigLoop->getLoopLatch(); 4120 4121 // Get the initial and previous values of the scalar recurrence. 4122 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader); 4123 auto *Previous = Phi->getIncomingValueForBlock(Latch); 4124 4125 // Create a vector from the initial value. 4126 auto *VectorInit = ScalarInit; 4127 if (VF > 1) { 4128 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4129 VectorInit = Builder.CreateInsertElement( 4130 UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit, 4131 Builder.getInt32(VF - 1), "vector.recur.init"); 4132 } 4133 4134 // We constructed a temporary phi node in the first phase of vectorization. 4135 // This phi node will eventually be deleted. 
  VectorParts &PhiParts = VectorLoopValueMap.getVector(Phi);
  Builder.SetInsertPoint(cast<Instruction>(PhiParts[0]));

  // Create a phi node for the new recurrence. The current value will either
  // be the initial value inserted into a vector or a loop-varying vector
  // value.
  auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
  VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);

  // Get the vectorized previous value. We ensured the previous value was an
  // instruction when detecting the recurrence.
  auto &PreviousParts = getVectorValue(Previous);

  // Set the insertion point to be after this instruction. We ensured the
  // previous value dominated all uses of the phi when detecting the
  // recurrence.
  Builder.SetInsertPoint(
      &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1])));

  // We will construct a vector for the recurrence by combining the values for
  // the current and previous iterations. This is the required shuffle mask.
  SmallVector<Constant *, 8> ShuffleMask(VF);
  ShuffleMask[0] = Builder.getInt32(VF - 1);
  for (unsigned I = 1; I < VF; ++I)
    ShuffleMask[I] = Builder.getInt32(I + VF - 1);

  // The vector from which to take the initial value for the current iteration
  // (actual or unrolled). Initially, this is the vector phi node.
  Value *Incoming = VecPhi;

  // Shuffle the current and previous vector and update the vector parts.
  for (unsigned Part = 0; Part < UF; ++Part) {
    auto *Shuffle =
        VF > 1
            ? Builder.CreateShuffleVector(Incoming, PreviousParts[Part],
                                          ConstantVector::get(ShuffleMask))
            : Incoming;
    PhiParts[Part]->replaceAllUsesWith(Shuffle);
    cast<Instruction>(PhiParts[Part])->eraseFromParent();
    PhiParts[Part] = Shuffle;
    Incoming = PreviousParts[Part];
  }

  // Fix the latch value of the new recurrence in the vector loop.
  VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());

  // Extract the last vector element in the middle block. This will be the
  // initial value for the recurrence when jumping to the scalar loop.
  auto *Extract = Incoming;
  if (VF > 1) {
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    Extract = Builder.CreateExtractElement(Extract, Builder.getInt32(VF - 1),
                                           "vector.recur.extract");
  }

  // Fix the initial value of the original recurrence in the scalar loop.
  Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
  auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
  for (auto *BB : predecessors(LoopScalarPreHeader)) {
    auto *Incoming = BB == LoopMiddleBlock ? Extract : ScalarInit;
    Start->addIncoming(Incoming, BB);
  }

  Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
  Phi->setName("scalar.recur");

  // Finally, fix users of the recurrence outside the loop. The users will
  // need either the last value of the scalar recurrence or the last value of
  // the vector recurrence we extracted in the middle block. Since the loop is
  // in LCSSA form, we just need to find the phi node for the original scalar
  // recurrence in the exit block, and then add an edge for the middle block.
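  // For example (illustrative), an exit-block phi such as
  //   %s.lcssa = phi i16 [ %scalar.recur, %for.body ]
  // gains a second incoming value for the middle block:
  //   %s.lcssa = phi i16 [ %scalar.recur, %for.body ],
  //                      [ %vector.recur.extract, %middle.block ]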
4206 for (auto &I : *LoopExitBlock) { 4207 auto *LCSSAPhi = dyn_cast<PHINode>(&I); 4208 if (!LCSSAPhi) 4209 break; 4210 if (LCSSAPhi->getIncomingValue(0) == Phi) { 4211 LCSSAPhi->addIncoming(Extract, LoopMiddleBlock); 4212 break; 4213 } 4214 } 4215 } 4216 4217 void InnerLoopVectorizer::fixLCSSAPHIs() { 4218 for (Instruction &LEI : *LoopExitBlock) { 4219 auto *LCSSAPhi = dyn_cast<PHINode>(&LEI); 4220 if (!LCSSAPhi) 4221 break; 4222 if (LCSSAPhi->getNumIncomingValues() == 1) 4223 LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()), 4224 LoopMiddleBlock); 4225 } 4226 } 4227 4228 void InnerLoopVectorizer::predicateInstructions() { 4229 4230 // For each instruction I marked for predication on value C, split I into its 4231 // own basic block to form an if-then construct over C. 4232 // Since I may be fed by extractelement and/or be feeding an insertelement 4233 // generated during scalarization we try to move such instructions into the 4234 // predicated basic block as well. For the insertelement this also means that 4235 // the PHI will be created for the resulting vector rather than for the 4236 // scalar instruction. 4237 // So for some predicated instruction, e.g. the conditional sdiv in: 4238 // 4239 // for.body: 4240 // ... 4241 // %add = add nsw i32 %mul, %0 4242 // %cmp5 = icmp sgt i32 %2, 7 4243 // br i1 %cmp5, label %if.then, label %if.end 4244 // 4245 // if.then: 4246 // %div = sdiv i32 %0, %1 4247 // br label %if.end 4248 // 4249 // if.end: 4250 // %x.0 = phi i32 [ %div, %if.then ], [ %add, %for.body ] 4251 // 4252 // the sdiv at this point is scalarized and if-converted using a select. 4253 // The inactive elements in the vector are not used, but the predicated 4254 // instruction is still executed for all vector elements, essentially: 4255 // 4256 // vector.body: 4257 // ... 4258 // %17 = add nsw <2 x i32> %16, %wide.load 4259 // %29 = extractelement <2 x i32> %wide.load, i32 0 4260 // %30 = extractelement <2 x i32> %wide.load51, i32 0 4261 // %31 = sdiv i32 %29, %30 4262 // %32 = insertelement <2 x i32> undef, i32 %31, i32 0 4263 // %35 = extractelement <2 x i32> %wide.load, i32 1 4264 // %36 = extractelement <2 x i32> %wide.load51, i32 1 4265 // %37 = sdiv i32 %35, %36 4266 // %38 = insertelement <2 x i32> %32, i32 %37, i32 1 4267 // %predphi = select <2 x i1> %26, <2 x i32> %38, <2 x i32> %17 4268 // 4269 // Predication will now re-introduce the original control flow to avoid false 4270 // side-effects by the sdiv instructions on the inactive elements, yielding 4271 // (after cleanup): 4272 // 4273 // vector.body: 4274 // ... 
4275 // %5 = add nsw <2 x i32> %4, %wide.load 4276 // %8 = icmp sgt <2 x i32> %wide.load52, <i32 7, i32 7> 4277 // %9 = extractelement <2 x i1> %8, i32 0 4278 // br i1 %9, label %pred.sdiv.if, label %pred.sdiv.continue 4279 // 4280 // pred.sdiv.if: 4281 // %10 = extractelement <2 x i32> %wide.load, i32 0 4282 // %11 = extractelement <2 x i32> %wide.load51, i32 0 4283 // %12 = sdiv i32 %10, %11 4284 // %13 = insertelement <2 x i32> undef, i32 %12, i32 0 4285 // br label %pred.sdiv.continue 4286 // 4287 // pred.sdiv.continue: 4288 // %14 = phi <2 x i32> [ undef, %vector.body ], [ %13, %pred.sdiv.if ] 4289 // %15 = extractelement <2 x i1> %8, i32 1 4290 // br i1 %15, label %pred.sdiv.if54, label %pred.sdiv.continue55 4291 // 4292 // pred.sdiv.if54: 4293 // %16 = extractelement <2 x i32> %wide.load, i32 1 4294 // %17 = extractelement <2 x i32> %wide.load51, i32 1 4295 // %18 = sdiv i32 %16, %17 4296 // %19 = insertelement <2 x i32> %14, i32 %18, i32 1 4297 // br label %pred.sdiv.continue55 4298 // 4299 // pred.sdiv.continue55: 4300 // %20 = phi <2 x i32> [ %14, %pred.sdiv.continue ], [ %19, %pred.sdiv.if54 ] 4301 // %predphi = select <2 x i1> %8, <2 x i32> %20, <2 x i32> %5 4302 4303 for (auto KV : PredicatedInstructions) { 4304 BasicBlock::iterator I(KV.first); 4305 BasicBlock *Head = I->getParent(); 4306 auto *BB = SplitBlock(Head, &*std::next(I), DT, LI); 4307 auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false, 4308 /*BranchWeights=*/nullptr, DT, LI); 4309 I->moveBefore(T); 4310 // Try to move any extractelement we may have created for the predicated 4311 // instruction into the Then block. 4312 for (Use &Op : I->operands()) { 4313 auto *OpInst = dyn_cast<ExtractElementInst>(&*Op); 4314 if (OpInst && OpInst->hasOneUse()) // TODO: more accurately - hasOneUser() 4315 OpInst->moveBefore(&*I); 4316 } 4317 4318 I->getParent()->setName(Twine("pred.") + I->getOpcodeName() + ".if"); 4319 BB->setName(Twine("pred.") + I->getOpcodeName() + ".continue"); 4320 4321 // If the instruction is non-void create a Phi node at reconvergence point. 4322 if (!I->getType()->isVoidTy()) { 4323 Value *IncomingTrue = nullptr; 4324 Value *IncomingFalse = nullptr; 4325 4326 if (I->hasOneUse() && isa<InsertElementInst>(*I->user_begin())) { 4327 // If the predicated instruction is feeding an insert-element, move it 4328 // into the Then block; Phi node will be created for the vector. 4329 InsertElementInst *IEI = cast<InsertElementInst>(*I->user_begin()); 4330 IEI->moveBefore(T); 4331 IncomingTrue = IEI; // the new vector with the inserted element. 4332 IncomingFalse = IEI->getOperand(0); // the unmodified vector 4333 } else { 4334 // Phi node will be created for the scalar predicated instruction. 4335 IncomingTrue = &*I; 4336 IncomingFalse = UndefValue::get(I->getType()); 4337 } 4338 4339 BasicBlock *PostDom = I->getParent()->getSingleSuccessor(); 4340 assert(PostDom && "Then block has multiple successors"); 4341 PHINode *Phi = 4342 PHINode::Create(IncomingTrue->getType(), 2, "", &PostDom->front()); 4343 IncomingTrue->replaceAllUsesWith(Phi); 4344 Phi->addIncoming(IncomingFalse, Head); 4345 Phi->addIncoming(IncomingTrue, I->getParent()); 4346 } 4347 } 4348 4349 DEBUG(DT->verifyDomTree()); 4350 } 4351 4352 InnerLoopVectorizer::VectorParts 4353 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) { 4354 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 4355 4356 // Look for cached value. 
4357   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
4358   EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge);
4359   if (ECEntryIt != MaskCache.end())
4360     return ECEntryIt->second;
4361
4362   VectorParts SrcMask = createBlockInMask(Src);
4363
4364   // The terminator has to be a branch inst!
4365   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
4366   assert(BI && "Unexpected terminator found");
4367
4368   if (BI->isConditional()) {
4369     VectorParts EdgeMask = getVectorValue(BI->getCondition());
4370
4371     if (BI->getSuccessor(0) != Dst)
4372       for (unsigned part = 0; part < UF; ++part)
4373         EdgeMask[part] = Builder.CreateNot(EdgeMask[part]);
4374
4375     for (unsigned part = 0; part < UF; ++part)
4376       EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]);
4377
4378     MaskCache[Edge] = EdgeMask;
4379     return EdgeMask;
4380   }
4381
4382   MaskCache[Edge] = SrcMask;
4383   return SrcMask;
4384 }
4385
4386 InnerLoopVectorizer::VectorParts
4387 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
4388   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
4389
4390   // The loop's incoming mask is all-ones.
4391   if (OrigLoop->getHeader() == BB) {
4392     Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1);
4393     return getVectorValue(C);
4394   }
4395
4396   // This is the block mask: we OR together the masks of all incoming edges, starting from zero.
4397   Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0);
4398   VectorParts BlockMask = getVectorValue(Zero);
4399
4400   // For each pred:
4401   for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) {
4402     VectorParts EM = createEdgeMask(*it, BB);
4403     for (unsigned part = 0; part < UF; ++part)
4404       BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]);
4405   }
4406
4407   return BlockMask;
4408 }
4409
4410 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
4411                                               unsigned VF, PhiVector *PV) {
4412   PHINode *P = cast<PHINode>(PN);
4413   // Handle recurrences.
4414   if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4415     VectorParts Entry(UF);
4416     for (unsigned part = 0; part < UF; ++part) {
4417       // This is phase one of vectorizing PHIs.
4418       Type *VecTy =
4419           (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4420       Entry[part] = PHINode::Create(
4421           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4422     }
4423     VectorLoopValueMap.initVector(P, Entry);
4424     PV->push_back(P);
4425     return;
4426   }
4427
4428   setDebugLocFromInst(Builder, P);
4429   // Check for PHI nodes that are lowered to vector selects.
4430   if (P->getParent() != OrigLoop->getHeader()) {
4431     // We know that all PHIs in non-header blocks are converted into
4432     // selects, so we don't have to worry about the insertion order and we
4433     // can just use the builder.
4434     // At this point we generate the predication tree. There may be
4435     // duplications since this is a simple recursive scan, but future
4436     // optimizations will clean it up.
4437 4438 unsigned NumIncoming = P->getNumIncomingValues(); 4439 4440 // Generate a sequence of selects of the form: 4441 // SELECT(Mask3, In3, 4442 // SELECT(Mask2, In2, 4443 // ( ...))) 4444 VectorParts Entry(UF); 4445 for (unsigned In = 0; In < NumIncoming; In++) { 4446 VectorParts Cond = 4447 createEdgeMask(P->getIncomingBlock(In), P->getParent()); 4448 const VectorParts &In0 = getVectorValue(P->getIncomingValue(In)); 4449 4450 for (unsigned part = 0; part < UF; ++part) { 4451 // We might have single edge PHIs (blocks) - use an identity 4452 // 'select' for the first PHI operand. 4453 if (In == 0) 4454 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In0[part]); 4455 else 4456 // Select between the current value and the previous incoming edge 4457 // based on the incoming mask. 4458 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], Entry[part], 4459 "predphi"); 4460 } 4461 } 4462 VectorLoopValueMap.initVector(P, Entry); 4463 return; 4464 } 4465 4466 // This PHINode must be an induction variable. 4467 // Make sure that we know about it. 4468 assert(Legal->getInductionVars()->count(P) && "Not an induction variable"); 4469 4470 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 4471 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4472 4473 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4474 // which can be found from the original scalar operations. 4475 switch (II.getKind()) { 4476 case InductionDescriptor::IK_NoInduction: 4477 llvm_unreachable("Unknown induction"); 4478 case InductionDescriptor::IK_IntInduction: 4479 return widenIntInduction(P); 4480 case InductionDescriptor::IK_PtrInduction: { 4481 // Handle the pointer induction variable case. 4482 assert(P->getType()->isPointerTy() && "Unexpected type."); 4483 // This is the normalized GEP that starts counting at zero. 4484 Value *PtrInd = Induction; 4485 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType()); 4486 // These are the scalar results. Notice that we don't generate vector GEPs 4487 // because scalar GEPs result in better code. 4488 ScalarParts Entry(UF); 4489 for (unsigned Part = 0; Part < UF; ++Part) { 4490 Entry[Part].resize(VF); 4491 for (unsigned Lane = 0; Lane < VF; ++Lane) { 4492 Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF); 4493 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4494 Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL); 4495 SclrGep->setName("next.gep"); 4496 Entry[Part][Lane] = SclrGep; 4497 } 4498 } 4499 VectorLoopValueMap.initScalar(P, Entry); 4500 return; 4501 } 4502 case InductionDescriptor::IK_FpInduction: { 4503 assert(P->getType() == II.getStartValue()->getType() && 4504 "Types must match"); 4505 // Handle other induction variables that are now based on the 4506 // canonical one. 4507 assert(P != OldInduction && "Primary induction can be integer only"); 4508 4509 Value *V = Builder.CreateCast(Instruction::SIToFP, Induction, P->getType()); 4510 V = II.transform(Builder, V, PSE.getSE(), DL); 4511 V->setName("fp.offset.idx"); 4512 4513 // Now we have scalar op: %fp.offset.idx = StartVal +/- Induction*StepVal 4514 4515 Value *Broadcasted = getBroadcastInstrs(V); 4516 // After broadcasting the induction variable we need to make the vector 4517 // consecutive by adding StepVal*0, StepVal*1, StepVal*2, etc. 
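  //
  // For example (a sketch, not code emitted verbatim): for an fadd induction
  // with broadcast start value V, step C, VF = 4, and UF = 1, the vector for
  // part 0 is roughly
  //
  //   <V, V, V, V> fadd <0*C, 1*C, 2*C, 3*C>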
4518 Value *StepVal = cast<SCEVUnknown>(II.getStep())->getValue(); 4519 VectorParts Entry(UF); 4520 for (unsigned part = 0; part < UF; ++part) 4521 Entry[part] = getStepVector(Broadcasted, VF * part, StepVal, 4522 II.getInductionOpcode()); 4523 VectorLoopValueMap.initVector(P, Entry); 4524 return; 4525 } 4526 } 4527 } 4528 4529 /// A helper function for checking whether an integer division-related 4530 /// instruction may divide by zero (in which case it must be predicated if 4531 /// executed conditionally in the scalar code). 4532 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4533 /// Non-zero divisors that are non compile-time constants will not be 4534 /// converted into multiplication, so we will still end up scalarizing 4535 /// the division, but can do so w/o predication. 4536 static bool mayDivideByZero(Instruction &I) { 4537 assert((I.getOpcode() == Instruction::UDiv || 4538 I.getOpcode() == Instruction::SDiv || 4539 I.getOpcode() == Instruction::URem || 4540 I.getOpcode() == Instruction::SRem) && 4541 "Unexpected instruction"); 4542 Value *Divisor = I.getOperand(1); 4543 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4544 return !CInt || CInt->isZero(); 4545 } 4546 4547 void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) { 4548 // For each instruction in the old loop. 4549 for (Instruction &I : *BB) { 4550 switch (I.getOpcode()) { 4551 case Instruction::Br: 4552 // Nothing to do for PHIs and BR, since we already took care of the 4553 // loop control flow instructions. 4554 continue; 4555 case Instruction::PHI: { 4556 // Vectorize PHINodes. 4557 widenPHIInstruction(&I, UF, VF, PV); 4558 continue; 4559 } // End of PHI. 4560 4561 case Instruction::UDiv: 4562 case Instruction::SDiv: 4563 case Instruction::SRem: 4564 case Instruction::URem: 4565 // Scalarize with predication if this instruction may divide by zero and 4566 // block execution is conditional, otherwise fallthrough. 4567 if (mayDivideByZero(I) && Legal->blockNeedsPredication(I.getParent())) { 4568 scalarizeInstruction(&I, true); 4569 continue; 4570 } 4571 case Instruction::Add: 4572 case Instruction::FAdd: 4573 case Instruction::Sub: 4574 case Instruction::FSub: 4575 case Instruction::Mul: 4576 case Instruction::FMul: 4577 case Instruction::FDiv: 4578 case Instruction::FRem: 4579 case Instruction::Shl: 4580 case Instruction::LShr: 4581 case Instruction::AShr: 4582 case Instruction::And: 4583 case Instruction::Or: 4584 case Instruction::Xor: { 4585 // Just widen binops. 4586 auto *BinOp = cast<BinaryOperator>(&I); 4587 setDebugLocFromInst(Builder, BinOp); 4588 const VectorParts &A = getVectorValue(BinOp->getOperand(0)); 4589 const VectorParts &B = getVectorValue(BinOp->getOperand(1)); 4590 4591 // Use this vector value for all users of the original instruction. 4592 VectorParts Entry(UF); 4593 for (unsigned Part = 0; Part < UF; ++Part) { 4594 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]); 4595 4596 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V)) 4597 VecOp->copyIRFlags(BinOp); 4598 4599 Entry[Part] = V; 4600 } 4601 4602 VectorLoopValueMap.initVector(&I, Entry); 4603 addMetadata(Entry, BinOp); 4604 break; 4605 } 4606 case Instruction::Select: { 4607 // Widen selects. 4608 // If the selector is loop invariant we can create a select 4609 // instruction with a scalar condition. Otherwise, use vector-select. 
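  //
  // For example, in a loop like
  //
  //   for (i = 0; i < n; ++i)
  //     a[i] = k ? b[i] : c[i];   // k is defined outside the loop
  //
  // the condition k is uniform across lanes, so a single scalar i1 value can
  // select between the two widened operands. Otherwise, each lane needs its
  // own condition bit and a vector select is emitted.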
4610   auto *SE = PSE.getSE();
4611   bool InvariantCond =
4612       SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
4613   setDebugLocFromInst(Builder, &I);
4614
4615   // The condition can be loop invariant but still defined inside the
4616   // loop. This means that we can't just use the original 'cond' value.
4617   // We have to take the 'vectorized' value and pick the first lane.
4618   // Instcombine will make this a no-op.
4619   const VectorParts &Cond = getVectorValue(I.getOperand(0));
4620   const VectorParts &Op0 = getVectorValue(I.getOperand(1));
4621   const VectorParts &Op1 = getVectorValue(I.getOperand(2));
4622
4623   auto *ScalarCond = getScalarValue(I.getOperand(0), 0, 0);
4624
4625   VectorParts Entry(UF);
4626   for (unsigned Part = 0; Part < UF; ++Part) {
4627     Entry[Part] = Builder.CreateSelect(
4628         InvariantCond ? ScalarCond : Cond[Part], Op0[Part], Op1[Part]);
4629   }
4630
4631   VectorLoopValueMap.initVector(&I, Entry);
4632   addMetadata(Entry, &I);
4633   break;
4634 }
4635
4636 case Instruction::ICmp:
4637 case Instruction::FCmp: {
4638   // Widen compares: generate vector compares.
4639   bool FCmp = (I.getOpcode() == Instruction::FCmp);
4640   auto *Cmp = cast<CmpInst>(&I);
4641   setDebugLocFromInst(Builder, Cmp);
4642   const VectorParts &A = getVectorValue(Cmp->getOperand(0));
4643   const VectorParts &B = getVectorValue(Cmp->getOperand(1));
4644   VectorParts Entry(UF);
4645   for (unsigned Part = 0; Part < UF; ++Part) {
4646     Value *C = nullptr;
4647     if (FCmp) {
4648       C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]);
4649       cast<FCmpInst>(C)->copyFastMathFlags(Cmp);
4650     } else {
4651       C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]);
4652     }
4653     Entry[Part] = C;
4654   }
4655
4656   VectorLoopValueMap.initVector(&I, Entry);
4657   addMetadata(Entry, &I);
4658   break;
4659 }
4660
4661 case Instruction::Store:
4662 case Instruction::Load:
4663   vectorizeMemoryInstruction(&I);
4664   break;
4665 case Instruction::ZExt:
4666 case Instruction::SExt:
4667 case Instruction::FPToUI:
4668 case Instruction::FPToSI:
4669 case Instruction::FPExt:
4670 case Instruction::PtrToInt:
4671 case Instruction::IntToPtr:
4672 case Instruction::SIToFP:
4673 case Instruction::UIToFP:
4674 case Instruction::Trunc:
4675 case Instruction::FPTrunc:
4676 case Instruction::BitCast: {
4677   auto *CI = cast<CastInst>(&I);
4678   setDebugLocFromInst(Builder, CI);
4679
4680   // Optimize the special case where the source is a constant integer
4681   // induction variable. Notice that we can only optimize the 'trunc' case
4682   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
4683   // (c) other casts depend on pointer size.
4684   auto ID = Legal->getInductionVars()->lookup(OldInduction);
4685   if (isa<TruncInst>(CI) && CI->getOperand(0) == OldInduction &&
4686       ID.getConstIntStepValue()) {
4687     widenIntInduction(OldInduction, cast<TruncInst>(CI));
4688     break;
4689   }
4690
4691   // Vectorize casts.
4692   Type *DestTy =
4693       (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
4694
4695   const VectorParts &A = getVectorValue(CI->getOperand(0));
4696   VectorParts Entry(UF);
4697   for (unsigned Part = 0; Part < UF; ++Part)
4698     Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy);
4699   VectorLoopValueMap.initVector(&I, Entry);
4700   addMetadata(Entry, &I);
4701   break;
4702 }
4703
4704 case Instruction::Call: {
4705   // Ignore dbg intrinsics.
4706   if (isa<DbgInfoIntrinsic>(I))
4707     break;
4708   setDebugLocFromInst(Builder, &I);
4709
4710   Module *M = BB->getParent()->getParent();
4711   auto *CI = cast<CallInst>(&I);
4712
4713   StringRef FnName = CI->getCalledFunction()->getName();
4714   Function *F = CI->getCalledFunction();
4715   Type *RetTy = ToVectorTy(CI->getType(), VF);
4716   SmallVector<Type *, 4> Tys;
4717   for (Value *ArgOperand : CI->arg_operands())
4718     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
4719
4720   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4721   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
4722              ID == Intrinsic::lifetime_start)) {
4723     scalarizeInstruction(&I);
4724     break;
4725   }
4726   // This flag shows whether we should use an intrinsic or a plain library
4727   // call for the vectorized version of the instruction, i.e., whether the
4728   // intrinsic call is no more expensive than the library call.
4729   bool NeedToScalarize;
4730   unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
4731   bool UseVectorIntrinsic =
4732       ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
4733   if (!UseVectorIntrinsic && NeedToScalarize) {
4734     scalarizeInstruction(&I);
4735     break;
4736   }
4737
4738   VectorParts Entry(UF);
4739   for (unsigned Part = 0; Part < UF; ++Part) {
4740     SmallVector<Value *, 4> Args;
4741     for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
4742       Value *Arg = CI->getArgOperand(i);
4743       // Some intrinsics have a scalar argument - don't replace it with a
4744       // vector.
4745       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) {
4746         const VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i));
4747         Arg = VectorArg[Part];
4748       }
4749       Args.push_back(Arg);
4750     }
4751
4752     Function *VectorF;
4753     if (UseVectorIntrinsic) {
4754       // Use vector version of the intrinsic.
4755       Type *TysForDecl[] = {CI->getType()};
4756       if (VF > 1)
4757         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4758       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4759     } else {
4760       // Use vector version of the library call.
4761       StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
4762       assert(!VFnName.empty() && "Vector function name is empty.");
4763       VectorF = M->getFunction(VFnName);
4764       if (!VectorF) {
4765         // Generate a declaration.
4766         FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
4767         VectorF =
4768             Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
4769         VectorF->copyAttributesFrom(F);
4770       }
4771     }
4772     assert(VectorF && "Can't create vector function.");
4773
4774     SmallVector<OperandBundleDef, 1> OpBundles;
4775     CI->getOperandBundlesAsDefs(OpBundles);
4776     CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4777
4778     if (isa<FPMathOperator>(V))
4779       V->copyFastMathFlags(CI);
4780
4781     Entry[Part] = V;
4782   }
4783
4784   VectorLoopValueMap.initVector(&I, Entry);
4785   addMetadata(Entry, &I);
4786   break;
4787 }
4788
4789 default:
4790   // All other instructions are unsupported. Scalarize them.
4791   scalarizeInstruction(&I);
4792   break;
4793 } // end of switch.
4794 } // end of for_each instr.
4795 }
4796
4797 void InnerLoopVectorizer::updateAnalysis() {
4798   // Forget the original basic block.
4799   PSE.getSE()->forgetLoop(OrigLoop);
4800
4801   // Update the dominator tree information.
4802   assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
4803          "Entry does not dominate exit.");
4804
4805   // We don't predicate stores by this point, so the vector body should be a
4806   // single loop.
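  //
  // A rough sketch of the resulting immediate-dominator relations (using the
  // block names from this class):
  //
  //   LoopBypassBlocks[0] idom LoopScalarPreHeader and LoopExitBlock
  //   LoopVectorPreHeader idom LoopVectorBody
  //   LoopVectorBody      idom LoopMiddleBlock
  //   LoopScalarPreHeader idom LoopScalarBody
  //
  // LoopScalarPreHeader and LoopExitBlock are reachable along both the vector
  // and scalar paths, so their immediate dominator is the first bypass block.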
4807   DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader);
4808
4809   DT->addNewBlock(LoopMiddleBlock, LoopVectorBody);
4810   DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
4811   DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
4812   DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
4813
4814   DEBUG(DT->verifyDomTree());
4815 }
4816
4817 /// \brief Check whether it is safe to if-convert this phi node.
4818 ///
4819 /// Phi nodes with constant expressions that can trap are not safe to
4820 /// if-convert.
4821 static bool canIfConvertPHINodes(BasicBlock *BB) {
4822   for (Instruction &I : *BB) {
4823     auto *Phi = dyn_cast<PHINode>(&I);
4824     if (!Phi)
4825       return true;
4826     for (Value *V : Phi->incoming_values())
4827       if (auto *C = dyn_cast<Constant>(V))
4828         if (C->canTrap())
4829           return false;
4830   }
4831   return true;
4832 }
4833
4834 bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
4835   if (!EnableIfConversion) {
4836     emitAnalysis(VectorizationReport() << "if-conversion is disabled");
4837     return false;
4838   }
4839
4840   assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");
4841
4842   // A list of pointers that we can safely read and write to.
4843   SmallPtrSet<Value *, 8> SafePointers;
4844
4845   // Collect safe addresses.
4846   for (BasicBlock *BB : TheLoop->blocks()) {
4847     if (blockNeedsPredication(BB))
4848       continue;
4849
4850     for (Instruction &I : *BB)
4851       if (auto *Ptr = getPointerOperand(&I))
4852         SafePointers.insert(Ptr);
4853   }
4854
4855   // Collect the blocks that need predication.
4856   BasicBlock *Header = TheLoop->getHeader();
4857   for (BasicBlock *BB : TheLoop->blocks()) {
4858     // We don't support switch statements inside loops.
4859     if (!isa<BranchInst>(BB->getTerminator())) {
4860       emitAnalysis(VectorizationReport(BB->getTerminator())
4861                    << "loop contains a switch statement");
4862       return false;
4863     }
4864
4865     // We must be able to predicate all blocks that need to be predicated.
4866     if (blockNeedsPredication(BB)) {
4867       if (!blockCanBePredicated(BB, SafePointers)) {
4868         emitAnalysis(VectorizationReport(BB->getTerminator())
4869                      << "control flow cannot be substituted for a select");
4870         return false;
4871       }
4872     } else if (BB != Header && !canIfConvertPHINodes(BB)) {
4873       emitAnalysis(VectorizationReport(BB->getTerminator())
4874                    << "control flow cannot be substituted for a select");
4875       return false;
4876     }
4877   }
4878
4879   // We can if-convert this loop.
4880   return true;
4881 }
4882
4883 bool LoopVectorizationLegality::canVectorize() {
4884   // We must have a loop in canonical form. Loops with indirectbr in them cannot
4885   // be canonicalized.
4886   if (!TheLoop->getLoopPreheader()) {
4887     emitAnalysis(VectorizationReport()
4888                  << "loop control flow is not understood by vectorizer");
4889     return false;
4890   }
4891
4892   // FIXME: This code is currently dead, since any loop that gets sent to
4893   // LoopVectorizationLegality is already an innermost loop.
4894   //
4895   // We can only vectorize innermost loops.
4896   if (!TheLoop->empty()) {
4897     emitAnalysis(VectorizationReport() << "loop is not the innermost loop");
4898     return false;
4899   }
4900
4901   // We must have a single backedge.
4902   if (TheLoop->getNumBackEdges() != 1) {
4903     emitAnalysis(VectorizationReport()
4904                  << "loop control flow is not understood by vectorizer");
4905     return false;
4906   }
4907
4908   // We must have a single exiting block.
4909   if (!TheLoop->getExitingBlock()) {
4910     emitAnalysis(VectorizationReport()
4911                  << "loop control flow is not understood by vectorizer");
4912     return false;
4913   }
4914
4915   // We only handle bottom-tested loops, i.e., loops in which the condition is
4916   // checked at the end of each iteration. With that we can assume that all
4917   // instructions in the loop are executed the same number of times.
4918   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
4919     emitAnalysis(VectorizationReport()
4920                  << "loop control flow is not understood by vectorizer");
4921     return false;
4922   }
4923
4924   // We need to have a loop header.
4925   DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
4926                << '\n');
4927
4928   // Check if we can if-convert non-single-bb loops.
4929   unsigned NumBlocks = TheLoop->getNumBlocks();
4930   if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
4931     DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
4932     return false;
4933   }
4934
4935   // ScalarEvolution needs to be able to find the exit count.
4936   const SCEV *ExitCount = PSE.getBackedgeTakenCount();
4937   if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
4938     emitAnalysis(VectorizationReport()
4939                  << "could not determine number of loop iterations");
4940     DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
4941     return false;
4942   }
4943
4944   // Check if we can vectorize the instructions and CFG in this loop.
4945   if (!canVectorizeInstrs()) {
4946     DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
4947     return false;
4948   }
4949
4950   // Go over each instruction and look at memory deps.
4951   if (!canVectorizeMemory()) {
4952     DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
4953     return false;
4954   }
4955
4956   DEBUG(dbgs() << "LV: We can vectorize this loop"
4957                << (LAI->getRuntimePointerChecking()->Need
4958                        ? " (with a runtime bound check)"
4959                        : "")
4960                << "!\n");
4961
4962   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
4963
4964   // If an override option has been passed in for interleaved accesses, use it.
4965   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
4966     UseInterleaved = EnableInterleavedMemAccesses;
4967
4968   // Analyze interleaved memory accesses.
4969   if (UseInterleaved)
4970     InterleaveInfo.analyzeInterleaving(*getSymbolicStrides());
4971
4972   // Collect all instructions that are known to be uniform after vectorization.
4973   collectLoopUniforms();
4974
4975   // Collect all instructions that are known to be scalar after vectorization.
4976   collectLoopScalars();
4977
4978   unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
4979   if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
4980     SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;
4981
4982   if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
4983     emitAnalysis(VectorizationReport()
4984                  << "Too many SCEV assumptions need to be made and checked "
4985                  << "at runtime");
4986     DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
4987     return false;
4988   }
4989
4990   // Okay! We can vectorize. At this point we don't have any other mem analysis
4991   // which may limit our maximum vectorization factor, so just return true with
4992   // no restrictions.
4993   return true;
4994 }
4995
4996 static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
4997   if (Ty->isPointerTy())
4998     return DL.getIntPtrType(Ty);
4999
5000   // It is possible that chars or shorts overflow when we ask for the loop's
5001   // trip count; work around this by changing the type size.
5002   if (Ty->getScalarSizeInBits() < 32)
5003     return Type::getInt32Ty(Ty->getContext());
5004
5005   return Ty;
5006 }
5007
5008 static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
5009   Ty0 = convertPointerToIntegerType(DL, Ty0);
5010   Ty1 = convertPointerToIntegerType(DL, Ty1);
5011   if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
5012     return Ty0;
5013   return Ty1;
5014 }
5015
5016 /// \brief Check whether the instruction has outside-loop users and is not an
5017 /// identified reduction or induction variable.
5018 static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
5019                                SmallPtrSetImpl<Value *> &AllowedExit) {
5020   // Reduction and Induction instructions are allowed to have exit users. All
5021   // other instructions must not have external users.
5022   if (!AllowedExit.count(Inst))
5023     // Check that all of the users of the instruction are inside the loop.
5024     for (User *U : Inst->users()) {
5025       Instruction *UI = cast<Instruction>(U);
5026       // This user may be a reduction exit value.
5027       if (!TheLoop->contains(UI)) {
5028         DEBUG(dbgs() << "LV: Found an outside user for: " << *UI << '\n');
5029         return true;
5030       }
5031     }
5032   return false;
5033 }
5034
5035 void LoopVectorizationLegality::addInductionPhi(
5036     PHINode *Phi, const InductionDescriptor &ID,
5037     SmallPtrSetImpl<Value *> &AllowedExit) {
5038   Inductions[Phi] = ID;
5039   Type *PhiTy = Phi->getType();
5040   const DataLayout &DL = Phi->getModule()->getDataLayout();
5041
5042   // Get the widest type.
5043   if (!PhiTy->isFloatingPointTy()) {
5044     if (!WidestIndTy)
5045       WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
5046     else
5047       WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
5048   }
5049
5050   // Int inductions are special because we only allow one IV.
5051   if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
5052       ID.getConstIntStepValue() &&
5053       ID.getConstIntStepValue()->isOne() &&
5054       isa<Constant>(ID.getStartValue()) &&
5055       cast<Constant>(ID.getStartValue())->isNullValue()) {
5056
5057     // Use the phi node with the widest type as induction. Use the last
5058     // one if there are multiple (no good reason for doing this other
5059     // than it is expedient). We've checked that it begins at zero and
5060     // steps by one, so this is a canonical induction variable.
5061     if (!Induction || PhiTy == WidestIndTy)
5062       Induction = Phi;
5063   }
5064
5065   // Both the PHI node itself, and the "post-increment" value feeding
5066   // back into the PHI node may have external users.
5067   AllowedExit.insert(Phi);
5068   AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
5069
5070   DEBUG(dbgs() << "LV: Found an induction variable.\n");
5071   return;
5072 }
5073
5074 bool LoopVectorizationLegality::canVectorizeInstrs() {
5075   BasicBlock *Header = TheLoop->getHeader();
5076
5077   // Look for the attribute signaling the absence of NaNs.
5078   Function &F = *Header->getParent();
5079   HasFunNoNaNAttr =
5080       F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
5081
5082   // For each block in the loop.
5083   for (BasicBlock *BB : TheLoop->blocks()) {
5084     // Scan the instructions in the block and look for hazards.
5085     for (Instruction &I : *BB) {
5086       if (auto *Phi = dyn_cast<PHINode>(&I)) {
5087         Type *PhiTy = Phi->getType();
5088         // Check that this PHI type is allowed.
5089         if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
5090             !PhiTy->isPointerTy()) {
5091           emitAnalysis(VectorizationReport(Phi)
5092                        << "loop control flow is not understood by vectorizer");
5093           DEBUG(dbgs() << "LV: Found a non-int, non-pointer PHI.\n");
5094           return false;
5095         }
5096
5097         // If this PHINode is not in the header block, then we know that we
5098         // can convert it to a select during if-conversion. No need to check if
5099         // the PHIs in this block are induction or reduction variables.
5100         if (BB != Header) {
5101           // Check that this instruction has no outside users or is an
5102           // identified reduction value with an outside user.
5103           if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
5104             continue;
5105           emitAnalysis(VectorizationReport(Phi)
5106                        << "value could not be identified as "
5107                           "an induction or reduction variable");
5108           return false;
5109         }
5110
5111         // We only allow if-converted PHIs with exactly two incoming values.
5112         if (Phi->getNumIncomingValues() != 2) {
5113           emitAnalysis(VectorizationReport(Phi)
5114                        << "control flow not understood by vectorizer");
5115           DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
5116           return false;
5117         }
5118
5119         RecurrenceDescriptor RedDes;
5120         if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
5121           if (RedDes.hasUnsafeAlgebra())
5122             Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
5123           AllowedExit.insert(RedDes.getLoopExitInstr());
5124           Reductions[Phi] = RedDes;
5125           continue;
5126         }
5127
5128         InductionDescriptor ID;
5129         if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
5130           addInductionPhi(Phi, ID, AllowedExit);
5131           if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr)
5132             Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst());
5133           continue;
5134         }
5135
5136         if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) {
5137           FirstOrderRecurrences.insert(Phi);
5138           continue;
5139         }
5140
5141         // As a last resort, coerce the PHI to an AddRec expression
5142         // and re-try classifying it as an induction PHI.
5143         if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
5144           addInductionPhi(Phi, ID, AllowedExit);
5145           continue;
5146         }
5147
5148         emitAnalysis(VectorizationReport(Phi)
5149                      << "value that could not be identified as "
5150                         "reduction is used outside the loop");
5151         DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
5152         return false;
5153       } // end of PHI handling
5154
5155       // We handle calls that:
5156       //   * Are debug info intrinsics.
5157       //   * Have a mapping to an IR intrinsic.
5158       //   * Have a vector version available.
5159       auto *CI = dyn_cast<CallInst>(&I);
5160       if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
5161           !isa<DbgInfoIntrinsic>(CI) &&
5162           !(CI->getCalledFunction() && TLI &&
5163             TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
5164         emitAnalysis(VectorizationReport(CI)
5165                      << "call instruction cannot be vectorized");
5166         DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
5167         return false;
5168       }
5169
5170       // Intrinsics such as powi, cttz, and ctlz are legal to vectorize if the
5171       // second argument is the same (i.e.
loop invariant) 5172 if (CI && hasVectorInstrinsicScalarOpd( 5173 getVectorIntrinsicIDForCall(CI, TLI), 1)) { 5174 auto *SE = PSE.getSE(); 5175 if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) { 5176 emitAnalysis(VectorizationReport(CI) 5177 << "intrinsic instruction cannot be vectorized"); 5178 DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n"); 5179 return false; 5180 } 5181 } 5182 5183 // Check that the instruction return type is vectorizable. 5184 // Also, we can't vectorize extractelement instructions. 5185 if ((!VectorType::isValidElementType(I.getType()) && 5186 !I.getType()->isVoidTy()) || 5187 isa<ExtractElementInst>(I)) { 5188 emitAnalysis(VectorizationReport(&I) 5189 << "instruction return type cannot be vectorized"); 5190 DEBUG(dbgs() << "LV: Found unvectorizable type.\n"); 5191 return false; 5192 } 5193 5194 // Check that the stored type is vectorizable. 5195 if (auto *ST = dyn_cast<StoreInst>(&I)) { 5196 Type *T = ST->getValueOperand()->getType(); 5197 if (!VectorType::isValidElementType(T)) { 5198 emitAnalysis(VectorizationReport(ST) 5199 << "store instruction cannot be vectorized"); 5200 return false; 5201 } 5202 5203 // FP instructions can allow unsafe algebra, thus vectorizable by 5204 // non-IEEE-754 compliant SIMD units. 5205 // This applies to floating-point math operations and calls, not memory 5206 // operations, shuffles, or casts, as they don't change precision or 5207 // semantics. 5208 } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) && 5209 !I.hasUnsafeAlgebra()) { 5210 DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n"); 5211 Hints->setPotentiallyUnsafe(); 5212 } 5213 5214 // Reduction instructions are allowed to have exit users. 5215 // All other instructions must not have external users. 5216 if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) { 5217 emitAnalysis(VectorizationReport(&I) 5218 << "value cannot be used outside the loop"); 5219 return false; 5220 } 5221 5222 } // next instr. 5223 } 5224 5225 if (!Induction) { 5226 DEBUG(dbgs() << "LV: Did not find one integer induction var.\n"); 5227 if (Inductions.empty()) { 5228 emitAnalysis(VectorizationReport() 5229 << "loop induction variable could not be identified"); 5230 return false; 5231 } 5232 } 5233 5234 // Now we know the widest induction type, check if our found induction 5235 // is the same size. If it's not, unset it here and InnerLoopVectorizer 5236 // will create another. 5237 if (Induction && WidestIndTy != Induction->getType()) 5238 Induction = nullptr; 5239 5240 return true; 5241 } 5242 5243 void LoopVectorizationLegality::collectLoopScalars() { 5244 5245 // If an instruction is uniform after vectorization, it will remain scalar. 5246 Scalars.insert(Uniforms.begin(), Uniforms.end()); 5247 5248 // Collect the getelementptr instructions that will not be vectorized. A 5249 // getelementptr instruction is only vectorized if it is used for a legal 5250 // gather or scatter operation. 5251 for (auto *BB : TheLoop->blocks()) 5252 for (auto &I : *BB) { 5253 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 5254 Scalars.insert(GEP); 5255 continue; 5256 } 5257 auto *Ptr = getPointerOperand(&I); 5258 if (!Ptr) 5259 continue; 5260 auto *GEP = getGEPInstruction(Ptr); 5261 if (GEP && isLegalGatherOrScatter(&I)) 5262 Scalars.erase(GEP); 5263 } 5264 5265 // An induction variable will remain scalar if all users of the induction 5266 // variable and induction variable update remain scalar. 
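  //
  // For example (a sketch): in
  //
  //   for (int i = 0; i < n; ++i)
  //     A[i] = 42;
  //
  // the induction i and its update are used only by each other and by the
  // (scalar) address computation, so both remain scalar. If the loop instead
  // stored i itself (A[i] = i), the stored value would require a vector of
  // lane values, and i could not remain scalar.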
5267   auto *Latch = TheLoop->getLoopLatch();
5268   for (auto &Induction : *getInductionVars()) {
5269     auto *Ind = Induction.first;
5270     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5271
5272     // Determine if all users of the induction variable are scalar after
5273     // vectorization.
5274     auto ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
5275       auto *I = cast<Instruction>(U);
5276       return I == IndUpdate || !TheLoop->contains(I) || Scalars.count(I);
5277     });
5278     if (!ScalarInd)
5279       continue;
5280
5281     // Determine if all users of the induction variable update instruction are
5282     // scalar after vectorization.
5283     auto ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
5284       auto *I = cast<Instruction>(U);
5285       return I == Ind || !TheLoop->contains(I) || Scalars.count(I);
5286     });
5287     if (!ScalarIndUpdate)
5288       continue;
5289
5290     // The induction variable and its update instruction will remain scalar.
5291     Scalars.insert(Ind);
5292     Scalars.insert(IndUpdate);
5293   }
5294 }
5295
5296 bool LoopVectorizationLegality::hasConsecutiveLikePtrOperand(Instruction *I) {
5297   if (isAccessInterleaved(I))
5298     return true;
5299   if (auto *Ptr = getPointerOperand(I))
5300     return isConsecutivePtr(Ptr);
5301   return false;
5302 }
5303
5304 bool LoopVectorizationLegality::isPredicatedStore(Instruction *I) {
5305   auto *SI = dyn_cast<StoreInst>(I);
5306   return SI && blockNeedsPredication(SI->getParent()) && !isMaskRequired(SI);
5307 }
5308
5309 bool LoopVectorizationLegality::memoryInstructionMustBeScalarized(
5310     Instruction *I, unsigned VF) {
5311
5312   // If the memory instruction is in an interleaved group, it will be
5313   // vectorized and its pointer will remain uniform.
5314   if (isAccessInterleaved(I))
5315     return false;
5316
5317   // Get and ensure we have a valid memory instruction.
5318   LoadInst *LI = dyn_cast<LoadInst>(I);
5319   StoreInst *SI = dyn_cast<StoreInst>(I);
5320   assert((LI || SI) && "Invalid memory instruction");
5321
5322   // If the pointer operand is uniform (loop invariant), the memory instruction
5323   // will be scalarized.
5324   auto *Ptr = getPointerOperand(I);
5325   if (LI && isUniform(Ptr))
5326     return true;
5327
5328   // If the pointer operand is non-consecutive and neither a gather nor a
5329   // scatter operation is legal, the memory instruction will be scalarized.
5330   if (!isConsecutivePtr(Ptr) && !isLegalGatherOrScatter(I))
5331     return true;
5332
5333   // If the instruction is a store located in a predicated block, it will be
5334   // scalarized.
5335   if (isPredicatedStore(I))
5336     return true;
5337
5338   // If the instruction's allocated size doesn't equal its type size, it
5339   // requires padding and will be scalarized.
5340   auto &DL = I->getModule()->getDataLayout();
5341   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5342   if (hasIrregularType(ScalarTy, DL, VF))
5343     return true;
5344
5345   // Otherwise, the memory instruction should be vectorized if the rest of the
5346   // loop is.
5347   return false;
5348 }
5349
5350 void LoopVectorizationLegality::collectLoopUniforms() {
5351   // We now know that the loop is vectorizable!
5352   // Collect instructions inside the loop that will remain uniform after
5353   // vectorization.
5354
5355   // Global values, params and instructions outside of the current loop are out
5356   // of scope.
5357   auto isOutOfScope = [&](Value *V) -> bool {
5358     Instruction *I = dyn_cast<Instruction>(V);
5359     return (!I || !TheLoop->contains(I));
5360   };
5361
5362   SetVector<Instruction *> Worklist;
5363   BasicBlock *Latch = TheLoop->getLoopLatch();
5364   // Start with the conditional branch.
5365   if (!isOutOfScope(Latch->getTerminator()->getOperand(0))) {
5366     Instruction *Cmp = cast<Instruction>(Latch->getTerminator()->getOperand(0));
5367     Worklist.insert(Cmp);
5368     DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
5369   }
5370
5371   // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
5372   // are pointers that are treated like consecutive pointers during
5373   // vectorization. The pointer operands of interleaved accesses are an
5374   // example.
5375   SmallPtrSet<Instruction *, 8> ConsecutiveLikePtrs;
5376
5377   // Holds pointer operands of instructions that are possibly non-uniform.
5378   SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
5379
5380   // Iterate over the instructions in the loop, and collect all
5381   // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
5382   // that a consecutive-like pointer operand will be scalarized, we collect it
5383   // in PossibleNonUniformPtrs instead. We use two sets here because a single
5384   // getelementptr instruction can be used by both vectorized and scalarized
5385   // memory instructions. For example, if a loop loads and stores from the same
5386   // location, but the store is conditional, the store will be scalarized, and
5387   // the getelementptr won't remain uniform.
5388   for (auto *BB : TheLoop->blocks())
5389     for (auto &I : *BB) {
5390
5391       // If there's no pointer operand, there's nothing to do.
5392       auto *Ptr = dyn_cast_or_null<Instruction>(getPointerOperand(&I));
5393       if (!Ptr)
5394         continue;
5395
5396       // Ensure the memory instruction will not be scalarized, making its
5397       // pointer operand non-uniform.
5398       if (memoryInstructionMustBeScalarized(&I))
5399         PossibleNonUniformPtrs.insert(Ptr);
5400
5401       // If the memory instruction will be vectorized and its pointer operand
5402       // is consecutive-like, the pointer operand should remain uniform.
5403       else if (hasConsecutiveLikePtrOperand(&I))
5404         ConsecutiveLikePtrs.insert(Ptr);
5405     }
5406
5407   // Add to the Worklist all consecutive and consecutive-like pointers that
5408   // aren't also identified as possibly non-uniform.
5409   for (auto *V : ConsecutiveLikePtrs)
5410     if (!PossibleNonUniformPtrs.count(V)) {
5411       DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
5412       Worklist.insert(V);
5413     }
5414
5415   // Expand Worklist in topological order: whenever a new instruction
5416   // is added, its users should be either already inside Worklist, or
5417   // out of scope. This ensures a uniform instruction will only be used
5418   // by uniform instructions or out-of-scope instructions.
5419   unsigned idx = 0;
5420   while (idx != Worklist.size()) {
5421     Instruction *I = Worklist[idx++];
5422
5423     for (auto OV : I->operand_values()) {
5424       if (isOutOfScope(OV))
5425         continue;
5426       auto *OI = cast<Instruction>(OV);
5427       if (all_of(OI->users(), [&](User *U) -> bool {
5428             return isOutOfScope(U) || Worklist.count(cast<Instruction>(U));
5429           })) {
5430         Worklist.insert(OI);
5431         DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
5432       }
5433     }
5434   }
5435
5436   // For an instruction to be added into the Worklist above, all its users
5437   // inside the current loop must already be in the Worklist. This condition
5438   // cannot hold for phi nodes, which always sit in a dependence cycle:
5439   // every instruction in the cycle depends on the others being added to the
5440   // Worklist first, so no instruction in the cycle would ever be added.
5441   // That is why we process PHIs (the induction variables and their updates)
5442   // separately below.
5443   for (auto &Induction : *getInductionVars()) {
5444     auto *PN = Induction.first;
5445     auto *UpdateV = PN->getIncomingValueForBlock(TheLoop->getLoopLatch());
5446     if (all_of(PN->users(),
5447                [&](User *U) -> bool {
5448                  return U == UpdateV || isOutOfScope(U) ||
5449                         Worklist.count(cast<Instruction>(U));
5450                }) &&
5451         all_of(UpdateV->users(), [&](User *U) -> bool {
5452           return U == PN || isOutOfScope(U) ||
5453                  Worklist.count(cast<Instruction>(U));
5454         })) {
5455       Worklist.insert(cast<Instruction>(PN));
5456       Worklist.insert(cast<Instruction>(UpdateV));
5457       DEBUG(dbgs() << "LV: Found uniform instruction: " << *PN << "\n");
5458       DEBUG(dbgs() << "LV: Found uniform instruction: " << *UpdateV << "\n");
5459     }
5460   }
5461
5462   Uniforms.insert(Worklist.begin(), Worklist.end());
5463 }
5464
5465 bool LoopVectorizationLegality::canVectorizeMemory() {
5466   LAI = &(*GetLAA)(*TheLoop);
5467   InterleaveInfo.setLAI(LAI);
5468   auto &OptionalReport = LAI->getReport();
5469   if (OptionalReport)
5470     emitAnalysis(VectorizationReport(*OptionalReport));
5471   if (!LAI->canVectorizeMemory())
5472     return false;
5473
5474   if (LAI->hasStoreToLoopInvariantAddress()) {
5475     emitAnalysis(
5476         VectorizationReport()
5477         << "write to a loop invariant address could not be vectorized");
5478     DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
5479     return false;
5480   }
5481
5482   Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
5483   PSE.addPredicate(LAI->getPSE().getUnionPredicate());
5484
5485   return true;
5486 }
5487
5488 bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
5489   Value *In0 = const_cast<Value *>(V);
5490   PHINode *PN = dyn_cast_or_null<PHINode>(In0);
5491   if (!PN)
5492     return false;
5493
5494   return Inductions.count(PN);
5495 }
5496
5497 bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) {
5498   return FirstOrderRecurrences.count(Phi);
5499 }
5500
5501 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
5502   return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
5503 }
5504
5505 bool LoopVectorizationLegality::blockCanBePredicated(
5506     BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) {
5507   const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
5508
5509   for (Instruction &I : *BB) {
5510     // Check that we don't have a constant expression that can trap as an operand.
5511     for (Value *Operand : I.operands()) {
5512       if (auto *C = dyn_cast<Constant>(Operand))
5513         if (C->canTrap())
5514           return false;
5515     }
5516     // We might be able to hoist the load.
5517     if (I.mayReadFromMemory()) {
5518       auto *LI = dyn_cast<LoadInst>(&I);
5519       if (!LI)
5520         return false;
5521       if (!SafePtrs.count(LI->getPointerOperand())) {
5522         if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) ||
5523             isLegalMaskedGather(LI->getType())) {
5524           MaskedOp.insert(LI);
5525           continue;
5526         }
5527         // !llvm.mem.parallel_loop_access implies if-conversion safety.
5528 if (IsAnnotatedParallel) 5529 continue; 5530 return false; 5531 } 5532 } 5533 5534 if (I.mayWriteToMemory()) { 5535 auto *SI = dyn_cast<StoreInst>(&I); 5536 // We only support predication of stores in basic blocks with one 5537 // predecessor. 5538 if (!SI) 5539 return false; 5540 5541 // Build a masked store if it is legal for the target. 5542 if (isLegalMaskedStore(SI->getValueOperand()->getType(), 5543 SI->getPointerOperand()) || 5544 isLegalMaskedScatter(SI->getValueOperand()->getType())) { 5545 MaskedOp.insert(SI); 5546 continue; 5547 } 5548 5549 bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0); 5550 bool isSinglePredecessor = SI->getParent()->getSinglePredecessor(); 5551 5552 if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr || 5553 !isSinglePredecessor) 5554 return false; 5555 } 5556 if (I.mayThrow()) 5557 return false; 5558 } 5559 5560 return true; 5561 } 5562 5563 void InterleavedAccessInfo::collectConstStrideAccesses( 5564 MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo, 5565 const ValueToValueMap &Strides) { 5566 5567 auto &DL = TheLoop->getHeader()->getModule()->getDataLayout(); 5568 5569 // Since it's desired that the load/store instructions be maintained in 5570 // "program order" for the interleaved access analysis, we have to visit the 5571 // blocks in the loop in reverse postorder (i.e., in a topological order). 5572 // Such an ordering will ensure that any load/store that may be executed 5573 // before a second load/store will precede the second load/store in 5574 // AccessStrideInfo. 5575 LoopBlocksDFS DFS(TheLoop); 5576 DFS.perform(LI); 5577 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) 5578 for (auto &I : *BB) { 5579 auto *LI = dyn_cast<LoadInst>(&I); 5580 auto *SI = dyn_cast<StoreInst>(&I); 5581 if (!LI && !SI) 5582 continue; 5583 5584 Value *Ptr = getPointerOperand(&I); 5585 int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides); 5586 5587 const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 5588 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); 5589 uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType()); 5590 5591 // An alignment of 0 means target ABI alignment. 5592 unsigned Align = LI ? LI->getAlignment() : SI->getAlignment(); 5593 if (!Align) 5594 Align = DL.getABITypeAlignment(PtrTy->getElementType()); 5595 5596 AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align); 5597 } 5598 } 5599 5600 // Analyze interleaved accesses and collect them into interleaved load and 5601 // store groups. 5602 // 5603 // When generating code for an interleaved load group, we effectively hoist all 5604 // loads in the group to the location of the first load in program order. When 5605 // generating code for an interleaved store group, we sink all stores to the 5606 // location of the last store. This code motion can change the order of load 5607 // and store instructions and may break dependences. 5608 // 5609 // The code generation strategy mentioned above ensures that we won't violate 5610 // any write-after-read (WAR) dependences. 5611 // 5612 // E.g., for the WAR dependence: a = A[i]; // (1) 5613 // A[i] = b; // (2) 5614 // 5615 // The store group of (2) is always inserted at or below (2), and the load 5616 // group of (1) is always inserted at or above (1). Thus, the instructions will 5617 // never be reordered. All other dependences are checked to ensure the 5618 // correctness of the instruction reordering. 
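//
// As a shorthand sketch of the code generation this enables (illustration
// only, not emitted here), a stride-2 load group such as
//
//   x = A[2*i];     // member at index 0
//   y = A[2*i + 1]; // member at index 1
//
// becomes, for VF = 4, a single wide load followed by two shuffles:
//
//   %wide.vec = load <8 x i32>, <8 x i32>* %ptr
//   %x = shufflevector %wide.vec, undef, <0, 2, 4, 6>
//   %y = shufflevector %wide.vec, undef, <1, 3, 5, 7>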
5619 // 5620 // The algorithm visits all memory accesses in the loop in bottom-up program 5621 // order. Program order is established by traversing the blocks in the loop in 5622 // reverse postorder when collecting the accesses. 5623 // 5624 // We visit the memory accesses in bottom-up order because it can simplify the 5625 // construction of store groups in the presence of write-after-write (WAW) 5626 // dependences. 5627 // 5628 // E.g., for the WAW dependence: A[i] = a; // (1) 5629 // A[i] = b; // (2) 5630 // A[i + 1] = c; // (3) 5631 // 5632 // We will first create a store group with (3) and (2). (1) can't be added to 5633 // this group because it and (2) are dependent. However, (1) can be grouped 5634 // with other accesses that may precede it in program order. Note that a 5635 // bottom-up order does not imply that WAW dependences should not be checked. 5636 void InterleavedAccessInfo::analyzeInterleaving( 5637 const ValueToValueMap &Strides) { 5638 DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n"); 5639 5640 // Holds all accesses with a constant stride. 5641 MapVector<Instruction *, StrideDescriptor> AccessStrideInfo; 5642 collectConstStrideAccesses(AccessStrideInfo, Strides); 5643 5644 if (AccessStrideInfo.empty()) 5645 return; 5646 5647 // Collect the dependences in the loop. 5648 collectDependences(); 5649 5650 // Holds all interleaved store groups temporarily. 5651 SmallSetVector<InterleaveGroup *, 4> StoreGroups; 5652 // Holds all interleaved load groups temporarily. 5653 SmallSetVector<InterleaveGroup *, 4> LoadGroups; 5654 5655 // Search in bottom-up program order for pairs of accesses (A and B) that can 5656 // form interleaved load or store groups. In the algorithm below, access A 5657 // precedes access B in program order. We initialize a group for B in the 5658 // outer loop of the algorithm, and then in the inner loop, we attempt to 5659 // insert each A into B's group if: 5660 // 5661 // 1. A and B have the same stride, 5662 // 2. A and B have the same memory object size, and 5663 // 3. A belongs in B's group according to its distance from B. 5664 // 5665 // Special care is taken to ensure group formation will not break any 5666 // dependences. 5667 for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend(); 5668 BI != E; ++BI) { 5669 Instruction *B = BI->first; 5670 StrideDescriptor DesB = BI->second; 5671 5672 // Initialize a group for B if it has an allowable stride. Even if we don't 5673 // create a group for B, we continue with the bottom-up algorithm to ensure 5674 // we don't break any of B's dependences. 5675 InterleaveGroup *Group = nullptr; 5676 if (isStrided(DesB.Stride)) { 5677 Group = getInterleaveGroup(B); 5678 if (!Group) { 5679 DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n'); 5680 Group = createInterleaveGroup(B, DesB.Stride, DesB.Align); 5681 } 5682 if (B->mayWriteToMemory()) 5683 StoreGroups.insert(Group); 5684 else 5685 LoadGroups.insert(Group); 5686 } 5687 5688 for (auto AI = std::next(BI); AI != E; ++AI) { 5689 Instruction *A = AI->first; 5690 StrideDescriptor DesA = AI->second; 5691 5692 // Our code motion strategy implies that we can't have dependences 5693 // between accesses in an interleaved group and other accesses located 5694 // between the first and last member of the group. Note that this also 5695 // means that a group can't have more than one member at a given offset. 
5696 // The accesses in a group can have dependences with other accesses, but 5697 // we must ensure we don't extend the boundaries of the group such that 5698 // we encompass those dependent accesses. 5699 // 5700 // For example, assume we have the sequence of accesses shown below in a 5701 // stride-2 loop: 5702 // 5703 // (1, 2) is a group | A[i] = a; // (1) 5704 // | A[i-1] = b; // (2) | 5705 // A[i-3] = c; // (3) 5706 // A[i] = d; // (4) | (2, 4) is not a group 5707 // 5708 // Because accesses (2) and (3) are dependent, we can group (2) with (1) 5709 // but not with (4). If we did, the dependent access (3) would be within 5710 // the boundaries of the (2, 4) group. 5711 if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) { 5712 5713 // If a dependence exists and A is already in a group, we know that A 5714 // must be a store since A precedes B and WAR dependences are allowed. 5715 // Thus, A would be sunk below B. We release A's group to prevent this 5716 // illegal code motion. A will then be free to form another group with 5717 // instructions that precede it. 5718 if (isInterleaved(A)) { 5719 InterleaveGroup *StoreGroup = getInterleaveGroup(A); 5720 StoreGroups.remove(StoreGroup); 5721 releaseGroup(StoreGroup); 5722 } 5723 5724 // If a dependence exists and A is not already in a group (or it was 5725 // and we just released it), B might be hoisted above A (if B is a 5726 // load) or another store might be sunk below A (if B is a store). In 5727 // either case, we can't add additional instructions to B's group. B 5728 // will only form a group with instructions that it precedes. 5729 break; 5730 } 5731 5732 // At this point, we've checked for illegal code motion. If either A or B 5733 // isn't strided, there's nothing left to do. 5734 if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride)) 5735 continue; 5736 5737 // Ignore A if it's already in a group or isn't the same kind of memory 5738 // operation as B. 5739 if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory()) 5740 continue; 5741 5742 // Check rules 1 and 2. Ignore A if its stride or size is different from 5743 // that of B. 5744 if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size) 5745 continue; 5746 5747 // Calculate the distance from A to B. 5748 const SCEVConstant *DistToB = dyn_cast<SCEVConstant>( 5749 PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev)); 5750 if (!DistToB) 5751 continue; 5752 int64_t DistanceToB = DistToB->getAPInt().getSExtValue(); 5753 5754 // Check rule 3. Ignore A if its distance to B is not a multiple of the 5755 // size. 5756 if (DistanceToB % static_cast<int64_t>(DesB.Size)) 5757 continue; 5758 5759 // Ignore A if either A or B is in a predicated block. Although we 5760 // currently prevent group formation for predicated accesses, we may be 5761 // able to relax this limitation in the future once we handle more 5762 // complicated blocks. 5763 if (isPredicated(A->getParent()) || isPredicated(B->getParent())) 5764 continue; 5765 5766 // The index of A is the index of B plus A's distance to B in multiples 5767 // of the size. 5768 int IndexA = 5769 Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size); 5770 5771 // Try to insert A into B's group. 5772 if (Group->insertMember(A, IndexA, DesA.Align)) { 5773 DEBUG(dbgs() << "LV: Inserted:" << *A << '\n' 5774 << " into the interleave group with" << *B << '\n'); 5775 InterleaveGroupMap[A] = Group; 5776 5777 // Set the first load in program order as the insert position. 
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  } // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (InterleaveGroup *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor())
      releaseGroup(Group);

  // If there is a non-reversed interleaved load group with gaps, we will need
  // to execute at least one scalar epilogue iteration. This will ensure that
  // we don't speculatively access memory out-of-bounds. Note that we only need
  // to look for a member at index factor - 1, since every group must have a
  // member at index zero.
  for (InterleaveGroup *Group : LoadGroups)
    if (!Group->getMember(Group->getFactor() - 1)) {
      if (Group->isReverse()) {
        releaseGroup(Group);
      } else {
        DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
        RequiresScalarEpilogue = true;
      }
    }
}

LoopVectorizationCostModel::VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
  // Width 1 means no vectorization.
  VectorizationFactor Factor = {1U, 0U};
  if (OptForSize && Legal->getRuntimePointerChecking()->Need) {
    emitAnalysis(
        VectorizationReport()
        << "runtime pointer checks needed. Enable vectorization of this "
           "loop with '#pragma clang loop vectorize(enable)' when "
           "compiling with -Os/-Oz");
    DEBUG(dbgs()
          << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
    return Factor;
  }

  if (!EnableCondStoresVectorization && Legal->getNumPredStores()) {
    emitAnalysis(
        VectorizationReport()
        << "store that is conditionally executed prevents vectorization");
    DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
    return Factor;
  }

  // Find the trip count.
  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');

  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  unsigned WidestRegister = TTI.getRegisterBitWidth(true);
  unsigned MaxSafeDepDist = -1U;

  // Get the maximum safe dependence distance in bits computed by LAA. If the
  // loop contains any interleaved accesses, we divide the dependence distance
  // by the maximum interleave factor of all interleaved groups. Note that
  // although the division ensures correctness, this is a fairly conservative
  // computation because the maximum distance computed by LAA may not involve
  // any of the interleaved accesses.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist =
        Legal->getMaxSafeDepDistBytes() * 8 / Legal->getMaxInterleaveFactor();

  WidestRegister =
      ((WidestRegister < MaxSafeDepDist) ? WidestRegister : MaxSafeDepDist);
  unsigned MaxVectorSize = WidestRegister / WidestType;

  DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
               << WidestType << " bits.\n");
  DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister
               << " bits.\n");

  if (MaxVectorSize == 0) {
    DEBUG(dbgs() << "LV: The target has no vector registers.\n");
    MaxVectorSize = 1;
  }

  assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
                                " into one vector!");

  unsigned VF = MaxVectorSize;
  if (MaximizeBandwidth && !OptForSize) {
    // Collect all viable vectorization factors.
    SmallVector<unsigned, 8> VFs;
    unsigned NewMaxVectorSize = WidestRegister / SmallestType;
    for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2)
      VFs.push_back(VS);

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than existing
    // ones.
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
    for (int i = RUs.size() - 1; i >= 0; --i) {
      if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
        VF = VFs[i];
        break;
      }
    }
  }

  // If we optimize the program for size, avoid creating the tail loop.
  if (OptForSize) {
    // If we are unable to calculate the trip count, or if it is 1, don't try
    // to vectorize.
    if (TC < 2) {
      emitAnalysis(
          VectorizationReport()
          << "unable to calculate the loop count due to complex control flow");
      DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
      return Factor;
    }

    // Find the maximum SIMD width that can fit within the trip count.
    VF = TC % MaxVectorSize;

    if (VF == 0)
      VF = MaxVectorSize;
    else {
      // If the trip count that we found modulo the vectorization factor is not
      // zero then we require a tail.
      emitAnalysis(VectorizationReport()
                   << "cannot optimize for size and vectorize at the "
                      "same time. Enable vectorization of this loop "
                      "with '#pragma clang loop vectorize(enable)' "
                      "when compiling with -Os/-Oz");
      DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
      return Factor;
    }
  }

  int UserVF = Hints->getWidth();
  if (UserVF != 0) {
    assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
    DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");

    Factor.Width = UserVF;
    return Factor;
  }

  float Cost = expectedCost(1).first;
#ifndef NDEBUG
  const float ScalarCost = Cost;
#endif /* NDEBUG */
  unsigned Width = 1;
  DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  // Ignore scalar width, because the user explicitly wants vectorization.
  if (ForceVectorization && VF > 1) {
    Width = 2;
    Cost = expectedCost(Width).first / (float)Width;
  }

  for (unsigned i = 2; i <= VF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
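    //
    // For example (illustrative numbers): if the scalar loop costs 8 and the
    // loop at VF = 4 costs 20, the normalized per-iteration cost computed
    // below is 20 / 4 = 5, which beats 8, so VF = 4 is preferred over the
    // scalar loop.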
5943 VectorizationCostTy C = expectedCost(i); 5944 float VectorCost = C.first / (float)i; 5945 DEBUG(dbgs() << "LV: Vector loop of width " << i 5946 << " costs: " << (int)VectorCost << ".\n"); 5947 if (!C.second && !ForceVectorization) { 5948 DEBUG( 5949 dbgs() << "LV: Not considering vector loop of width " << i 5950 << " because it will not generate any vector instructions.\n"); 5951 continue; 5952 } 5953 if (VectorCost < Cost) { 5954 Cost = VectorCost; 5955 Width = i; 5956 } 5957 } 5958 5959 DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 5960 << "LV: Vectorization seems to be not beneficial, " 5961 << "but was forced by a user.\n"); 5962 DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 5963 Factor.Width = Width; 5964 Factor.Cost = Width * Cost; 5965 return Factor; 5966 } 5967 5968 std::pair<unsigned, unsigned> 5969 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5970 unsigned MinWidth = -1U; 5971 unsigned MaxWidth = 8; 5972 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5973 5974 // For each block. 5975 for (BasicBlock *BB : TheLoop->blocks()) { 5976 // For each instruction in the loop. 5977 for (Instruction &I : *BB) { 5978 Type *T = I.getType(); 5979 5980 // Skip ignored values. 5981 if (ValuesToIgnore.count(&I)) 5982 continue; 5983 5984 // Only examine Loads, Stores and PHINodes. 5985 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5986 continue; 5987 5988 // Examine PHI nodes that are reduction variables. Update the type to 5989 // account for the recurrence type. 5990 if (auto *PN = dyn_cast<PHINode>(&I)) { 5991 if (!Legal->isReductionVariable(PN)) 5992 continue; 5993 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN]; 5994 T = RdxDesc.getRecurrenceType(); 5995 } 5996 5997 // Examine the stored values. 5998 if (auto *ST = dyn_cast<StoreInst>(&I)) 5999 T = ST->getValueOperand()->getType(); 6000 6001 // Ignore loaded pointer types and stored pointer types that are not 6002 // consecutive. However, we do want to take consecutive stores/loads of 6003 // pointer vectors into account. 6004 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I)) 6005 continue; 6006 6007 MinWidth = std::min(MinWidth, 6008 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 6009 MaxWidth = std::max(MaxWidth, 6010 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 6011 } 6012 } 6013 6014 return {MinWidth, MaxWidth}; 6015 } 6016 6017 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, 6018 unsigned VF, 6019 unsigned LoopCost) { 6020 6021 // -- The interleave heuristics -- 6022 // We interleave the loop in order to expose ILP and reduce the loop overhead. 6023 // There are many micro-architectural considerations that we can't predict 6024 // at this level. For example, frontend pressure (on decode or fetch) due to 6025 // code size, or the number and capabilities of the execution ports. 6026 // 6027 // We use the following heuristics to select the interleave count: 6028 // 1. If the code has reductions, then we interleave to break the cross 6029 // iteration dependency. 6030 // 2. If the loop is really small, then we interleave to reduce the loop 6031 // overhead. 6032 // 3. We don't interleave if we think that we will spill registers to memory 6033 // due to the increased register pressure. 6034 6035 // When we optimize for size, we don't interleave. 6036 if (OptForSize) 6037 return 1; 6038 6039 // We used the distance for the interleave count. 
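  // That is, the maximum safe dependence distance has already been used to
  // clamp the vectorization factor in selectVectorizationFactor, so we do not
  // widen the loop any further by interleaving.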
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  // Do not interleave loops with a relatively small trip count.
  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
    return 1;

  unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
  DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
               << " registers\n");

  if (VF == 1) {
    if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumScalarRegs;
  } else {
    if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumVectorRegs;
  }

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants so assume that we have at least one
  // instruction that uses at least one register.
  R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
  R.NumInstructions = std::max(R.NumInstructions, 1U);

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want the interleave count to be a power of two to
  // simplify any addressing operations or alignment considerations.
  unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
                              R.MaxLocalUsers);

  // Don't count the induction variable as interleaved.
  if (EnableIndVarRegisterHeur)
    IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
                       std::max(1U, (R.MaxLocalUsers - 1)));

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);

  // Check if the user has overridden the max.
  if (VF == 1) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }

  // If we did not calculate the cost for VF (because the user selected the VF)
  // then we calculate the cost of VF here.
  if (LoopCost == 0)
    LoopCost = expectedCost(VF).first;

  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target allows.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else if (IC < 1)
    IC = 1;

  // Interleave if we vectorized this loop and there is a reduction that could
  // benefit from interleaving.
  if (VF > 1 && Legal->getReductionVars()->size()) {
    DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // Note that if we've already vectorized the loop we will have done the
  // runtime check and so interleaving won't require further checks.
  bool InterleavingRequiresRuntimePointerCheck =
      (VF == 1 && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead and
  // potentially expose ILP opportunities.
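  //
  // For example (illustrative numbers): if LoopCost is 5 and SmallLoopCost is
  // 20, the small-loop path below picks
  // SmallIC = min(IC, PowerOf2Floor(20 / 5)) = min(IC, 4), i.e. the loop body
  // is replicated until its overhead is amortized to roughly 5% of the total
  // cost.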
  DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the cost overhead is 1 and we use the cost model
    // to estimate the cost of the loop and interleave until the cost of the
    // loop overhead is about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by max interleave count)
    // are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit, by default, to 2 so
    // the critical path only gets increased by one reduction operation.
    if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
    return SmallIC;
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  bool HasReductions = (Legal->getReductionVars()->size() > 0);
  if (TTI.enableAggressiveInterleaving(HasReductions)) {
    DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is a
  // very rough estimation. We scan the loop in a topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi map that
  // holds the list of intervals that *end* at a specific location. This multi
  // map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but
  // are used inside the loop. We need this number separately from the
  // max-interval usage number because when we unroll, loop-invariant values
  // do not take more registers.
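  //
  // For example (an illustrative sketch), for the indexed sequence
  //   0: %a = load ...
  //   1: %b = add %a, 1
  //   2: %c = mul %a, %b
  //   3: store %c
  // the values %a and %b are both still live when the mul at index 2 is
  // reached, so the maximum register usage at VF = 1 is 2.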
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;
  RU.NumInstructions = 0;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  typedef DenseMap<Instruction *, unsigned> IntervalMap;
  // Maps an index to the instruction assigned that index.
  DenseMap<unsigned, Instruction *> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
  SmallSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  unsigned Index = 0;
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    RU.NumInstructions += BB->size();
    for (Instruction &I : *BB) {
      IdxToInstr[Index++] = &I;

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = Index;
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  typedef SmallVector<Instruction *, 2> InstrList;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallSet<Instruction *, 8> OpenIntervals;

  // Get the size of the widest register.
  unsigned MaxSafeDepDist = -1U;
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
  unsigned WidestRegister =
      std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);

  DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
  auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
    if (Ty->isTokenTy())
      return 0U;
    unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
    return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
  };

  for (unsigned int i = 0; i < Index; ++i) {
    Instruction *I = IdxToInstr[i];
    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I))
      continue;

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;

    // For each VF find the maximum usage of registers.
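    // For example (illustrative): with a 128-bit widest register, an open
    // interval of i32 values at VF = 8 occupies max(1, 8 * 32 / 128) = 2
    // registers per the GetRegUsage lambda above.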
6281 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6282 if (VFs[j] == 1) { 6283 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 6284 continue; 6285 } 6286 6287 // Count the number of live intervals. 6288 unsigned RegUsage = 0; 6289 for (auto Inst : OpenIntervals) { 6290 // Skip ignored values for VF > 1. 6291 if (VecValuesToIgnore.count(Inst)) 6292 continue; 6293 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 6294 } 6295 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 6296 } 6297 6298 DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6299 << OpenIntervals.size() << '\n'); 6300 6301 // Add the current instruction to the list of open intervals. 6302 OpenIntervals.insert(I); 6303 } 6304 6305 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6306 unsigned Invariant = 0; 6307 if (VFs[i] == 1) 6308 Invariant = LoopInvariants.size(); 6309 else { 6310 for (auto Inst : LoopInvariants) 6311 Invariant += GetRegUsage(Inst->getType(), VFs[i]); 6312 } 6313 6314 DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); 6315 DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); 6316 DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n'); 6317 DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n'); 6318 6319 RU.LoopInvariantRegs = Invariant; 6320 RU.MaxLocalUsers = MaxUsages[i]; 6321 RUs[i] = RU; 6322 } 6323 6324 return RUs; 6325 } 6326 6327 LoopVectorizationCostModel::VectorizationCostTy 6328 LoopVectorizationCostModel::expectedCost(unsigned VF) { 6329 VectorizationCostTy Cost; 6330 6331 // For each block. 6332 for (BasicBlock *BB : TheLoop->blocks()) { 6333 VectorizationCostTy BlockCost; 6334 6335 // For each instruction in the old loop. 6336 for (Instruction &I : *BB) { 6337 // Skip dbg intrinsics. 6338 if (isa<DbgInfoIntrinsic>(I)) 6339 continue; 6340 6341 // Skip ignored values. 6342 if (ValuesToIgnore.count(&I)) 6343 continue; 6344 6345 VectorizationCostTy C = getInstructionCost(&I, VF); 6346 6347 // Check if we should override the cost. 6348 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 6349 C.first = ForceTargetInstructionCost; 6350 6351 BlockCost.first += C.first; 6352 BlockCost.second |= C.second; 6353 DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF " 6354 << VF << " For instruction: " << I << '\n'); 6355 } 6356 6357 // We assume that if-converted blocks have a 50% chance of being executed. 6358 // When the code is scalar then some of the blocks are avoided due to CF. 6359 // When the code is vectorized we execute all code paths. 6360 if (VF == 1 && Legal->blockNeedsPredication(BB)) 6361 BlockCost.first /= 2; 6362 6363 Cost.first += BlockCost.first; 6364 Cost.second |= BlockCost.second; 6365 } 6366 6367 return Cost; 6368 } 6369 6370 /// \brief Check whether the address computation for a non-consecutive memory 6371 /// access looks like an unlikely candidate for being merged into the indexing 6372 /// mode. 6373 /// 6374 /// We look for a GEP which has one index that is an induction variable and all 6375 /// other indices are loop invariant. If the stride of this access is also 6376 /// within a small bound we decide that this address computation can likely be 6377 /// merged into the addressing mode. 6378 /// In all other cases, we identify the address computation as complex. 
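/// For example (an illustrative sketch), in
///
///   for (i = 0; i < n; ++i)
///     ... = A[inv][i];
///
/// the GEP feeding the load has one induction index, a loop-invariant
/// remaining index, and a small constant step, so the address computation is
/// likely folded into the addressing mode and is not considered complex.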
6379 static bool isLikelyComplexAddressComputation(Value *Ptr, 6380 LoopVectorizationLegality *Legal, 6381 ScalarEvolution *SE, 6382 const Loop *TheLoop) { 6383 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6384 if (!Gep) 6385 return true; 6386 6387 // We are looking for a gep with all loop invariant indices except for one 6388 // which should be an induction variable. 6389 unsigned NumOperands = Gep->getNumOperands(); 6390 for (unsigned i = 1; i < NumOperands; ++i) { 6391 Value *Opd = Gep->getOperand(i); 6392 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6393 !Legal->isInductionVariable(Opd)) 6394 return true; 6395 } 6396 6397 // Now we know we have a GEP ptr, %inv, %ind, %inv. Make sure that the step 6398 // can likely be merged into the address computation. 6399 unsigned MaxMergeDistance = 64; 6400 6401 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Ptr)); 6402 if (!AddRec) 6403 return true; 6404 6405 // Check the step is constant. 6406 const SCEV *Step = AddRec->getStepRecurrence(*SE); 6407 // Calculate the pointer stride and check if it is consecutive. 6408 const auto *C = dyn_cast<SCEVConstant>(Step); 6409 if (!C) 6410 return true; 6411 6412 const APInt &APStepVal = C->getAPInt(); 6413 6414 // Huge step value - give up. 6415 if (APStepVal.getBitWidth() > 64) 6416 return true; 6417 6418 int64_t StepVal = APStepVal.getSExtValue(); 6419 6420 return StepVal > MaxMergeDistance; 6421 } 6422 6423 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6424 return Legal->hasStride(I->getOperand(0)) || 6425 Legal->hasStride(I->getOperand(1)); 6426 } 6427 6428 LoopVectorizationCostModel::VectorizationCostTy 6429 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 6430 // If we know that this instruction will remain uniform, check the cost of 6431 // the scalar version. 6432 if (Legal->isUniformAfterVectorization(I)) 6433 VF = 1; 6434 6435 Type *VectorTy; 6436 unsigned C = getInstructionCost(I, VF, VectorTy); 6437 6438 bool TypeNotScalarized = 6439 VF > 1 && !VectorTy->isVoidTy() && TTI.getNumberOfParts(VectorTy) < VF; 6440 return VectorizationCostTy(C, TypeNotScalarized); 6441 } 6442 6443 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6444 unsigned VF, 6445 Type *&VectorTy) { 6446 Type *RetTy = I->getType(); 6447 if (VF > 1 && MinBWs.count(I)) 6448 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 6449 VectorTy = ToVectorTy(RetTy, VF); 6450 auto SE = PSE.getSE(); 6451 6452 // TODO: We need to estimate the cost of intrinsic calls. 6453 switch (I->getOpcode()) { 6454 case Instruction::GetElementPtr: 6455 // We mark this instruction as zero-cost because the cost of GEPs in 6456 // vectorized code depends on whether the corresponding memory instruction 6457 // is scalarized or not. Therefore, we handle GEPs with the memory 6458 // instruction cost. 6459 return 0; 6460 case Instruction::Br: { 6461 return TTI.getCFInstrCost(I->getOpcode()); 6462 } 6463 case Instruction::PHI: { 6464 auto *Phi = cast<PHINode>(I); 6465 6466 // First-order recurrences are replaced by vector shuffles inside the loop. 6467 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 6468 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 6469 VectorTy, VF - 1, VectorTy); 6470 6471 // TODO: IF-converted IFs become selects. 
6472 return 0; 6473 } 6474 case Instruction::UDiv: 6475 case Instruction::SDiv: 6476 case Instruction::URem: 6477 case Instruction::SRem: 6478 // We assume that if-converted blocks have a 50% chance of being executed. 6479 // Predicated scalarized instructions are avoided due to the CF that 6480 // bypasses turned off lanes. If we are not predicating, fallthrough. 6481 if (VF > 1 && mayDivideByZero(*I) && 6482 Legal->blockNeedsPredication(I->getParent())) 6483 return VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy) / 2 + 6484 getScalarizationOverhead(I, VF, true, TTI); 6485 case Instruction::Add: 6486 case Instruction::FAdd: 6487 case Instruction::Sub: 6488 case Instruction::FSub: 6489 case Instruction::Mul: 6490 case Instruction::FMul: 6491 case Instruction::FDiv: 6492 case Instruction::FRem: 6493 case Instruction::Shl: 6494 case Instruction::LShr: 6495 case Instruction::AShr: 6496 case Instruction::And: 6497 case Instruction::Or: 6498 case Instruction::Xor: { 6499 // Since we will replace the stride by 1 the multiplication should go away. 6500 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 6501 return 0; 6502 // Certain instructions can be cheaper to vectorize if they have a constant 6503 // second vector operand. One example of this are shifts on x86. 6504 TargetTransformInfo::OperandValueKind Op1VK = 6505 TargetTransformInfo::OK_AnyValue; 6506 TargetTransformInfo::OperandValueKind Op2VK = 6507 TargetTransformInfo::OK_AnyValue; 6508 TargetTransformInfo::OperandValueProperties Op1VP = 6509 TargetTransformInfo::OP_None; 6510 TargetTransformInfo::OperandValueProperties Op2VP = 6511 TargetTransformInfo::OP_None; 6512 Value *Op2 = I->getOperand(1); 6513 6514 // Check for a splat or for a non uniform vector of constants. 6515 if (isa<ConstantInt>(Op2)) { 6516 ConstantInt *CInt = cast<ConstantInt>(Op2); 6517 if (CInt && CInt->getValue().isPowerOf2()) 6518 Op2VP = TargetTransformInfo::OP_PowerOf2; 6519 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 6520 } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) { 6521 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 6522 Constant *SplatValue = cast<Constant>(Op2)->getSplatValue(); 6523 if (SplatValue) { 6524 ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue); 6525 if (CInt && CInt->getValue().isPowerOf2()) 6526 Op2VP = TargetTransformInfo::OP_PowerOf2; 6527 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 6528 } 6529 } else if (Legal->isUniform(Op2)) { 6530 Op2VK = TargetTransformInfo::OK_UniformValue; 6531 } 6532 6533 return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, Op2VK, 6534 Op1VP, Op2VP); 6535 } 6536 case Instruction::Select: { 6537 SelectInst *SI = cast<SelectInst>(I); 6538 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 6539 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 6540 Type *CondTy = SI->getCondition()->getType(); 6541 if (!ScalarCond) 6542 CondTy = VectorType::get(CondTy, VF); 6543 6544 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy); 6545 } 6546 case Instruction::ICmp: 6547 case Instruction::FCmp: { 6548 Type *ValTy = I->getOperand(0)->getType(); 6549 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 6550 auto It = MinBWs.find(Op0AsInstruction); 6551 if (VF > 1 && It != MinBWs.end()) 6552 ValTy = IntegerType::get(ValTy->getContext(), It->second); 6553 VectorTy = ToVectorTy(ValTy, VF); 6554 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy); 6555 } 6556 case Instruction::Store: 6557 case 
Instruction::Load: {
    StoreInst *SI = dyn_cast<StoreInst>(I);
    LoadInst *LI = dyn_cast<LoadInst>(I);
    Type *ValTy = (SI ? SI->getValueOperand()->getType() : LI->getType());
    VectorTy = ToVectorTy(ValTy, VF);

    unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment();
    unsigned AS =
        SI ? SI->getPointerAddressSpace() : LI->getPointerAddressSpace();
    Value *Ptr = getPointerOperand(I);
    // We add the cost of address computation here instead of with the gep
    // instruction because only here do we know whether the operation is
    // scalarized.
    if (VF == 1)
      return TTI.getAddressComputationCost(VectorTy) +
             TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);

    if (LI && Legal->isUniform(Ptr)) {
      // Scalar load + broadcast.
      unsigned Cost = TTI.getAddressComputationCost(ValTy->getScalarType());
      Cost += TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
                                  Alignment, AS);
      return Cost +
             TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, ValTy);
    }

    // For an interleaved access, calculate the total cost of the whole
    // interleave group.
    if (Legal->isAccessInterleaved(I)) {
      auto Group = Legal->getInterleavedAccessGroup(I);
      assert(Group && "Failed to get an interleaved access group.");

      // Only calculate the cost once at the insert position.
      if (Group->getInsertPos() != I)
        return 0;

      unsigned InterleaveFactor = Group->getFactor();
      Type *WideVecTy =
          VectorType::get(VectorTy->getVectorElementType(),
                          VectorTy->getVectorNumElements() * InterleaveFactor);

      // Holds the indices of existing members in an interleaved load group.
      // An interleaved store group doesn't need this as it doesn't allow gaps.
      SmallVector<unsigned, 4> Indices;
      if (LI) {
        for (unsigned i = 0; i < InterleaveFactor; i++)
          if (Group->getMember(i))
            Indices.push_back(i);
      }

      // Calculate the cost of the whole interleaved group.
      unsigned Cost = TTI.getInterleavedMemoryOpCost(
          I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
          Group->getAlignment(), AS);

      if (Group->isReverse())
        Cost +=
            Group->getNumMembers() *
            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);

      // FIXME: An interleaved load group with a huge gap could be even more
      // expensive than scalar operations. Then we could ignore such a group
      // and use scalar operations instead.
      return Cost;
    }

    // Check if the memory instruction will be scalarized.
    if (Legal->memoryInstructionMustBeScalarized(I, VF)) {
      bool IsComplexComputation =
          isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop);
      unsigned Cost = 0;
      // The cost of extracting from the value vector and pointer vector.
      Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
      for (unsigned i = 0; i < VF; ++i) {
        // The cost of extracting the pointer operand.
        Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, PtrTy, i);
        // In case of STORE, the cost of ExtractElement from the vector.
        // In case of LOAD, the cost of InsertElement into the returned
        // vector.
        Cost += TTI.getVectorInstrCost(SI ? Instruction::ExtractElement
                                          : Instruction::InsertElement,
                                       VectorTy, i);
      }

      // The cost of the scalar loads/stores.
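      // For example (illustrative): at VF = 4, a scalarized store pays for
      // four extracts of the pointer lanes and four extracts of the value
      // lanes (counted above), plus four address computations and four scalar
      // stores (added below).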
      Cost += VF * TTI.getAddressComputationCost(PtrTy, IsComplexComputation);
      Cost += VF *
              TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
                                  Alignment, AS);
      return Cost;
    }

    // Determine if the pointer operand of the access is either consecutive or
    // reverse consecutive.
    int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
    bool Reverse = ConsecutiveStride < 0;

    // Determine if either a gather or scatter operation is legal.
    bool UseGatherOrScatter =
        !ConsecutiveStride && Legal->isLegalGatherOrScatter(I);

    unsigned Cost = TTI.getAddressComputationCost(VectorTy);
    if (UseGatherOrScatter) {
      assert(ConsecutiveStride == 0 &&
             "Gather/Scatter are not used for consecutive stride");
      return Cost +
             TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
                                        Legal->isMaskRequired(I), Alignment);
    }
    // Wide load/stores.
    if (Legal->isMaskRequired(I))
      Cost +=
          TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
    else
      Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);

    if (Reverse)
      Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
    return Cost;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    // We optimize the truncation of induction variables. The cost of these is
    // the same as the scalar operation.
    if (I->getOpcode() == Instruction::Trunc &&
        Legal->isInductionVariable(I->getOperand(0)))
      return TTI.getCastInstrCost(I->getOpcode(), I->getType(),
                                  I->getOperand(0)->getType());

    Type *SrcScalarTy = I->getOperand(0)->getType();
    Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF);
    if (VF > 1 && MinBWs.count(I)) {
      // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
      //
      // Calculate the modified src and dest types.
      Type *MinVecTy = VectorTy;
      if (I->getOpcode() == Instruction::Trunc) {
        SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      } else if (I->getOpcode() == Instruction::ZExt ||
                 I->getOpcode() == Instruction::SExt) {
        SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      }
    }

    return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy);
  }
  case Instruction::Call: {
    bool NeedToScalarize;
    CallInst *CI = cast<CallInst>(I);
    unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
    if (getVectorIntrinsicIDForCall(CI, TLI))
      return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
    return CallCost;
  }
  default:
    // The cost of executing VF copies of the scalar instruction. This opcode
    // is unknown. Assume that it is the same as 'mul'.
6730 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) + 6731 getScalarizationOverhead(I, VF, false, TTI); 6732 } // end of switch. 6733 } 6734 6735 char LoopVectorize::ID = 0; 6736 static const char lv_name[] = "Loop Vectorization"; 6737 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 6738 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 6739 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 6740 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 6741 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 6742 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 6743 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 6744 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 6745 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 6746 INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass) 6747 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 6748 INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 6749 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 6750 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 6751 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 6752 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 6753 6754 namespace llvm { 6755 Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) { 6756 return new LoopVectorize(NoUnrolling, AlwaysVectorize); 6757 } 6758 } 6759 6760 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 6761 6762 // Check if the pointer operand of a load or store instruction is 6763 // consecutive. 6764 if (auto *Ptr = getPointerOperand(Inst)) 6765 return Legal->isConsecutivePtr(Ptr); 6766 return false; 6767 } 6768 6769 void LoopVectorizationCostModel::collectValuesToIgnore() { 6770 // Ignore ephemeral values. 6771 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 6772 6773 // Ignore type-promoting instructions we identified during reduction 6774 // detection. 6775 for (auto &Reduction : *Legal->getReductionVars()) { 6776 RecurrenceDescriptor &RedDes = Reduction.second; 6777 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 6778 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6779 } 6780 6781 // Insert values known to be scalar into VecValuesToIgnore. 6782 for (auto *BB : TheLoop->getBlocks()) 6783 for (auto &I : *BB) 6784 if (Legal->isScalarAfterVectorization(&I)) 6785 VecValuesToIgnore.insert(&I); 6786 } 6787 6788 void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr, 6789 bool IfPredicateInstr) { 6790 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 6791 // Holds vector parameters or scalars, in case of uniform vals. 6792 SmallVector<VectorParts, 4> Params; 6793 6794 setDebugLocFromInst(Builder, Instr); 6795 6796 // Does this instruction return a value ? 6797 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 6798 6799 // Initialize a new scalar map entry. 6800 ScalarParts Entry(UF); 6801 6802 VectorParts Cond; 6803 if (IfPredicateInstr) 6804 Cond = createBlockInMask(Instr->getParent()); 6805 6806 // For each vector unroll 'part': 6807 for (unsigned Part = 0; Part < UF; ++Part) { 6808 Entry[Part].resize(1); 6809 // For each scalar that we create: 6810 6811 // Start an "if (pred) a[i] = ..." block. 
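    // Note: the comparison built below and the cloned scalar are only
    // recorded in PredicatedInstructions here; the branch structure that
    // actually guards the scalar instruction is materialized later, after
    // vectorization.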
    Value *Cmp = nullptr;
    if (IfPredicateInstr) {
      if (Cond[Part]->getType()->isVectorTy())
        Cond[Part] =
            Builder.CreateExtractElement(Cond[Part], Builder.getInt32(0));
      Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cond[Part],
                               ConstantInt::get(Cond[Part]->getType(), 1));
    }

    Instruction *Cloned = Instr->clone();
    if (!IsVoidRetTy)
      Cloned->setName(Instr->getName() + ".cloned");

    // Replace the operands of the cloned instructions with their scalar
    // equivalents in the new loop.
    for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
      auto *NewOp = getScalarValue(Instr->getOperand(op), Part, 0);
      Cloned->setOperand(op, NewOp);
    }

    // Place the cloned scalar in the new loop.
    Builder.Insert(Cloned);

    // Add the cloned scalar to the scalar map entry.
    Entry[Part][0] = Cloned;

    // If we just cloned a new assumption, add it to the assumption cache.
    if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
      if (II->getIntrinsicID() == Intrinsic::assume)
        AC->registerAssumption(II);

    // End if-block.
    if (IfPredicateInstr)
      PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp));
  }
  VectorLoopValueMap.initScalar(Instr, Entry);
}

void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) {
  auto *SI = dyn_cast<StoreInst>(Instr);
  bool IfPredicateInstr = (SI && Legal->blockNeedsPredication(SI->getParent()));

  return scalarizeInstruction(Instr, IfPredicateInstr);
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
                                        Instruction::BinaryOps BinOp) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
  Type *Ty = Val->getType();
  assert(!Ty->isVectorTy() && "Val must be a scalar");

  if (Ty->isFloatingPointTy()) {
    Constant *C = ConstantFP::get(Ty, (double)StartIdx);

    // Floating point operations had to be 'fast' to enable the unrolling.
    Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
    return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
  }
  Constant *C = ConstantInt::get(Ty, StartIdx);
  return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
}

static void AddRuntimeUnrollDisableMetaData(Loop *L) {
  SmallVector<Metadata *, 4> MDs;
  // Reserve first location for self reference to the LoopID metadata node.
  MDs.push_back(nullptr);
  bool IsUnrollMetadata = false;
  MDNode *LoopID = L->getLoopID();
  if (LoopID) {
    // First find existing loop unrolling disable metadata.
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
      if (MD) {
        const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata =
            S && S->getString().startswith("llvm.loop.unroll.disable");
      }
      MDs.push_back(LoopID->getOperand(i));
    }
  }

  if (!IsUnrollMetadata) {
    // Add runtime unroll disable metadata.
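    // The resulting self-referential loop ID has the form (illustrative IR):
    //   br i1 %cond, label %loop, label %exit, !llvm.loop !0
    //   !0 = distinct !{!0, !1}
    //   !1 = !{!"llvm.loop.unroll.runtime.disable"}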
6899 LLVMContext &Context = L->getHeader()->getContext(); 6900 SmallVector<Metadata *, 1> DisableOperands; 6901 DisableOperands.push_back( 6902 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 6903 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 6904 MDs.push_back(DisableNode); 6905 MDNode *NewLoopID = MDNode::get(Context, MDs); 6906 // Set operand 0 to refer to the loop id itself. 6907 NewLoopID->replaceOperandWith(0, NewLoopID); 6908 L->setLoopID(NewLoopID); 6909 } 6910 } 6911 6912 bool LoopVectorizePass::processLoop(Loop *L) { 6913 assert(L->empty() && "Only process inner loops."); 6914 6915 #ifndef NDEBUG 6916 const std::string DebugLocStr = getDebugLocString(L); 6917 #endif /* NDEBUG */ 6918 6919 DEBUG(dbgs() << "\nLV: Checking a loop in \"" 6920 << L->getHeader()->getParent()->getName() << "\" from " 6921 << DebugLocStr << "\n"); 6922 6923 LoopVectorizeHints Hints(L, DisableUnrolling, *ORE); 6924 6925 DEBUG(dbgs() << "LV: Loop hints:" 6926 << " force=" 6927 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 6928 ? "disabled" 6929 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 6930 ? "enabled" 6931 : "?")) 6932 << " width=" << Hints.getWidth() 6933 << " unroll=" << Hints.getInterleave() << "\n"); 6934 6935 // Function containing loop 6936 Function *F = L->getHeader()->getParent(); 6937 6938 // Looking at the diagnostic output is the only way to determine if a loop 6939 // was vectorized (other than looking at the IR or machine code), so it 6940 // is important to generate an optimization remark for each loop. Most of 6941 // these messages are generated by emitOptimizationRemarkAnalysis. Remarks 6942 // generated by emitOptimizationRemark and emitOptimizationRemarkMissed are 6943 // less verbose reporting vectorized loops and unvectorized loops that may 6944 // benefit from vectorization, respectively. 6945 6946 if (!Hints.allowVectorization(F, L, AlwaysVectorize)) { 6947 DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 6948 return false; 6949 } 6950 6951 // Check the loop for a trip count threshold: 6952 // do not vectorize loops with a tiny trip count. 6953 const unsigned TC = SE->getSmallConstantTripCount(L); 6954 if (TC > 0u && TC < TinyTripCountVectorThreshold) { 6955 DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 6956 << "This loop is not worth vectorizing."); 6957 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 6958 DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 6959 else { 6960 DEBUG(dbgs() << "\n"); 6961 emitAnalysisDiag(L, Hints, *ORE, VectorizationReport() 6962 << "vectorization is not beneficial " 6963 "and is not explicitly forced"); 6964 return false; 6965 } 6966 } 6967 6968 PredicatedScalarEvolution PSE(*SE, *L); 6969 6970 // Check if it is legal to vectorize the loop. 6971 LoopVectorizationRequirements Requirements(*ORE); 6972 LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE, 6973 &Requirements, &Hints); 6974 if (!LVL.canVectorize()) { 6975 DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 6976 emitMissedWarning(F, L, Hints, ORE); 6977 return false; 6978 } 6979 6980 // Use the cost model. 6981 LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F, 6982 &Hints); 6983 CM.collectValuesToIgnore(); 6984 6985 // Check the function attributes to find out if this function should be 6986 // optimized for size. 
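  // For example, a function compiled with -Os/-Oz carries the optsize
  // attribute, so F->optForSize() is true; an explicit vectorization hint
  // (force enabled) still overrides the size preference below.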
  bool OptForSize =
      Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();

  // Compute the weighted frequency of this loop being executed and see if it
  // is less than 20% of the function entry baseline frequency. Note that we
  // always have a canonical loop here because we think we *can* vectorize.
  // FIXME: This is hidden behind a flag due to pervasive problems with
  // exactly what block frequency models.
  if (LoopVectorizeWithBlockFrequency) {
    BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader());
    if (Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
        LoopEntryFreq < ColdEntryFreq)
      OptForSize = true;
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem possibly correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat "
                    "attribute is used.\n");
    emitAnalysisDiag(
        L, Hints, *ORE,
        VectorizationReport()
            << "loop not vectorized due to NoImplicitFloat attribute");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
    emitAnalysisDiag(L, Hints, *ORE,
                     VectorizationReport()
                         << "loop not vectorized due to unsafe FP support.");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Select the optimal vectorization factor.
  const LoopVectorizationCostModel::VectorizationFactor VF =
      CM.selectVectorizationFactor(OptForSize);

  // Select the interleave count.
  unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);

  // Get user interleave count.
  unsigned UserIC = Hints.getInterleave();

  // Identify the diagnostic messages that should be produced.
  std::string VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                    "requirements.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  if (VF.Width == 1) {
    DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg =
        "the cost-model indicates that vectorization is not beneficial";
    VectorizeLoop = false;
  }

  if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg =
        "the cost-model indicates that interleaving is not beneficial";
    InterleaveLoop = false;
    if (UserIC == 1)
      IntDiagMsg +=
          " and is explicitly disabled or interleave count is set to 1";
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    DEBUG(dbgs()
          << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = "the cost-model indicates that interleaving is beneficial "
                 "but is explicitly disabled or interleave count is set to 1";
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emitOptimizationRemarkAnalysis(VAPassName, L, VecDiagMsg);
    ORE->emitOptimizationRemarkAnalysis(LV_NAME, L, IntDiagMsg);
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emitOptimizationRemarkAnalysis(VAPassName, L, VecDiagMsg);
  } else if (VectorizeLoop && !InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    ORE->emitOptimizationRemarkAnalysis(LV_NAME, L, IntDiagMsg);
  } else if (VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not legal to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC);
    Unroller.vectorize(&LVL, CM.MinBWs);

    ORE->emitOptimizationRemark(LV_NAME, L,
                                Twine("interleaved loop (interleaved count: ") +
                                    Twine(IC) + ")");
  } else {
    // If we decided that it is *legal* to vectorize the loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC);
    LB.vectorize(&LVL, CM.MinBWs);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling a scalar loop when there are
    // no runtime checks about strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      AddRuntimeUnrollDisableMetaData(L);

    // Report the vectorization decision.
    ORE->emitOptimizationRemark(
        LV_NAME, L, Twine("vectorized loop (vectorization width: ") +
                        Twine(VF.Width) + ", interleaved count: " + Twine(IC) +
                        ")");
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();

  DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;

  // Compute some weights outside of the loop over the loops. Compute this
  // using a BranchProbability to re-use its scaling math.
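  // For example: with ColdProb = 1/5, a loop whose preheader frequency is
  // below 20% of the function entry frequency is treated as cold and is
  // optimized for size rather than speed (see processLoop above).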
  const BranchProbability ColdProb(1, 5); // 20%
  ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
    return false;

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    addAcyclicInnerLoop(*L, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops, processing each loop nest in the
  // function.
  bool Changed = false;
  while (!Worklist.empty())
    Changed |= processLoop(Worklist.pop_back_val());

  return Changed;
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    return LAM.getResult<LoopAccessAnalysis>(L);
  };
  bool Changed =
      runImpl(F, SE, LI, TTI, DT, BFI, TLI, DB, AA, AC, GetLAA, ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  return PA;
}