//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
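//
// As an illustration of the widening described above (a hypothetical C-like
// sketch, not code produced by this pass), a loop such as
//
//   for (i = 0; i < n; ++i)
//     A[i] = B[i] + 1;
//
// becomes, for a vectorization factor of 4,
//
//   for (i = 0; i < n - n % 4; i += 4)
//     A[i:i+3] = B[i:i+3] + 1;   // one 'wide' iteration
//
// with the remaining iterations handled by a scalar epilogue loop.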
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <functional>
#include <map>
#include <tuple>

using namespace llvm;
using namespace llvm::PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// We don't vectorize loops with a known constant trip count below this number.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Don't vectorize loops with a constant "
             "trip count that is smaller than this "
             "value."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));
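// For example (illustrative), with -vectorizer-maximize-bandwidth on a
// target with 128-bit vector registers, a loop whose smallest operand type
// is i8 may be vectorized at VF = 16 rather than the VF = 4 suggested by
// its widest (i32) operations.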
/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i+=4)
///        A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));
/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(false), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

namespace {

// Forward declarations.
class LoopVectorizeHints;
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;

/// \brief This modifies LoopAccessReport to initialize message with
/// loop-vectorizer-specific part.
class VectorizationReport : public LoopAccessReport {
public:
  VectorizationReport(Instruction *I = nullptr)
      : LoopAccessReport("loop not vectorized: ", I) {}

  /// \brief This allows promotion of the loop-access analysis report into the
  /// loop-vectorizer report. It modifies the message to add the
  /// loop-vectorizer-specific part of the message.
  explicit VectorizationReport(const LoopAccessReport &R)
      : LoopAccessReport(Twine("loop not vectorized: ") + R.str(),
                         R.getInstr()) {}
};

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}
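// For example (illustrative), ToVectorTy(i32, 4) yields <4 x i32>, while
// ToVectorTy(i32, 1) and ToVectorTy(void, 4) return the input type unchanged.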
/// A helper function that returns a GEP instruction and knows to skip a
/// 'bitcast'. The 'bitcast' may be skipped if the source and the destination
/// pointee types of the 'bitcast' have the same size.
/// For example:
///   bitcast double** %var to i64* - can be skipped
///   bitcast double** %var to i8*  - can not
static GetElementPtrInst *getGEPInstruction(Value *Ptr) {

  if (isa<GetElementPtrInst>(Ptr))
    return cast<GetElementPtrInst>(Ptr);

  if (isa<BitCastInst>(Ptr) &&
      isa<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0))) {
    Type *BitcastTy = Ptr->getType();
    Type *GEPTy = cast<BitCastInst>(Ptr)->getSrcTy();
    if (!isa<PointerType>(BitcastTy) || !isa<PointerType>(GEPTy))
      return nullptr;
    Type *Pointee1Ty = cast<PointerType>(BitcastTy)->getPointerElementType();
    Type *Pointee2Ty = cast<PointerType>(GEPTy)->getPointerElementType();
    const DataLayout &DL = cast<BitCastInst>(Ptr)->getModule()->getDataLayout();
    if (DL.getTypeSizeInBits(Pointee1Ty) == DL.getTypeSizeInBits(Pointee2Ty))
      return cast<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0));
  }
  return nullptr;
}

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      unsigned VecWidth, unsigned UnrollFactor)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Induction(nullptr),
        OldInduction(nullptr), WidenMap(UnrollFactor), TripCount(nullptr),
        VectorTripCount(nullptr), Legal(nullptr), AddedSafetyChecks(false) {}

  // Perform the actual loop widening (vectorization).
  // MinimumBitWidths maps scalar integer values to the smallest bitwidth they
  // can be validly truncated to. The cost model has assumed this truncation
  // will happen when vectorizing.
  void vectorize(LoopVectorizationLegality *L,
                 MapVector<Instruction *, uint64_t> MinimumBitWidths) {
    MinBWs = MinimumBitWidths;
    Legal = L;
    // Create a new empty loop. Unlink the old loop and connect the new one.
    createEmptyLoop();
    // Widen each instruction in the old loop to a new one in the new loop.
    // Use the Legality module to find the induction and reduction variables.
    vectorizeLoop();
  }
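  // Typical driver usage (illustrative sketch only; the variable names below
  // are assumed, and the caller is expected to have run the legality and
  // cost analyses first):
  //   InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, VF.Width, IC);
  //   LB.vectorize(&LVL, CM.MinBWs);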
  // Return true if any runtime check is added.
  bool IsSafetyChecksAdded() { return AddedSafetyChecks; }

  virtual ~InnerLoopVectorizer() {}

protected:
  /// A small list of PHINodes.
  typedef SmallVector<PHINode *, 4> PhiVector;
  /// When we unroll loops we have multiple vector values for each scalar.
  /// This data structure holds the unrolled and vectorized values that
  /// originated from one scalar instruction.
  typedef SmallVector<Value *, 2> VectorParts;

  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
      EdgeMaskCache;

  /// Create an empty loop, based on the loop ranges of the old loop.
  void createEmptyLoop();
  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);
  /// Copy and widen the instructions from the old loop.
  virtual void vectorizeLoop();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// \brief The Loop exit block may have single value PHI nodes where the
  /// incoming value is 'Undef'. While vectorizing we only handled real values
  /// that were defined inside the loop. Here we fix the 'undef case'.
  /// See PR14725.
  void fixLCSSAPHIs();

  /// Shrinks vector element sizes based on information in "MinBWs".
  void truncateToMinimalBitwidths();

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);
  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// A helper function to vectorize a single BB within the innermost loop.
  void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VectorParts &Entry, unsigned UF,
                           unsigned VF, PhiVector *PV);

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars. If \p IfPredicateStore is true we need to 'hide' each
  /// scalarized instruction behind an if block predicated on the control
  /// dependence of the instruction.
  virtual void scalarizeInstruction(Instruction *Instr,
                                    bool IfPredicateStore = false);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);
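  /// For example (illustrative), broadcasting a loop-invariant i32 %x at
  /// VF = 4 produces the usual splat idiom:
  ///   %ins   = insertelement <4 x i32> undef, i32 %x, i32 0
  ///   %splat = shufflevector <4 x i32> %ins, <4 x i32> undef,
  ///                          <4 x i32> zeroinitializer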
  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// Step is a SCEV. In order to get StepValue it takes the existing value
  /// from SCEV or creates a new one using SCEVExpander.
  virtual Value *getStepVector(Value *Val, int StartIdx, const SCEV *Step);

  /// When we go over instructions in the basic block we rely on previous
  /// values within the current basic block or on loop invariant values.
  /// When we widen (vectorize) values we place them in the map. If the values
  /// are not within the map, they have to be loop invariant, so we simply
  /// broadcast them into a vector.
  VectorParts &getVectorValue(Value *V);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Emit a bypass check to see if the trip count would overflow, or we
  /// wouldn't have enough iterations to execute one vector loop.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if the vector trip count is nonzero.
  void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, const Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(SmallVectorImpl<Value *> &To, const Instruction *From);

  /// This is a helper class that holds the vectorizer state. It maps scalar
  /// instructions to vector instructions. When the code is 'unrolled' then
  /// a single scalar value is mapped to multiple vector parts. The parts
  /// are stored in the VectorPart type.
  struct ValueMap {
    /// C'tor. UnrollFactor controls the number of vectors ('parts') that
    /// are mapped.
    ValueMap(unsigned UnrollFactor) : UF(UnrollFactor) {}

    /// \return True if 'Key' is saved in the Value Map.
    bool has(Value *Key) const { return MapStorage.count(Key); }
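    /// For example (illustrative), with UF = 2, splat(%k, %v) stores the
    /// two-part entry { %v, %v } under %k, and a later get(%k) returns a
    /// reference to that same two-element VectorParts.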
    /// Initializes a new entry in the map. Sets all of the vector parts to the
    /// same value in 'Val'.
    /// \return A reference to a vector with splat values.
    VectorParts &splat(Value *Key, Value *Val) {
      VectorParts &Entry = MapStorage[Key];
      Entry.assign(UF, Val);
      return Entry;
    }

    ///\return A reference to the value that is stored at 'Key'.
    VectorParts &get(Value *Key) {
      VectorParts &Entry = MapStorage[Key];
      if (Entry.empty())
        Entry.resize(UF);
      assert(Entry.size() == UF);
      return Entry;
    }

  private:
    /// The unroll factor. Each entry in the map stores this number of vector
    /// elements.
    unsigned UF;

    /// Map storage. We use std::map and not DenseMap because insertions to a
    /// dense map invalidate its iterators.
    std::map<Value *, VectorParts> MapStorage;
  };

  /// The original loop.
  Loop *OrigLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;
  /// Loop Info.
  LoopInfo *LI;
  /// Dominator Tree.
  DominatorTree *DT;
  /// Alias Analysis.
  AliasAnalysis *AA;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Assumption Cache.
  AssumptionCache *AC;

  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

protected:
  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;
  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;
  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;
  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;
  /// The vector loop body.
  SmallVector<BasicBlock *, 4> LoopVectorBody;
  /// The scalar loop body.
  BasicBlock *LoopScalarBody;
  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction;
  /// The induction variable of the old basic block.
  PHINode *OldInduction;
  /// Maps scalars to widened vectors.
  ValueMap WidenMap;
  /// Store instructions that should be predicated, as a pair
  /// <StoreInst, Predicate>.
  SmallVector<std::pair<StoreInst *, Value *>, 4> PredicatedStores;
  EdgeMaskCache MaskCache;
  /// Trip count of the original loop.
  Value *TripCount;
  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount;
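  /// For example (illustrative), TripCount = 103 with VF = 4 and UF = 2
  /// gives VectorTripCount = 103 - (103 % 8) = 96; the remaining 7
  /// iterations run in the scalar epilogue loop.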
  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;
  LoopVectorizationLegality *Legal;

  // Record whether runtime check is added.
  bool AddedSafetyChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    unsigned UnrollFactor)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, 1,
                            UnrollFactor) {}

private:
  void scalarizeInstruction(Instruction *Instr,
                            bool IfPredicateStore = false) override;
  void vectorizeMemoryInstruction(Instruction *Instr) override;
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step) override;
  Value *getStepVector(Value *Val, int StartIdx, const SCEV *StepSCEV) override;
  Value *reverseVector(Value *Vec) override;
};

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE;
       ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

/// \brief Set the debug location in the builder using the debug location in
/// the instruction.
static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr))
    B.SetCurrentDebugLocation(Inst->getDebugLoc());
  else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

/// \brief Propagate known metadata from one instruction to another.
static void propagateMetadata(Instruction *To, const Instruction *From) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  From->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto M : Metadata) {
    unsigned Kind = M.first;

    // These are safe to transfer (this is safe for TBAA, even when we
    // if-convert, because should that metadata have had a control dependency
    // on the condition, and thus actually aliased with some other
    // non-speculated memory access when the condition was false, this would be
    // caught by the runtime overlap checks).
    if (Kind != LLVMContext::MD_tbaa && Kind != LLVMContext::MD_alias_scope &&
        Kind != LLVMContext::MD_noalias && Kind != LLVMContext::MD_fpmath &&
        Kind != LLVMContext::MD_nontemporal)
      continue;

    To->setMetadata(Kind, M.second);
  }
}
void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      const Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(SmallVectorImpl<Value *> &To,
                                      const Instruction *From) {
  for (Value *V : To)
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
}

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is relative to the leader and it could
  /// be negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The largest index is always less than the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }
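  /// For example (illustrative), with Factor = 4 and the leader stored under
  /// key 0: insertMember(I, 2, A) records I under key 2, while a later
  /// insertMember(J, -1, A) makes J the new leader (SmallestKey becomes -1)
  /// and shifts the existing members' indices up to 1 and 3.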
  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if the group contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey;
  int LargestKey;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32         // Insert Position
  //      %add = add i32 %even     // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32           // Def of %odd
  //      store i32 %odd           // Insert Position
  Instruction *InsertPos;
};

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise it's meaningless to do analysis as the vectorization
/// on interleaved accesses is unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT)
      : PSE(PSE), TheLoop(L), DT(DT), RequiresScalarEpilogue(false) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr doesn't belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }
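  /// For example (illustrative), a non-reversed load group with a gap
  /// (members at indices 0 and 2 of factor 3) is widened to a load covering
  /// all three lanes, so the last vector iteration may read past the final
  /// member; requiresScalarEpilogue() then reports true and at least one
  /// iteration is kept scalar to avoid the speculative out-of-bounds access.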
private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;
  Loop *TheLoop;
  DominatorTree *DT;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor(int Stride, const SCEV *Scev, unsigned Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    StrideDescriptor() : Stride(0), Scev(nullptr), Size(0), Align(0) {}

    int Stride;       // The access's stride. It is negative for a reverse access.
    const SCEV *Scev; // The scalar expression of this access.
    unsigned Size;    // The size of the memory object.
    unsigned Align;   // The alignment of this access.
  };

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStridedAccesses(
      MapVector<Instruction *, StrideDescriptor> &StrideAccesses,
      const ValueToValueMap &Strides);
};

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for it.
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      }
      return false;
    }
  };
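  /// For example (illustrative), a "llvm.loop.vectorize.width" value of 4
  /// passes HK_WIDTH validation (a power of two no larger than
  /// VectorizerParams::MaxVectorWidth), whereas a value of 3 fails and the
  /// previously stored Width.Value is left untouched.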
  /// Vectorization width.
  Hint Width;
  /// Vectorization interleave factor.
  Hint Interleave;
  /// Vectorization forced.
  Hint Force;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  LoopVectorizeHints(const Loop *L, bool DisableInterleaving)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        PotentiallyUnsafe(false), TheLoop(L) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized() {
    Width.Value = Interleave.Value = 1;
    Hint Hints[] = {Width, Interleave};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      emitOptimizationRemarkAnalysis(F->getContext(),
                                     vectorizeAnalysisPassName(), *F,
                                     L->getStartLoc(), emitRemark());
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      emitOptimizationRemarkAnalysis(F->getContext(),
                                     vectorizeAnalysisPassName(), *F,
                                     L->getStartLoc(), emitRemark());
      return false;
    }

    if (getWidth() == 1 && getInterleave() == 1) {
      // FIXME: Add a separate metadata to indicate when the loop has already
      // been vectorized instead of setting width and count to 1.
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      emitOptimizationRemarkAnalysis(
          F->getContext(), vectorizeAnalysisPassName(), *F, L->getStartLoc(),
          "loop not vectorized: vectorization and interleaving are explicitly "
          "disabled, or vectorize width and interleave count are both set to "
          "1");
      return false;
    }

    return true;
  }
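  // For example (illustrative), a loop carrying the IR metadata
  //   !llvm.loop !0
  //   !0 = distinct !{!0, !1, !2}
  //   !1 = !{!"llvm.loop.vectorize.width", i32 4}
  //   !2 = !{!"llvm.loop.vectorize.enable", i1 true}
  // is read back here as Width == 4 and Force == FK_Enabled, so
  // allowVectorization() returns true even when AlwaysVectorize is off.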
  /// Dumps all the hint information.
  std::string emitRemark() const {
    VectorizationReport R;
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      R << "vectorization is explicitly disabled";
    else {
      R << "use -Rpass-analysis=loop-vectorize for more info";
      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
        R << " (Force=true";
        if (Width.Value != 0)
          R << ", Vector Width=" << Width.Value;
        if (Interleave.Value != 0)
          R << ", Interleave Count=" << Interleave.Value;
        R << ")";
      }
    }

    return R.str();
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }

  const char *vectorizeAnalysisPassName() const {
    // If hints are provided that don't disable vectorization use the
    // AlwaysPrint pass name to force the frontend to print the diagnostic.
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return DiagnosticInfo::AlwaysPrint;
  }

  bool allowReordering() const {
    // When loop hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations that is given by the
    // scalar loop. This is not enabled by default because it can be unsafe
    // or inefficient. For example, reordering floating-point operations will
    // change the way round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }
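  // For example (illustrative), turning the sequential FP reduction
  //   s += a[i];
  // into four partial sums that are combined after the loop changes where
  // round-off error accumulates, which is why such reordering is only
  // allowed when the user forced vectorization or requested a width > 1.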
private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either a MDString or a MDNode with the first
      // operand a MDString.
      if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        if (!MD || MD->getNumOperands() == 0)
          continue;
        S = dyn_cast<MDString>(MD->getOperand(0));
        for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
          Args.push_back(MD->getOperand(i));
      } else {
        S = dyn_cast<MDString>(LoopID->getOperand(i));
        assert(Args.size() == 0 && "too many arguments for MDString");
      }

      if (!S)
        continue;

      // Check if the hint starts with the loop metadata prefix.
      StringRef Name = S->getString();
      if (Args.size() == 1)
        setHint(Name, Args[0]);
    }
  }

  /// Checks a string hint with one operand and sets the value if valid.
  void setHint(StringRef Name, Metadata *Arg) {
    if (!Name.startswith(Prefix()))
      return;
    Name = Name.substr(Prefix().size(), StringRef::npos);

    const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
    if (!C)
      return;
    unsigned Val = C->getZExtValue();

    Hint *Hints[] = {&Width, &Interleave, &Force};
    for (auto H : Hints) {
      if (Name == H->Name) {
        if (H->validate(Val))
          H->Value = Val;
        else
          DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
        break;
      }
    }
  }

  /// Create a new hint from name / value pair.
  MDNode *createHintMetadata(StringRef Name, unsigned V) const {
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    Metadata *MDs[] = {MDString::get(Context, Name),
                       ConstantAsMetadata::get(
                           ConstantInt::get(Type::getInt32Ty(Context), V))};
    return MDNode::get(Context, MDs);
  }

  /// Matches metadata with hint name.
  bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
    MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
    if (!Name)
      return false;

    for (auto H : HintTypes)
      if (Name->getString().endswith(H.Name))
        return true;
    return false;
  }

  /// Sets current hints into loop metadata, keeping other values intact.
  void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
    if (HintTypes.size() == 0)
      return;

    // Reserve the first element to LoopID (see below).
    SmallVector<Metadata *, 4> MDs(1);
    // If the loop already has metadata, then ignore the existing operands.
    MDNode *LoopID = TheLoop->getLoopID();
    if (LoopID) {
      for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
        MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
        // If node in update list, ignore old value.
        if (!matchesHintMetadataName(Node, HintTypes))
          MDs.push_back(Node);
      }
    }

    // Now, add the missing hints.
    for (auto H : HintTypes)
      MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));

    // Replace current metadata node with new one.
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);

    TheLoop->setLoopID(NewLoopID);
  }
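  // For example (illustrative), after setAlreadyVectorized() the loop ID
  // becomes something like
  //   !0 = distinct !{!0, !{!"llvm.loop.vectorize.width", i32 1},
  //                   !{!"llvm.loop.interleave.count", i32 1}}
  // which a later run of the pass treats as "already vectorized" in
  // allowVectorization().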
  /// The loop these hints belong to.
  const Loop *TheLoop;
};

static void emitAnalysisDiag(const Function *TheFunction, const Loop *TheLoop,
                             const LoopVectorizeHints &Hints,
                             const LoopAccessReport &Message) {
  const char *Name = Hints.vectorizeAnalysisPassName();
  LoopAccessReport::emitAnalysis(Message, TheFunction, TheLoop, Name);
}

static void emitMissedWarning(Function *F, Loop *L,
                              const LoopVectorizeHints &LH) {
  emitOptimizationRemarkMissed(F->getContext(), LV_NAME, *F, L->getStartLoc(),
                               LH.emitRemark());

  if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
    if (LH.getWidth() != 1)
      emitLoopVectorizeWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop vectorization");
    else if (LH.getInterleave() != 1)
      emitLoopInterleaveWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop interleaving");
  }
}

/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
/// to what vectorization factor.
/// This class does not look at the profitability of vectorization, only the
/// legality. This class has two main kinds of checks:
/// * Memory checks - The code in canVectorizeMemory checks if vectorization
///   will change the order of memory accesses in a way that will change the
///   correctness of the program.
/// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory
///   checks for a number of different conditions, such as the availability of
///   a single induction variable, that all types are supported and
///   vectorizable, etc. This code reflects the capabilities of
///   InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying the
/// induction variable and the different reduction variables.
class LoopVectorizationLegality {
public:
  LoopVectorizationLegality(Loop *L, PredicatedScalarEvolution &PSE,
                            DominatorTree *DT, TargetLibraryInfo *TLI,
                            AliasAnalysis *AA, Function *F,
                            const TargetTransformInfo *TTI,
                            LoopAccessAnalysis *LAA,
                            LoopVectorizationRequirements *R,
                            LoopVectorizeHints *H)
      : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TheFunction(F),
        TTI(TTI), DT(DT), LAA(LAA), LAI(nullptr), InterleaveInfo(PSE, L, DT),
        Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
        Requirements(R), Hints(H) {}

  /// ReductionList contains the reduction descriptors for all
  /// of the reductions that were found in the loop.
  typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;

  /// InductionList saves induction variables and maps them to the
  /// induction descriptor.
  typedef MapVector<PHINode *, InductionDescriptor> InductionList;

  /// RecurrenceSet contains the phi nodes that are recurrences other than
  /// inductions and reductions.
  typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet;

  /// Returns true if it is legal to vectorize this loop.
  /// This does not mean that it is profitable to vectorize this
  /// loop, only that it is legal to do so.
  bool canVectorize();

  /// Returns the Induction variable.
  PHINode *getInduction() { return Induction; }

  /// Returns the reduction variables found in the loop.
  ReductionList *getReductionVars() { return &Reductions; }
  /// Returns the induction variables found in the loop.
  InductionList *getInductionVars() { return &Inductions; }

  /// Return the first-order recurrences found in the loop.
  RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }

  /// Returns the widest induction type.
  Type *getWidestInductionType() { return WidestIndTy; }

  /// Returns True if V is an induction variable in this loop.
  bool isInductionVariable(const Value *V);

  /// Returns True if PN is a reduction variable in this loop.
  bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }

  /// Returns True if Phi is a first-order recurrence in this loop.
  bool isFirstOrderRecurrence(const PHINode *Phi);

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  bool blockNeedsPredication(BasicBlock *BB);

  /// Check if this pointer is consecutive when vectorizing. This happens
  /// when the last index of the GEP is the induction variable, or when the
  /// pointer itself is an induction variable.
  /// This check allows us to vectorize A[idx] into a wide load/store.
  /// Returns:
  /// 0 - Stride is unknown or non-consecutive.
  /// 1 - Address is consecutive.
  /// -1 - Address is consecutive, and decreasing.
  int isConsecutivePtr(Value *Ptr);

  /// Returns true if the value V is uniform within the loop.
  bool isUniform(Value *V);

  /// Returns true if this instruction will remain scalar after vectorization.
  bool isUniformAfterVectorization(Instruction *I) { return Uniforms.count(I); }

  /// Returns the information that we collected about runtime memory check.
  const RuntimePointerChecking *getRuntimePointerChecking() const {
    return LAI->getRuntimePointerChecking();
  }

  const LoopAccessInfo *getLAI() const { return LAI; }

  /// \brief Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// \brief Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// \brief Returns true if an interleaved group requires a scalar iteration
  /// to handle accesses with gaps.
  bool requiresScalarEpilogue() const {
    return InterleaveInfo.requiresScalarEpilogue();
  }

  unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }

  bool hasStride(Value *V) { return StrideSet.count(V); }
  bool mustCheckStrides() { return !StrideSet.empty(); }
  SmallPtrSet<Value *, 8>::iterator strides_begin() {
    return StrideSet.begin();
  }
  SmallPtrSet<Value *, 8>::iterator strides_end() { return StrideSet.end(); }

  /// Returns true if the target machine supports masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
  }
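  /// For example (illustrative), a conditional store such as
  ///   if (b[i]) A[i] = x;
  /// can be widened into a single @llvm.masked.store when A[i] is consecutive
  /// and the target reports masked-store support for the element type;
  /// otherwise the store is scalarized and predicated per lane.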
  /// Returns true if the target machine supports masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
  }

  /// Returns true if the target machine supports masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType) {
    return TTI->isLegalMaskedScatter(DataType);
  }

  /// Returns true if the target machine supports masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType) {
    return TTI->isLegalMaskedGather(DataType);
  }

  /// Returns true if the vector representation of the instruction \p I
  /// requires a mask.
  bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }

  unsigned getNumStores() const { return LAI->getNumStores(); }
  unsigned getNumLoads() const { return LAI->getNumLoads(); }
  unsigned getNumPredStores() const { return NumPredStores; }

private:
  /// Check if a single basic block loop is vectorizable.
  /// At this point we know that this is a loop with a constant trip count
  /// and we only need to check individual instructions.
  bool canVectorizeInstrs();

  /// When we vectorize loops we may change the order in which
  /// we read and write from memory. This method checks if it is
  /// legal to vectorize the code, considering only memory constraints.
  /// Returns true if the loop is vectorizable.
  bool canVectorizeMemory();

  /// Return true if we can vectorize this loop using the IF-conversion
  /// transformation.
  bool canVectorizeWithIfConvert();

  /// Collect the variables that need to stay uniform after vectorization.
  void collectLoopUniforms();

  /// Return true if all of the instructions in the block can be speculatively
  /// executed. \p SafePtrs is a list of addresses that are known to be legal
  /// and we know that we can read from them without segfault.
  bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);

  /// \brief Collect memory accesses with loop invariant strides.
  ///
  /// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
  /// invariant.
  void collectStridedAccess(Value *LoadOrStoreInst);

  /// \brief Returns true if we can vectorize using this PHI node as an
  /// induction.
  ///
  /// Updates the vectorization state by adding \p Phi to the inductions list.
  /// This can set \p Phi as the main induction of the loop if \p Phi is a
  /// better choice for the main induction than the existing one.
  bool addInductionPhi(PHINode *Phi, InductionDescriptor ID);

  /// Report an analysis message to assist the user in diagnosing loops that
  /// are not vectorized. These are handled as LoopAccessReport rather than
  /// VectorizationReport because the << operator of VectorizationReport
  /// returns LoopAccessReport.
  void emitAnalysis(const LoopAccessReport &Message) const {
    emitAnalysisDiag(TheFunction, TheLoop, *Hints, Message);
  }

  unsigned NumPredStores;

  /// The loop that we evaluate.
  Loop *TheLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
  /// Applies dynamic knowledge to simplify SCEV expressions in the context
  /// of existing SCEV assumptions. The analysis will also add a minimal set
  /// of new predicates if this is required to enable vectorization and
  /// unrolling.
1440 PredicatedScalarEvolution &PSE;
1441 /// Target Library Info.
1442 TargetLibraryInfo *TLI;
1443 /// Parent function.
1444 Function *TheFunction;
1445 /// Target Transform Info.
1446 const TargetTransformInfo *TTI;
1447 /// Dominator Tree.
1448 DominatorTree *DT;
1449 // LoopAccess analysis.
1450 LoopAccessAnalysis *LAA;
1451 // And the loop-accesses info corresponding to this loop. This pointer is
1452 // null until canVectorizeMemory sets it up.
1453 const LoopAccessInfo *LAI;
1454
1455 /// The interleave access information contains groups of interleaved accesses
1456 /// with the same stride and close to each other.
1457 InterleavedAccessInfo InterleaveInfo;
1458
1459 // --- vectorization state --- //
1460
1461 /// Holds the integer induction variable. This is the counter of the
1462 /// loop.
1463 PHINode *Induction;
1464 /// Holds the reduction variables.
1465 ReductionList Reductions;
1466 /// Holds all of the induction variables that we found in the loop.
1467 /// Notice that inductions don't need to start at zero and that induction
1468 /// variables can be pointers.
1469 InductionList Inductions;
1470 /// Holds the phi nodes that are first-order recurrences.
1471 RecurrenceSet FirstOrderRecurrences;
1472 /// Holds the widest induction type encountered.
1473 Type *WidestIndTy;
1474
1475 /// Allowed outside users. This holds the reduction
1476 /// vars which can be accessed from outside the loop.
1477 SmallPtrSet<Value *, 4> AllowedExit;
1478 /// This set holds the variables which are known to be uniform after
1479 /// vectorization.
1480 SmallPtrSet<Instruction *, 4> Uniforms;
1481
1482 /// Can we assume the absence of NaNs.
1483 bool HasFunNoNaNAttr;
1484
1485 /// Vectorization requirements that will go through late-evaluation.
1486 LoopVectorizationRequirements *Requirements;
1487
1488 /// Used to emit an analysis of any legality issues.
1489 LoopVectorizeHints *Hints;
1490
1491 ValueToValueMap Strides;
1492 SmallPtrSet<Value *, 8> StrideSet;
1493
1494 /// While vectorizing these instructions we have to generate a
1495 /// call to the appropriate masked intrinsic.
1496 SmallPtrSet<const Instruction *, 8> MaskedOp;
1497 };
1498
1499 /// LoopVectorizationCostModel - estimates the expected speedups due to
1500 /// vectorization.
1501 /// In many cases vectorization is not profitable. This can happen for a
1502 /// number of reasons. In this class we mainly attempt to predict the
1503 /// expected speedup/slowdowns due to the supported instruction set. We use the
1504 /// TargetTransformInfo to query the different backends for the cost of
1505 /// different operations.
1506 class LoopVectorizationCostModel {
1507 public:
1508 LoopVectorizationCostModel(Loop *L, ScalarEvolution *SE, LoopInfo *LI,
1509 LoopVectorizationLegality *Legal,
1510 const TargetTransformInfo &TTI,
1511 const TargetLibraryInfo *TLI, DemandedBits *DB,
1512 AssumptionCache *AC, const Function *F,
1513 const LoopVectorizeHints *Hints,
1514 SmallPtrSetImpl<const Value *> &ValuesToIgnore)
1515 : TheLoop(L), SE(SE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
1516 TheFunction(F), Hints(Hints), ValuesToIgnore(ValuesToIgnore) {}
1517
1518 /// Information about vectorization costs.
1519 struct VectorizationFactor {
1520 unsigned Width; // Vector width with best cost
1521 unsigned Cost; // Cost of the loop with that width
1522 };
1523 /// \return The most profitable vectorization factor and the cost of that VF.
1524 /// This method checks every power of two up to VF.
If UserVF is not zero,
1525 /// then this vectorization factor will be selected if vectorization is
1526 /// possible.
1527 VectorizationFactor selectVectorizationFactor(bool OptForSize);
1528
1529 /// \return The size (in bits) of the smallest and widest types in the code
1530 /// that needs to be vectorized. We ignore values that remain scalar such as
1531 /// 64 bit loop indices.
1532 std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1533
1534 /// \return The desired interleave count.
1535 /// If interleave count has been specified by metadata it will be returned.
1536 /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1537 /// are the selected vectorization factor and the cost of the selected VF.
1538 unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
1539 unsigned LoopCost);
1540
1541 /// \return The most profitable interleave count.
1542 /// This method finds the best interleave count based on register pressure
1543 /// and other parameters. VF and LoopCost are the selected vectorization
1544 /// factor and the cost of the selected VF.
1545 unsigned computeInterleaveCount(bool OptForSize, unsigned VF,
1546 unsigned LoopCost);
1547
1548 /// \brief A struct that represents some properties of the register usage
1549 /// of a loop.
1550 struct RegisterUsage {
1551 /// Holds the number of loop invariant values that are used in the loop.
1552 unsigned LoopInvariantRegs;
1553 /// Holds the maximum number of concurrent live intervals in the loop.
1554 unsigned MaxLocalUsers;
1555 /// Holds the number of instructions in the loop.
1556 unsigned NumInstructions;
1557 };
1558
1559 /// \return Returns information about the register usages of the loop for the
1560 /// given vectorization factors.
1561 SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1562
1563 private:
1564 /// The vectorization cost is a combination of the cost itself and a boolean
1565 /// indicating whether any of the contributing operations will actually
1566 /// operate on vector values after type legalization in the backend. If this
1567 /// latter value is false, then all operations will be scalarized (i.e. no
1568 /// vectorization has actually taken place).
1571 typedef std::pair<unsigned, bool> VectorizationCostTy;
1572
1573 /// Returns the expected execution cost. The unit of the cost does
1574 /// not matter because we use the 'cost' units to compare different
1575 /// vector widths. The cost that is returned is *not* normalized by
1576 /// the factor width.
1577 VectorizationCostTy expectedCost(unsigned VF);
1578
1579 /// Returns the execution time cost of an instruction for a given vector
1580 /// width. Vector width of one means scalar.
1581 VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
1582
1583 /// The cost-computation logic from getInstructionCost which provides
1584 /// the vector type as an output parameter.
1585 unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
1586
1587 /// Returns whether the instruction is a load or store and will be emitted
1588 /// as a vector operation.
1589 bool isConsecutiveLoadOrStore(Instruction *I);
1590
1591 /// Report an analysis message to assist the user in diagnosing loops that are
1592 /// not vectorized. These are handled as LoopAccessReport rather than
1593 /// VectorizationReport because the << operator of VectorizationReport returns
1594 /// LoopAccessReport.
1595 void emitAnalysis(const LoopAccessReport &Message) const {
1596 emitAnalysisDiag(TheFunction, TheLoop, *Hints, Message);
1597 }
1598
1599 public:
1600 /// Map of scalar integer values to the smallest bitwidth they can be legally
1601 /// represented as. The vector equivalents of these values should be truncated
1602 /// to this type.
1603 MapVector<Instruction *, uint64_t> MinBWs;
1604
1605 /// The loop that we evaluate.
1606 Loop *TheLoop;
1607 /// SCEV analysis.
1608 ScalarEvolution *SE;
1609 /// Loop Info analysis.
1610 LoopInfo *LI;
1611 /// Vectorization legality.
1612 LoopVectorizationLegality *Legal;
1613 /// Vector target information.
1614 const TargetTransformInfo &TTI;
1615 /// Target Library Info.
1616 const TargetLibraryInfo *TLI;
1617 /// Demanded bits analysis.
1618 DemandedBits *DB;
1619 const Function *TheFunction;
1620 // Loop Vectorize Hint.
1621 const LoopVectorizeHints *Hints;
1622 // Values to ignore in the cost model.
1623 const SmallPtrSetImpl<const Value *> &ValuesToIgnore;
1624 };
1625
1626 /// \brief This holds vectorization requirements that must be verified late in
1627 /// the process. The requirements are set by the legality analysis and the
1628 /// cost model. Once vectorization has been determined to be possible and
1629 /// profitable, the requirements can be verified by looking for metadata or
1630 /// compiler options.
1631 /// For example, some loops require FP commutativity which is only allowed if
1632 /// vectorization is explicitly specified or if the fast-math compiler option
1633 /// has been provided.
1634 /// Late evaluation of these requirements allows helpful diagnostics to be
1635 /// composed that tell the user what needs to be done to vectorize the loop,
1636 /// for example by specifying #pragma clang loop vectorize or -ffast-math.
1637 /// Late evaluation should be used only when such diagnostics can be followed by a non-expert user.
1638 class LoopVectorizationRequirements {
1639 public:
1640 LoopVectorizationRequirements()
1641 : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr) {}
1642
1643 void addUnsafeAlgebraInst(Instruction *I) {
1644 // First unsafe algebra instruction.
1645 if (!UnsafeAlgebraInst)
1646 UnsafeAlgebraInst = I;
1647 }
1648
1649 void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }
1650
1651 bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
1652 const char *Name = Hints.vectorizeAnalysisPassName();
1653 bool Failed = false;
1654 if (UnsafeAlgebraInst && !Hints.allowReordering()) {
1655 emitOptimizationRemarkAnalysisFPCommute(
1656 F->getContext(), Name, *F, UnsafeAlgebraInst->getDebugLoc(),
1657 VectorizationReport() << "cannot prove it is safe to reorder "
1658 "floating-point operations");
1659 Failed = true;
1660 }
1661
1662 // Test if runtime memcheck thresholds are exceeded.
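// (Clarifying note, added commentary: two limits apply below. The pragma
// threshold bounds loops where vectorization was explicitly requested,
// while the default threshold only applies when the user has not allowed
// reordering, e.g. via a vectorize pragma or fast-math.)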
1663 bool PragmaThresholdReached = 1664 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 1665 bool ThresholdReached = 1666 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 1667 if ((ThresholdReached && !Hints.allowReordering()) || 1668 PragmaThresholdReached) { 1669 emitOptimizationRemarkAnalysisAliasing( 1670 F->getContext(), Name, *F, L->getStartLoc(), 1671 VectorizationReport() 1672 << "cannot prove it is safe to reorder memory operations"); 1673 DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 1674 Failed = true; 1675 } 1676 1677 return Failed; 1678 } 1679 1680 private: 1681 unsigned NumRuntimePointerChecks; 1682 Instruction *UnsafeAlgebraInst; 1683 }; 1684 1685 static void addInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) { 1686 if (L.empty()) 1687 return V.push_back(&L); 1688 1689 for (Loop *InnerL : L) 1690 addInnerLoop(*InnerL, V); 1691 } 1692 1693 /// The LoopVectorize Pass. 1694 struct LoopVectorize : public FunctionPass { 1695 /// Pass identification, replacement for typeid 1696 static char ID; 1697 1698 explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true) 1699 : FunctionPass(ID), DisableUnrolling(NoUnrolling), 1700 AlwaysVectorize(AlwaysVectorize) { 1701 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1702 } 1703 1704 ScalarEvolution *SE; 1705 LoopInfo *LI; 1706 TargetTransformInfo *TTI; 1707 DominatorTree *DT; 1708 BlockFrequencyInfo *BFI; 1709 TargetLibraryInfo *TLI; 1710 DemandedBits *DB; 1711 AliasAnalysis *AA; 1712 AssumptionCache *AC; 1713 LoopAccessAnalysis *LAA; 1714 bool DisableUnrolling; 1715 bool AlwaysVectorize; 1716 1717 BlockFrequency ColdEntryFreq; 1718 1719 bool runOnFunction(Function &F) override { 1720 if (skipFunction(F)) 1721 return false; 1722 1723 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1724 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1725 TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1726 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1727 BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1728 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1729 TLI = TLIP ? &TLIP->getTLI() : nullptr; 1730 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1731 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1732 LAA = &getAnalysis<LoopAccessAnalysis>(); 1733 DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1734 1735 // Compute some weights outside of the loop over the loops. Compute this 1736 // using a BranchProbability to re-use its scaling math. 1737 const BranchProbability ColdProb(1, 5); // 20% 1738 ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb; 1739 1740 // Don't attempt if 1741 // 1. the target claims to have no vector registers, and 1742 // 2. interleaving won't help ILP. 1743 // 1744 // The second condition is necessary because, even if the target has no 1745 // vector registers, loop vectorization may still enable scalar 1746 // interleaving. 1747 if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2) 1748 return false; 1749 1750 // Build up a worklist of inner-loops to vectorize. This is necessary as 1751 // the act of vectorizing or partially unrolling a loop creates new loops 1752 // and can invalidate iterators across the loops. 
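// (Clarifying note, added commentary: for example, vectorization
// introduces a scalar remainder loop, which must not be re-discovered by
// iterating over LoopInfo while it is being mutated.)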
1753 SmallVector<Loop *, 8> Worklist; 1754 1755 for (Loop *L : *LI) 1756 addInnerLoop(*L, Worklist); 1757 1758 LoopsAnalyzed += Worklist.size(); 1759 1760 // Now walk the identified inner loops. 1761 bool Changed = false; 1762 while (!Worklist.empty()) 1763 Changed |= processLoop(Worklist.pop_back_val()); 1764 1765 // Process each loop nest in the function. 1766 return Changed; 1767 } 1768 1769 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 1770 SmallVector<Metadata *, 4> MDs; 1771 // Reserve first location for self reference to the LoopID metadata node. 1772 MDs.push_back(nullptr); 1773 bool IsUnrollMetadata = false; 1774 MDNode *LoopID = L->getLoopID(); 1775 if (LoopID) { 1776 // First find existing loop unrolling disable metadata. 1777 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 1778 MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 1779 if (MD) { 1780 const MDString *S = dyn_cast<MDString>(MD->getOperand(0)); 1781 IsUnrollMetadata = 1782 S && S->getString().startswith("llvm.loop.unroll.disable"); 1783 } 1784 MDs.push_back(LoopID->getOperand(i)); 1785 } 1786 } 1787 1788 if (!IsUnrollMetadata) { 1789 // Add runtime unroll disable metadata. 1790 LLVMContext &Context = L->getHeader()->getContext(); 1791 SmallVector<Metadata *, 1> DisableOperands; 1792 DisableOperands.push_back( 1793 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 1794 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 1795 MDs.push_back(DisableNode); 1796 MDNode *NewLoopID = MDNode::get(Context, MDs); 1797 // Set operand 0 to refer to the loop id itself. 1798 NewLoopID->replaceOperandWith(0, NewLoopID); 1799 L->setLoopID(NewLoopID); 1800 } 1801 } 1802 1803 bool processLoop(Loop *L) { 1804 assert(L->empty() && "Only process inner loops."); 1805 1806 #ifndef NDEBUG 1807 const std::string DebugLocStr = getDebugLocString(L); 1808 #endif /* NDEBUG */ 1809 1810 DEBUG(dbgs() << "\nLV: Checking a loop in \"" 1811 << L->getHeader()->getParent()->getName() << "\" from " 1812 << DebugLocStr << "\n"); 1813 1814 LoopVectorizeHints Hints(L, DisableUnrolling); 1815 1816 DEBUG(dbgs() << "LV: Loop hints:" 1817 << " force=" 1818 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 1819 ? "disabled" 1820 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 1821 ? "enabled" 1822 : "?")) 1823 << " width=" << Hints.getWidth() 1824 << " unroll=" << Hints.getInterleave() << "\n"); 1825 1826 // Function containing loop 1827 Function *F = L->getHeader()->getParent(); 1828 1829 // Looking at the diagnostic output is the only way to determine if a loop 1830 // was vectorized (other than looking at the IR or machine code), so it 1831 // is important to generate an optimization remark for each loop. Most of 1832 // these messages are generated by emitOptimizationRemarkAnalysis. Remarks 1833 // generated by emitOptimizationRemark and emitOptimizationRemarkMissed are 1834 // less verbose reporting vectorized loops and unvectorized loops that may 1835 // benefit from vectorization, respectively. 1836 1837 if (!Hints.allowVectorization(F, L, AlwaysVectorize)) { 1838 DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 1839 return false; 1840 } 1841 1842 // Check the loop for a trip count threshold: 1843 // do not vectorize loops with a tiny trip count. 1844 const unsigned TC = SE->getSmallConstantTripCount(L); 1845 if (TC > 0u && TC < TinyTripCountVectorThreshold) { 1846 DEBUG(dbgs() << "LV: Found a loop with a very small trip count. 
" 1847 << "This loop is not worth vectorizing."); 1848 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 1849 DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 1850 else { 1851 DEBUG(dbgs() << "\n"); 1852 emitAnalysisDiag(F, L, Hints, VectorizationReport() 1853 << "vectorization is not beneficial " 1854 "and is not explicitly forced"); 1855 return false; 1856 } 1857 } 1858 1859 PredicatedScalarEvolution PSE(*SE, *L); 1860 1861 // Check if it is legal to vectorize the loop. 1862 LoopVectorizationRequirements Requirements; 1863 LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, LAA, 1864 &Requirements, &Hints); 1865 if (!LVL.canVectorize()) { 1866 DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 1867 emitMissedWarning(F, L, Hints); 1868 return false; 1869 } 1870 1871 // Collect values we want to ignore in the cost model. This includes 1872 // type-promoting instructions we identified during reduction detection. 1873 SmallPtrSet<const Value *, 32> ValuesToIgnore; 1874 CodeMetrics::collectEphemeralValues(L, AC, ValuesToIgnore); 1875 for (auto &Reduction : *LVL.getReductionVars()) { 1876 RecurrenceDescriptor &RedDes = Reduction.second; 1877 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 1878 ValuesToIgnore.insert(Casts.begin(), Casts.end()); 1879 } 1880 1881 // Use the cost model. 1882 LoopVectorizationCostModel CM(L, PSE.getSE(), LI, &LVL, *TTI, TLI, DB, AC, 1883 F, &Hints, ValuesToIgnore); 1884 1885 // Check the function attributes to find out if this function should be 1886 // optimized for size. 1887 bool OptForSize = 1888 Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize(); 1889 1890 // Compute the weighted frequency of this loop being executed and see if it 1891 // is less than 20% of the function entry baseline frequency. Note that we 1892 // always have a canonical loop here because we think we *can* vectorize. 1893 // FIXME: This is hidden behind a flag due to pervasive problems with 1894 // exactly what block frequency models. 1895 if (LoopVectorizeWithBlockFrequency) { 1896 BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader()); 1897 if (Hints.getForce() != LoopVectorizeHints::FK_Enabled && 1898 LoopEntryFreq < ColdEntryFreq) 1899 OptForSize = true; 1900 } 1901 1902 // Check the function attributes to see if implicit floats are allowed. 1903 // FIXME: This check doesn't seem possibly correct -- what if the loop is 1904 // an integer loop and the vector instructions selected are purely integer 1905 // vector instructions? 1906 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 1907 DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat" 1908 "attribute is used.\n"); 1909 emitAnalysisDiag( 1910 F, L, Hints, 1911 VectorizationReport() 1912 << "loop not vectorized due to NoImplicitFloat attribute"); 1913 emitMissedWarning(F, L, Hints); 1914 return false; 1915 } 1916 1917 // Check if the target supports potentially unsafe FP vectorization. 1918 // FIXME: Add a check for the type of safety issue (denormal, signaling) 1919 // for the target we're vectorizing for, to make sure none of the 1920 // additional fp-math flags can help. 
1921 if (Hints.isPotentiallyUnsafe() &&
1922 TTI->isFPVectorizationPotentiallyUnsafe()) {
1923 DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
1924 emitAnalysisDiag(F, L, Hints,
1925 VectorizationReport()
1926 << "loop not vectorized due to unsafe FP support.");
1927 emitMissedWarning(F, L, Hints);
1928 return false;
1929 }
1930
1931 // Select the optimal vectorization factor.
1932 const LoopVectorizationCostModel::VectorizationFactor VF =
1933 CM.selectVectorizationFactor(OptForSize);
1934
1935 // Select the interleave count.
1936 unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);
1937
1938 // Get user interleave count.
1939 unsigned UserIC = Hints.getInterleave();
1940
1941 // Identify the diagnostic messages that should be produced.
1942 std::string VecDiagMsg, IntDiagMsg;
1943 bool VectorizeLoop = true, InterleaveLoop = true;
1944
1945 if (Requirements.doesNotMeet(F, L, Hints)) {
1946 DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
1947 "requirements.\n");
1948 emitMissedWarning(F, L, Hints);
1949 return false;
1950 }
1951
1952 if (VF.Width == 1) {
1953 DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
1954 VecDiagMsg =
1955 "the cost-model indicates that vectorization is not beneficial";
1956 VectorizeLoop = false;
1957 }
1958
1959 if (IC == 1 && UserIC <= 1) {
1960 // Tell the user interleaving is not beneficial.
1961 DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
1962 IntDiagMsg =
1963 "the cost-model indicates that interleaving is not beneficial";
1964 InterleaveLoop = false;
1965 if (UserIC == 1)
1966 IntDiagMsg +=
1967 " and is explicitly disabled or interleave count is set to 1";
1968 } else if (IC > 1 && UserIC == 1) {
1969 // Tell the user interleaving is beneficial, but it is explicitly disabled.
1970 DEBUG(dbgs()
1971 << "LV: Interleaving is beneficial but is explicitly disabled.\n");
1972 IntDiagMsg = "the cost-model indicates that interleaving is beneficial "
1973 "but is explicitly disabled or interleave count is set to 1";
1974 InterleaveLoop = false;
1975 }
1976
1977 // Override IC if user provided an interleave count.
1978 IC = UserIC > 0 ? UserIC : IC;
1979
1980 // Emit diagnostic messages, if any.
1981 const char *VAPassName = Hints.vectorizeAnalysisPassName();
1982 if (!VectorizeLoop && !InterleaveLoop) {
1983 // Do not vectorize or interleave the loop.
1984 emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
1985 L->getStartLoc(), VecDiagMsg);
1986 emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
1987 L->getStartLoc(), IntDiagMsg);
1988 return false;
1989 } else if (!VectorizeLoop && InterleaveLoop) {
1990 DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
1991 emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
1992 L->getStartLoc(), VecDiagMsg);
1993 } else if (VectorizeLoop && !InterleaveLoop) {
1994 DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
1995 << DebugLocStr << '\n');
1996 emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
1997 L->getStartLoc(), IntDiagMsg);
1998 } else if (VectorizeLoop && InterleaveLoop) {
1999 DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
2000 << DebugLocStr << '\n');
2001 DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
2002 }
2003
2004 if (!VectorizeLoop) {
2005 assert(IC > 1 && "interleave count should not be 1 or 0");
2006 // If we decided that it is not profitable to vectorize the loop, then
2007 // interleave it.
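// (Clarifying note, added commentary: InnerLoopUnroller is an
// InnerLoopVectorizer specialized to VF = 1, so the same code-generation
// machinery emits IC interleaved scalar copies of the loop body.)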
2008 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, IC);
2009 Unroller.vectorize(&LVL, CM.MinBWs);
2010
2011 emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
2012 Twine("interleaved loop (interleave count: ") +
2013 Twine(IC) + ")");
2014 } else {
2015 // If we decided that it is *profitable* to vectorize the loop, then do it.
2016 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, VF.Width, IC);
2017 LB.vectorize(&LVL, CM.MinBWs);
2018 ++LoopsVectorized;
2019
2020 // Add metadata to disable runtime unrolling of the scalar loop when there
2021 // are no runtime checks for strides and memory: in that case the scalar
2022 // loop is rarely executed and is not worth unrolling.
2023 if (!LB.IsSafetyChecksAdded())
2024 AddRuntimeUnrollDisableMetaData(L);
2025
2026 // Report the vectorization decision.
2027 emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
2028 Twine("vectorized loop (vectorization width: ") +
2029 Twine(VF.Width) + ", interleave count: " +
2030 Twine(IC) + ")");
2031 }
2032
2033 // Mark the loop as already vectorized to avoid vectorizing again.
2034 Hints.setAlreadyVectorized();
2035
2036 DEBUG(verifyFunction(*L->getHeader()->getParent()));
2037 return true;
2038 }
2039
2040 void getAnalysisUsage(AnalysisUsage &AU) const override {
2041 AU.addRequired<AssumptionCacheTracker>();
2042 AU.addRequiredID(LoopSimplifyID);
2043 AU.addRequiredID(LCSSAID);
2044 AU.addRequired<BlockFrequencyInfoWrapperPass>();
2045 AU.addRequired<DominatorTreeWrapperPass>();
2046 AU.addRequired<LoopInfoWrapperPass>();
2047 AU.addRequired<ScalarEvolutionWrapperPass>();
2048 AU.addRequired<TargetTransformInfoWrapperPass>();
2049 AU.addRequired<AAResultsWrapperPass>();
2050 AU.addRequired<LoopAccessAnalysis>();
2051 AU.addRequired<DemandedBitsWrapperPass>();
2052 AU.addPreserved<LoopInfoWrapperPass>();
2053 AU.addPreserved<DominatorTreeWrapperPass>();
2054 AU.addPreserved<BasicAAWrapperPass>();
2055 AU.addPreserved<GlobalsAAWrapperPass>();
2056 }
2057 };
2058
2059 } // end anonymous namespace
2060
2061 //===----------------------------------------------------------------------===//
2062 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2063 // LoopVectorizationCostModel.
2064 //===----------------------------------------------------------------------===//
2065
2066 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2067 // We need to place the broadcast of invariant variables outside the loop.
2068 Instruction *Instr = dyn_cast<Instruction>(V);
2069 bool NewInstr = (Instr &&
2070 std::find(LoopVectorBody.begin(), LoopVectorBody.end(),
2071 Instr->getParent()) != LoopVectorBody.end());
2072 bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;
2073
2074 // Place the code for broadcasting invariant variables in the new preheader.
2075 IRBuilder<>::InsertPointGuard Guard(Builder);
2076 if (Invariant)
2077 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2078
2079 // Broadcast the scalar into all locations in the vector.
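// (Illustrative note, added commentary: for VF = 4 an i32 %x becomes
// <%x, %x, %x, %x>; CreateVectorSplat emits an insertelement into lane 0
// followed by a shufflevector with an all-zero mask.)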
2080 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2081
2082 return Shuf;
2083 }
2084
2085 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx,
2086 const SCEV *StepSCEV) {
2087 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2088 SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2089 Value *StepValue = Exp.expandCodeFor(StepSCEV, StepSCEV->getType(),
2090 &*Builder.GetInsertPoint());
2091 return getStepVector(Val, StartIdx, StepValue);
2092 }
2093
2094 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx,
2095 Value *Step) {
2096 assert(Val->getType()->isVectorTy() && "Must be a vector");
2097 assert(Val->getType()->getScalarType()->isIntegerTy() &&
2098 "Elem must be an integer");
2099 assert(Step->getType() == Val->getType()->getScalarType() &&
2100 "Step has wrong type");
2101 // Create the types.
2102 Type *ITy = Val->getType()->getScalarType();
2103 VectorType *Ty = cast<VectorType>(Val->getType());
2104 int VLen = Ty->getNumElements();
2105 SmallVector<Constant *, 8> Indices;
2106
2107 // Create a vector of consecutive numbers from StartIdx to StartIdx + VLen - 1.
2108 for (int i = 0; i < VLen; ++i)
2109 Indices.push_back(ConstantInt::get(ITy, StartIdx + i));
2110
2111 // Add the consecutive indices to the vector value.
2112 Constant *Cv = ConstantVector::get(Indices);
2113 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
2114 Step = Builder.CreateVectorSplat(VLen, Step);
2115 assert(Step->getType() == Val->getType() && "Invalid step vec");
2116 // FIXME: The newly created binary instructions should contain nsw/nuw flags,
2117 // which can be found from the original scalar operations.
2118 Step = Builder.CreateMul(Cv, Step);
2119 return Builder.CreateAdd(Val, Step, "induction");
2120 }
2121
2122 int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
2123 assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr");
2124 auto *SE = PSE.getSE();
2125 // Make sure that the pointer does not point to structs.
2126 if (Ptr->getType()->getPointerElementType()->isAggregateType())
2127 return 0;
2128
2129 // If this value is a pointer induction variable we know it is consecutive.
2130 PHINode *Phi = dyn_cast_or_null<PHINode>(Ptr);
2131 if (Phi && Inductions.count(Phi)) {
2132 InductionDescriptor II = Inductions[Phi];
2133 return II.getConsecutiveDirection();
2134 }
2135
2136 GetElementPtrInst *Gep = getGEPInstruction(Ptr);
2137 if (!Gep)
2138 return 0;
2139
2140 unsigned NumOperands = Gep->getNumOperands();
2141 Value *GpPtr = Gep->getPointerOperand();
2142 // If the GEP's base pointer is a consecutive pointer induction variable and
2143 // all of the indices are loop invariant, then we know it is consecutive.
2144 Phi = dyn_cast<PHINode>(GpPtr);
2145 if (Phi && Inductions.count(Phi)) {
2146
2147 // Make sure that the pointer does not point to structs.
2148 PointerType *GepPtrType = cast<PointerType>(GpPtr->getType());
2149 if (GepPtrType->getElementType()->isAggregateType())
2150 return 0;
2151
2152 // Make sure that all of the index operands are loop invariant.
2153 for (unsigned i = 1; i < NumOperands; ++i)
2154 if (!SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop))
2155 return 0;
2156
2157 InductionDescriptor II = Inductions[Phi];
2158 return II.getConsecutiveDirection();
2159 }
2160
2161 unsigned InductionOperand = getGEPInductionOperand(Gep);
2162
2163 // Check that all of the gep indices are uniform except for our induction
2164 // operand.
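// (Illustrative note, added commentary: for an access like A[j][i] where
// i is the induction variable, the row index j must be loop invariant;
// otherwise the address is not consecutive and 0 is returned.)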
2165 for (unsigned i = 0; i != NumOperands; ++i) 2166 if (i != InductionOperand && 2167 !SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop)) 2168 return 0; 2169 2170 // We can emit wide load/stores only if the last non-zero index is the 2171 // induction variable. 2172 const SCEV *Last = nullptr; 2173 if (!Strides.count(Gep)) 2174 Last = PSE.getSCEV(Gep->getOperand(InductionOperand)); 2175 else { 2176 // Because of the multiplication by a stride we can have a s/zext cast. 2177 // We are going to replace this stride by 1 so the cast is safe to ignore. 2178 // 2179 // %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] 2180 // %0 = trunc i64 %indvars.iv to i32 2181 // %mul = mul i32 %0, %Stride1 2182 // %idxprom = zext i32 %mul to i64 << Safe cast. 2183 // %arrayidx = getelementptr inbounds i32* %B, i64 %idxprom 2184 // 2185 Last = replaceSymbolicStrideSCEV(PSE, Strides, 2186 Gep->getOperand(InductionOperand), Gep); 2187 if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(Last)) 2188 Last = 2189 (C->getSCEVType() == scSignExtend || C->getSCEVType() == scZeroExtend) 2190 ? C->getOperand() 2191 : Last; 2192 } 2193 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Last)) { 2194 const SCEV *Step = AR->getStepRecurrence(*SE); 2195 2196 // The memory is consecutive because the last index is consecutive 2197 // and all other indices are loop invariant. 2198 if (Step->isOne()) 2199 return 1; 2200 if (Step->isAllOnesValue()) 2201 return -1; 2202 } 2203 2204 return 0; 2205 } 2206 2207 bool LoopVectorizationLegality::isUniform(Value *V) { 2208 return LAI->isUniform(V); 2209 } 2210 2211 InnerLoopVectorizer::VectorParts & 2212 InnerLoopVectorizer::getVectorValue(Value *V) { 2213 assert(V != Induction && "The new induction variable should not be used."); 2214 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2215 2216 // If we have a stride that is replaced by one, do it here. 2217 if (Legal->hasStride(V)) 2218 V = ConstantInt::get(V->getType(), 1); 2219 2220 // If we have this scalar in the map, return it. 2221 if (WidenMap.has(V)) 2222 return WidenMap.get(V); 2223 2224 // If this scalar is unknown, assume that it is a constant or that it is 2225 // loop invariant. Broadcast V and save the value for future uses. 2226 Value *B = getBroadcastInstrs(V); 2227 return WidenMap.splat(V, B); 2228 } 2229 2230 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2231 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2232 SmallVector<Constant *, 8> ShuffleMask; 2233 for (unsigned i = 0; i < VF; ++i) 2234 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 2235 2236 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 2237 ConstantVector::get(ShuffleMask), 2238 "reverse"); 2239 } 2240 2241 // Get a mask to interleave \p NumVec vectors into a wide vector. 2242 // I.e. <0, VF, VF*2, ..., VF*(NumVec-1), 1, VF+1, VF*2+1, ...> 2243 // E.g. For 2 interleaved vectors, if VF is 4, the mask is: 2244 // <0, 4, 1, 5, 2, 6, 3, 7> 2245 static Constant *getInterleavedMask(IRBuilder<> &Builder, unsigned VF, 2246 unsigned NumVec) { 2247 SmallVector<Constant *, 16> Mask; 2248 for (unsigned i = 0; i < VF; i++) 2249 for (unsigned j = 0; j < NumVec; j++) 2250 Mask.push_back(Builder.getInt32(j * VF + i)); 2251 2252 return ConstantVector::get(Mask); 2253 } 2254 2255 // Get the strided mask starting from index \p Start. 2256 // I.e. 
<Start, Start + Stride, ..., Start + Stride*(VF-1)>
2257 static Constant *getStridedMask(IRBuilder<> &Builder, unsigned Start,
2258 unsigned Stride, unsigned VF) {
2259 SmallVector<Constant *, 16> Mask;
2260 for (unsigned i = 0; i < VF; i++)
2261 Mask.push_back(Builder.getInt32(Start + i * Stride));
2262
2263 return ConstantVector::get(Mask);
2264 }
2265
2266 // Get a mask of two parts: The first part consists of sequential integers
2267 // starting from 0; the second part consists of UNDEFs.
2268 // I.e. <0, 1, 2, ..., NumInt - 1, undef, ..., undef>
2269 static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned NumInt,
2270 unsigned NumUndef) {
2271 SmallVector<Constant *, 16> Mask;
2272 for (unsigned i = 0; i < NumInt; i++)
2273 Mask.push_back(Builder.getInt32(i));
2274
2275 Constant *Undef = UndefValue::get(Builder.getInt32Ty());
2276 for (unsigned i = 0; i < NumUndef; i++)
2277 Mask.push_back(Undef);
2278
2279 return ConstantVector::get(Mask);
2280 }
2281
2282 // Concatenate two vectors with the same element type. The 2nd vector should
2283 // not have more elements than the 1st vector. If the 2nd vector has fewer
2284 // elements, extend it with UNDEFs.
2285 static Value *ConcatenateTwoVectors(IRBuilder<> &Builder, Value *V1,
2286 Value *V2) {
2287 VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
2288 VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
2289 assert(VecTy1 && VecTy2 &&
2290 VecTy1->getScalarType() == VecTy2->getScalarType() &&
2291 "Expect two vectors with the same element type");
2292
2293 unsigned NumElts1 = VecTy1->getNumElements();
2294 unsigned NumElts2 = VecTy2->getNumElements();
2295 assert(NumElts1 >= NumElts2 && "The first vector should not have fewer elements");
2296
2297 if (NumElts1 > NumElts2) {
2298 // Extend with UNDEFs.
2299 Constant *ExtMask =
2300 getSequentialMask(Builder, NumElts2, NumElts1 - NumElts2);
2301 V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask);
2302 }
2303
2304 Constant *Mask = getSequentialMask(Builder, NumElts1 + NumElts2, 0);
2305 return Builder.CreateShuffleVector(V1, V2, Mask);
2306 }
2307
2308 // Concatenate vectors in the given list. All vectors have the same type.
2309 static Value *ConcatenateVectors(IRBuilder<> &Builder,
2310 ArrayRef<Value *> InputList) {
2311 unsigned NumVec = InputList.size();
2312 assert(NumVec > 1 && "Should be at least two vectors");
2313
2314 SmallVector<Value *, 8> ResList;
2315 ResList.append(InputList.begin(), InputList.end());
2316 do {
2317 SmallVector<Value *, 8> TmpList;
2318 for (unsigned i = 0; i < NumVec - 1; i += 2) {
2319 Value *V0 = ResList[i], *V1 = ResList[i + 1];
2320 assert((V0->getType() == V1->getType() || i == NumVec - 2) &&
2321 "Only the last vector may have a different type");
2322
2323 TmpList.push_back(ConcatenateTwoVectors(Builder, V0, V1));
2324 }
2325
2326 // Push the last vector if the total number of vectors is odd.
2327 if (NumVec % 2 != 0)
2328 TmpList.push_back(ResList[NumVec - 1]);
2329
2330 ResList = TmpList;
2331 NumVec = ResList.size();
2332 } while (NumVec > 1);
2333
2334 return ResList[0];
2335 }
2336
2337 // Try to vectorize the interleave group that \p Instr belongs to.
2338 //
2339 // E.g. Translate following interleaved load group (factor = 3):
2340 // for (i = 0; i < N; i+=3) {
2341 // R = Pic[i]; // Member of index 0
2342 // G = Pic[i+1]; // Member of index 1
2343 // B = Pic[i+2]; // Member of index 2
2344 // ... do something to R, G, B
2345 // }
2346 // To:
2347 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B
2348 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements
2349 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements
2350 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements
2351 //
2352 // Or translate following interleaved store group (factor = 3):
2353 // for (i = 0; i < N; i+=3) {
2354 // ... do something to R, G, B
2355 // Pic[i] = R; // Member of index 0
2356 // Pic[i+1] = G; // Member of index 1
2357 // Pic[i+2] = B; // Member of index 2
2358 // }
2359 // To:
2360 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2361 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2362 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2363 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
2364 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
2365 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
2366 const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr);
2367 assert(Group && "Failed to get an interleaved access group.");
2368
2369 // Skip if current instruction is not the insert position.
2370 if (Instr != Group->getInsertPos())
2371 return;
2372
2373 LoadInst *LI = dyn_cast<LoadInst>(Instr);
2374 StoreInst *SI = dyn_cast<StoreInst>(Instr);
2375 Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();
2376
2377 // Prepare for the vector type of the interleaved load/store.
2378 Type *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
2379 unsigned InterleaveFactor = Group->getFactor();
2380 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
2381 Type *PtrTy = VecTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());
2382
2383 // Prepare for the new pointers.
2384 setDebugLocFromInst(Builder, Ptr);
2385 VectorParts &PtrParts = getVectorValue(Ptr);
2386 SmallVector<Value *, 2> NewPtrs;
2387 unsigned Index = Group->getIndex(Instr);
2388 for (unsigned Part = 0; Part < UF; Part++) {
2389 // Extract the pointer for current instruction from the pointer vector. A
2390 // reverse access uses the pointer in the last lane.
2391 Value *NewPtr = Builder.CreateExtractElement(
2392 PtrParts[Part],
2393 Group->isReverse() ? Builder.getInt32(VF - 1) : Builder.getInt32(0));
2394
2395 // Note that the current instruction may be a member at any index; we need
2396 // to adjust the address so it points at the member of index 0.
2397 //
2398 // E.g. a = A[i+1]; // Member of index 1 (Current instruction)
2399 // b = A[i]; // Member of index 0
2400 // The current pointer points to A[i+1]; adjust it to A[i].
2401 //
2402 // E.g. A[i+1] = a; // Member of index 1
2403 // A[i] = b; // Member of index 0
2404 // A[i+2] = c; // Member of index 2 (Current instruction)
2405 // The current pointer points to A[i+2]; adjust it to A[i].
2406 NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
2407
2408 // Cast to the vector pointer type.
2409 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
2410 }
2411
2412 setDebugLocFromInst(Builder, Instr);
2413 Value *UndefVec = UndefValue::get(VecTy);
2414
2415 // Vectorize the interleaved load group.
2416 if (LI) {
2417 for (unsigned Part = 0; Part < UF; Part++) {
2418 Instruction *NewLoadInstr = Builder.CreateAlignedLoad(
2419 NewPtrs[Part], Group->getAlignment(), "wide.vec");
2420
2421 for (unsigned i = 0; i < InterleaveFactor; i++) {
2422 Instruction *Member = Group->getMember(i);
2423
2424 // Skip the gaps in the group.
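// (Clarifying note, added commentary: a gap arises when the loop does not
// access one of the interleaved members, e.g. a factor-3 group that only
// reads R and B; the unused lanes of %wide.vec are simply never
// extracted.)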
2425 if (!Member)
2426 continue;
2427
2428 Constant *StrideMask = getStridedMask(Builder, i, InterleaveFactor, VF);
2429 Value *StridedVec = Builder.CreateShuffleVector(
2430 NewLoadInstr, UndefVec, StrideMask, "strided.vec");
2431
2432 // If this member has a different type, cast the result type.
2433 if (Member->getType() != ScalarTy) {
2434 VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2435 StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy);
2436 }
2437
2438 VectorParts &Entry = WidenMap.get(Member);
2439 Entry[Part] =
2440 Group->isReverse() ? reverseVector(StridedVec) : StridedVec;
2441 }
2442
2443 addMetadata(NewLoadInstr, Instr);
2444 }
2445 return;
2446 }
2447
2448 // The sub vector type for current instruction.
2449 VectorType *SubVT = VectorType::get(ScalarTy, VF);
2450
2451 // Vectorize the interleaved store group.
2452 for (unsigned Part = 0; Part < UF; Part++) {
2453 // Collect the stored vector from each member.
2454 SmallVector<Value *, 4> StoredVecs;
2455 for (unsigned i = 0; i < InterleaveFactor; i++) {
2456 // An interleaved store group doesn't allow gaps, so each index has a member.
2457 Instruction *Member = Group->getMember(i);
2458 assert(Member && "Failed to get a member from an interleaved store group");
2459
2460 Value *StoredVec =
2461 getVectorValue(dyn_cast<StoreInst>(Member)->getValueOperand())[Part];
2462 if (Group->isReverse())
2463 StoredVec = reverseVector(StoredVec);
2464
2465 // If this member has a different type, cast it to a unified type.
2466 if (StoredVec->getType() != SubVT)
2467 StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT);
2468
2469 StoredVecs.push_back(StoredVec);
2470 }
2471
2472 // Concatenate all vectors into a wide vector.
2473 Value *WideVec = ConcatenateVectors(Builder, StoredVecs);
2474
2475 // Interleave the elements in the wide vector.
2476 Constant *IMask = getInterleavedMask(Builder, VF, InterleaveFactor);
2477 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
2478 "interleaved.vec");
2479
2480 Instruction *NewStoreInstr =
2481 Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
2482 addMetadata(NewStoreInstr, Instr);
2483 }
2484 }
2485
2486 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
2487 // Attempt to issue a wide load.
2488 LoadInst *LI = dyn_cast<LoadInst>(Instr);
2489 StoreInst *SI = dyn_cast<StoreInst>(Instr);
2490
2491 assert((LI || SI) && "Invalid Load/Store instruction");
2492
2493 // Try to vectorize the interleave group if this access is interleaved.
2494 if (Legal->isAccessInterleaved(Instr))
2495 return vectorizeInterleaveGroup(Instr);
2496
2497 Type *ScalarDataTy = LI ? LI->getType() : SI->getValueOperand()->getType();
2498 Type *DataTy = VectorType::get(ScalarDataTy, VF);
2499 Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();
2500 unsigned Alignment = LI ? LI->getAlignment() : SI->getAlignment();
2501 // An alignment of 0 means target ABI alignment. We need to use the scalar's
2502 // target ABI alignment in such a case.
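// (Clarifying note, added commentary: getAlignment() returns 0 when the IR
// does not record an explicit alignment; DL.getABITypeAlignment then
// supplies the default, e.g. typically 4 for i32.)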
2503 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2504 if (!Alignment) 2505 Alignment = DL.getABITypeAlignment(ScalarDataTy); 2506 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2507 unsigned ScalarAllocatedSize = DL.getTypeAllocSize(ScalarDataTy); 2508 unsigned VectorElementSize = DL.getTypeStoreSize(DataTy) / VF; 2509 2510 if (SI && Legal->blockNeedsPredication(SI->getParent()) && 2511 !Legal->isMaskRequired(SI)) 2512 return scalarizeInstruction(Instr, true); 2513 2514 if (ScalarAllocatedSize != VectorElementSize) 2515 return scalarizeInstruction(Instr); 2516 2517 // If the pointer is loop invariant scalarize the load. 2518 if (LI && Legal->isUniform(Ptr)) 2519 return scalarizeInstruction(Instr); 2520 2521 // If the pointer is non-consecutive and gather/scatter is not supported 2522 // scalarize the instruction. 2523 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 2524 bool Reverse = ConsecutiveStride < 0; 2525 bool CreateGatherScatter = 2526 !ConsecutiveStride && ((LI && Legal->isLegalMaskedGather(ScalarDataTy)) || 2527 (SI && Legal->isLegalMaskedScatter(ScalarDataTy))); 2528 2529 if (!ConsecutiveStride && !CreateGatherScatter) 2530 return scalarizeInstruction(Instr); 2531 2532 Constant *Zero = Builder.getInt32(0); 2533 VectorParts &Entry = WidenMap.get(Instr); 2534 VectorParts VectorGep; 2535 2536 // Handle consecutive loads/stores. 2537 GetElementPtrInst *Gep = getGEPInstruction(Ptr); 2538 if (ConsecutiveStride) { 2539 if (Gep && Legal->isInductionVariable(Gep->getPointerOperand())) { 2540 setDebugLocFromInst(Builder, Gep); 2541 Value *PtrOperand = Gep->getPointerOperand(); 2542 Value *FirstBasePtr = getVectorValue(PtrOperand)[0]; 2543 FirstBasePtr = Builder.CreateExtractElement(FirstBasePtr, Zero); 2544 2545 // Create the new GEP with the new induction variable. 2546 GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone()); 2547 Gep2->setOperand(0, FirstBasePtr); 2548 Gep2->setName("gep.indvar.base"); 2549 Ptr = Builder.Insert(Gep2); 2550 } else if (Gep) { 2551 setDebugLocFromInst(Builder, Gep); 2552 assert(PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getPointerOperand()), 2553 OrigLoop) && 2554 "Base ptr must be invariant"); 2555 // The last index does not have to be the induction. It can be 2556 // consecutive and be a function of the index. For example A[I+1]; 2557 unsigned NumOperands = Gep->getNumOperands(); 2558 unsigned InductionOperand = getGEPInductionOperand(Gep); 2559 // Create the new GEP with the new induction variable. 2560 GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone()); 2561 2562 for (unsigned i = 0; i < NumOperands; ++i) { 2563 Value *GepOperand = Gep->getOperand(i); 2564 Instruction *GepOperandInst = dyn_cast<Instruction>(GepOperand); 2565 2566 // Update last index or loop invariant instruction anchored in loop. 2567 if (i == InductionOperand || 2568 (GepOperandInst && OrigLoop->contains(GepOperandInst))) { 2569 assert((i == InductionOperand || 2570 PSE.getSE()->isLoopInvariant(PSE.getSCEV(GepOperandInst), 2571 OrigLoop)) && 2572 "Must be last index or loop invariant"); 2573 2574 VectorParts &GEPParts = getVectorValue(GepOperand); 2575 Value *Index = GEPParts[0]; 2576 Index = Builder.CreateExtractElement(Index, Zero); 2577 Gep2->setOperand(i, Index); 2578 Gep2->setName("gep.indvar.idx"); 2579 } 2580 } 2581 Ptr = Builder.Insert(Gep2); 2582 } else { // No GEP 2583 // Use the induction element ptr. 
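// (Clarifying note, added commentary: here Ptr is itself a pointer
// induction PHI, e.g. a loop that advances a pointer directly; lane 0 of
// its widened value is the scalar base address for this iteration group.)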
2584 assert(isa<PHINode>(Ptr) && "Invalid induction ptr");
2585 setDebugLocFromInst(Builder, Ptr);
2586 VectorParts &PtrVal = getVectorValue(Ptr);
2587 Ptr = Builder.CreateExtractElement(PtrVal[0], Zero);
2588 }
2589 } else {
2590 // At this point we need a vector version of the GEP for gather or scatter.
2591 assert(CreateGatherScatter && "The instruction should be scalarized");
2592 if (Gep) {
2593 SmallVector<VectorParts, 4> OpsV;
2594 // When vectorizing the GEP across UF parts, we want to keep each
2595 // loop-invariant base or index of the GEP scalar.
2596 for (Value *Op : Gep->operands()) {
2597 if (PSE.getSE()->isLoopInvariant(PSE.getSCEV(Op), OrigLoop))
2598 OpsV.push_back(VectorParts(UF, Op));
2599 else
2600 OpsV.push_back(getVectorValue(Op));
2601 }
2602
2603 for (unsigned Part = 0; Part < UF; ++Part) {
2604 SmallVector<Value *, 4> Ops;
2605 Value *GEPBasePtr = OpsV[0][Part];
2606 for (unsigned i = 1; i < Gep->getNumOperands(); i++)
2607 Ops.push_back(OpsV[i][Part]);
2608 Value *NewGep =
2609 Builder.CreateGEP(nullptr, GEPBasePtr, Ops, "VectorGep");
2610 assert(NewGep->getType()->isVectorTy() && "Expected vector GEP");
2611 NewGep =
2612 Builder.CreateBitCast(NewGep, VectorType::get(Ptr->getType(), VF));
2613 VectorGep.push_back(NewGep);
2614 }
2615 } else
2616 VectorGep = getVectorValue(Ptr);
2617 }
2618
2619 VectorParts Mask = createBlockInMask(Instr->getParent());
2620 // Handle Stores:
2621 if (SI) {
2622 assert(!Legal->isUniform(SI->getPointerOperand()) &&
2623 "We do not allow storing to uniform addresses");
2624 setDebugLocFromInst(Builder, SI);
2625 // We don't want to update the value in the map as it might be used in
2626 // another expression. So don't use a reference type for "StoredVal".
2627 VectorParts StoredVal = getVectorValue(SI->getValueOperand());
2628
2629 for (unsigned Part = 0; Part < UF; ++Part) {
2630 Instruction *NewSI = nullptr;
2631 if (CreateGatherScatter) {
2632 Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr;
2633 NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part],
2634 Alignment, MaskPart);
2635 } else {
2636 // Calculate the pointer for the specific unroll-part.
2637 Value *PartPtr =
2638 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));
2639
2640 if (Reverse) {
2641 // If we store to reverse consecutive memory locations, then we need
2642 // to reverse the order of elements in the stored value.
2643 StoredVal[Part] = reverseVector(StoredVal[Part]);
2644 // If the address is consecutive but reversed, then the
2645 // wide store needs to start at the last vector element.
2646 PartPtr =
2647 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
2648 PartPtr =
2649 Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
2650 Mask[Part] = reverseVector(Mask[Part]);
2651 }
2652
2653 Value *VecPtr =
2654 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2655
2656 if (Legal->isMaskRequired(SI))
2657 NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment,
2658 Mask[Part]);
2659 else
2660 NewSI =
2661 Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment);
2662 }
2663 addMetadata(NewSI, SI);
2664 }
2665 return;
2666 }
2667
2668 // Handle loads.
2669 assert(LI && "Must have a load instruction");
2670 setDebugLocFromInst(Builder, LI);
2671 for (unsigned Part = 0; Part < UF; ++Part) {
2672 Instruction *NewLI;
2673 if (CreateGatherScatter) {
2674 Value *MaskPart = Legal->isMaskRequired(LI) ?
Mask[Part] : nullptr; 2675 NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, MaskPart, 2676 0, "wide.masked.gather"); 2677 Entry[Part] = NewLI; 2678 } else { 2679 // Calculate the pointer for the specific unroll-part. 2680 Value *PartPtr = 2681 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 2682 2683 if (Reverse) { 2684 // If the address is consecutive but reversed, then the 2685 // wide load needs to start at the last vector element. 2686 PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 2687 PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 2688 Mask[Part] = reverseVector(Mask[Part]); 2689 } 2690 2691 Value *VecPtr = 2692 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2693 if (Legal->isMaskRequired(LI)) 2694 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 2695 UndefValue::get(DataTy), 2696 "wide.masked.load"); 2697 else 2698 NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load"); 2699 Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI; 2700 } 2701 addMetadata(NewLI, LI); 2702 } 2703 } 2704 2705 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2706 bool IfPredicateStore) { 2707 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2708 // Holds vector parameters or scalars, in case of uniform vals. 2709 SmallVector<VectorParts, 4> Params; 2710 2711 setDebugLocFromInst(Builder, Instr); 2712 2713 // Find all of the vectorized parameters. 2714 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 2715 Value *SrcOp = Instr->getOperand(op); 2716 2717 // If we are accessing the old induction variable, use the new one. 2718 if (SrcOp == OldInduction) { 2719 Params.push_back(getVectorValue(SrcOp)); 2720 continue; 2721 } 2722 2723 // Try using previously calculated values. 2724 Instruction *SrcInst = dyn_cast<Instruction>(SrcOp); 2725 2726 // If the src is an instruction that appeared earlier in the basic block, 2727 // then it should already be vectorized. 2728 if (SrcInst && OrigLoop->contains(SrcInst)) { 2729 assert(WidenMap.has(SrcInst) && "Source operand is unavailable"); 2730 // The parameter is a vector value from earlier. 2731 Params.push_back(WidenMap.get(SrcInst)); 2732 } else { 2733 // The parameter is a scalar from outside the loop. Maybe even a constant. 2734 VectorParts Scalars; 2735 Scalars.append(UF, SrcOp); 2736 Params.push_back(Scalars); 2737 } 2738 } 2739 2740 assert(Params.size() == Instr->getNumOperands() && 2741 "Invalid number of operands"); 2742 2743 // Does this instruction return a value ? 2744 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2745 2746 Value *UndefVec = 2747 IsVoidRetTy ? nullptr 2748 : UndefValue::get(VectorType::get(Instr->getType(), VF)); 2749 // Create a new entry in the WidenMap and initialize it to Undef or Null. 2750 VectorParts &VecResults = WidenMap.splat(Instr, UndefVec); 2751 2752 VectorParts Cond; 2753 if (IfPredicateStore) { 2754 assert(Instr->getParent()->getSinglePredecessor() && 2755 "Only support single predecessor blocks"); 2756 Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(), 2757 Instr->getParent()); 2758 } 2759 2760 // For each vector unroll 'part': 2761 for (unsigned Part = 0; Part < UF; ++Part) { 2762 // For each scalar that we create: 2763 for (unsigned Width = 0; Width < VF; ++Width) { 2764 2765 // Start if-block. 
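// (Clarifying note, added commentary: for a predicated store we extract
// this lane's bit from the block mask and compare it to 1; the actual
// branch around the store is materialized later from PredicatedStores.)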
2766 Value *Cmp = nullptr;
2767 if (IfPredicateStore) {
2768 Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Width));
2769 Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp,
2770 ConstantInt::get(Cmp->getType(), 1));
2771 }
2772
2773 Instruction *Cloned = Instr->clone();
2774 if (!IsVoidRetTy)
2775 Cloned->setName(Instr->getName() + ".cloned");
2776 // Replace the operands of the cloned instruction with extracted scalars.
2777 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
2778 Value *Op = Params[op][Part];
2779 // Param is a vector. Need to extract the right lane.
2780 if (Op->getType()->isVectorTy())
2781 Op = Builder.CreateExtractElement(Op, Builder.getInt32(Width));
2782 Cloned->setOperand(op, Op);
2783 }
2784 addNewMetadata(Cloned, Instr);
2785
2786 // Place the cloned scalar in the new loop.
2787 Builder.Insert(Cloned);
2788
2789 // If we just cloned a new assumption, add it to the assumption cache.
2790 if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2791 if (II->getIntrinsicID() == Intrinsic::assume)
2792 AC->registerAssumption(II);
2793
2794 // If the original scalar returns a value we need to place it in a vector
2795 // so that future users will be able to use it.
2796 if (!IsVoidRetTy)
2797 VecResults[Part] = Builder.CreateInsertElement(VecResults[Part], Cloned,
2798 Builder.getInt32(Width));
2799 // End if-block.
2800 if (IfPredicateStore)
2801 PredicatedStores.push_back(
2802 std::make_pair(cast<StoreInst>(Cloned), Cmp));
2803 }
2804 }
2805 }
2806
2807 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2808 Value *End, Value *Step,
2809 Instruction *DL) {
2810 BasicBlock *Header = L->getHeader();
2811 BasicBlock *Latch = L->getLoopLatch();
2812 // As we're just creating this loop, it's possible no latch exists
2813 // yet. If so, use the header as this will be a single block loop.
2814 if (!Latch)
2815 Latch = Header;
2816
2817 IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2818 setDebugLocFromInst(Builder, getDebugLocFromInstOrOperands(OldInduction));
2819 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2820
2821 Builder.SetInsertPoint(Latch->getTerminator());
2822
2823 // Create i+1 and fill the PHINode.
2824 Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2825 Induction->addIncoming(Start, L->getLoopPreheader());
2826 Induction->addIncoming(Next, Latch);
2827 // Create the compare.
2828 Value *ICmp = Builder.CreateICmpEQ(Next, End);
2829 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
2830
2831 // Now we have two terminators. Remove the old one from the block.
2832 Latch->getTerminator()->eraseFromParent();
2833
2834 return Induction;
2835 }
2836
2837 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2838 if (TripCount)
2839 return TripCount;
2840
2841 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2842 // Find the loop boundaries.
2843 ScalarEvolution *SE = PSE.getSE();
2844 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2845 assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
2846 "Invalid loop count");
2847
2848 Type *IdxTy = Legal->getWidestInductionType();
2849
2850 // The exit count might have the type of i64 while the phi is i32. This can
2851 // happen if we have an induction variable that is sign extended before the
2852 // compare. The only way we can get a backedge-taken count in that case is if
2853 // the induction variable was signed and as such will not overflow; truncation
2854 // is therefore legal.
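// (Illustrative note, added commentary: e.g. an i64 backedge-taken count
// is truncated here to an i32 widest induction type.)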
2855 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
2856 IdxTy->getPrimitiveSizeInBits())
2857 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2858 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2859
2860 // Get the total trip count from the backedge-taken count by adding 1.
2861 const SCEV *ExitCount = SE->getAddExpr(
2862 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2863
2864 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2865
2866 // Expand the trip count and place the new instructions in the preheader.
2867 // Notice that the loop body does not change, only the pre-header.
2868 SCEVExpander Exp(*SE, DL, "induction");
2869
2870 // Count holds the overall loop count (N).
2871 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2872 L->getLoopPreheader()->getTerminator());
2873
2874 if (TripCount->getType()->isPointerTy())
2875 TripCount =
2876 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2877 L->getLoopPreheader()->getTerminator());
2878
2879 return TripCount;
2880 }
2881
2882 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
2883 if (VectorTripCount)
2884 return VectorTripCount;
2885
2886 Value *TC = getOrCreateTripCount(L);
2887 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2888
2889 // Now we need to generate the expression for the part of the loop that the
2890 // vectorized body will execute. This is equal to N - (N % Step) if scalar
2891 // iterations are not required for correctness, or N - Step, otherwise. Step
2892 // is equal to the vectorization factor (number of SIMD elements) times the
2893 // unroll factor (number of SIMD instructions).
2894 Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
2895 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2896
2897 // If there is a non-reversed interleaved group that may speculatively access
2898 // memory out-of-bounds, we need to ensure that there will be at least one
2899 // iteration of the scalar epilogue loop. Thus, if the step evenly divides
2900 // the trip count, we set the remainder to be equal to the step. If the step
2901 // does not evenly divide the trip count, no adjustment is necessary since
2902 // there will already be scalar iterations. Note that the minimum iterations
2903 // check ensures that N >= Step.
2904 if (VF > 1 && Legal->requiresScalarEpilogue()) {
2905 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2906 R = Builder.CreateSelect(IsZero, Step, R);
2907 }
2908
2909 VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2910
2911 return VectorTripCount;
2912 }
2913
2914 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2915 BasicBlock *Bypass) {
2916 Value *Count = getOrCreateTripCount(L);
2917 BasicBlock *BB = L->getLoopPreheader();
2918 IRBuilder<> Builder(BB->getTerminator());
2919
2920 // Generate code to check that the loop's trip count that we computed by
2921 // adding one to the backedge-taken count will not overflow.
2922 Value *CheckMinIters = Builder.CreateICmpULT(
2923 Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");
2924
2925 BasicBlock *NewBB =
2926 BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked");
2927 // Update dominator tree immediately if the generated block is a
2928 // LoopBypassBlock because SCEV expansions to generate loop bypass
2929 // checks may query it before the current function is finished.
2930 DT->addNewBlock(NewBB, BB);
2931 if (L->getParentLoop())
2932 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2933 ReplaceInstWithInst(BB->getTerminator(),
2934 BranchInst::Create(Bypass, NewBB, CheckMinIters));
2935 LoopBypassBlocks.push_back(BB);
2936 }
2937
2938 void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L,
2939 BasicBlock *Bypass) {
2940 Value *TC = getOrCreateVectorTripCount(L);
2941 BasicBlock *BB = L->getLoopPreheader();
2942 IRBuilder<> Builder(BB->getTerminator());
2943
2944 // Now, compare the new count to zero. If it is zero skip the vector loop and
2945 // jump to the scalar loop.
2946 Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
2947 "cmp.zero");
2948
2949 // Split the preheader and insert the branch: if the vector trip count is
2950 // zero, bypass the vector loop and jump directly to the scalar loop.
2951 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2952 // Update dominator tree immediately if the generated block is a
2953 // LoopBypassBlock because SCEV expansions to generate loop bypass
2954 // checks may query it before the current function is finished.
2955 DT->addNewBlock(NewBB, BB);
2956 if (L->getParentLoop())
2957 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2958 ReplaceInstWithInst(BB->getTerminator(),
2959 BranchInst::Create(Bypass, NewBB, Cmp));
2960 LoopBypassBlocks.push_back(BB);
2961 }
2962
2963 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
2964 BasicBlock *BB = L->getLoopPreheader();
2965
2966 // Generate the code to check the SCEV assumptions that we made.
2967 // We want the new basic block to start at the first instruction in a
2968 // sequence of instructions that form a check.
2969 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
2970 "scev.check");
2971 Value *SCEVCheck =
2972 Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
2973
2974 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
2975 if (C->isZero())
2976 return;
2977
2978 // Create a new block containing the SCEV check.
2979 BB->setName("vector.scevcheck");
2980 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2981 // Update dominator tree immediately if the generated block is a
2982 // LoopBypassBlock because SCEV expansions to generate loop bypass
2983 // checks may query it before the current function is finished.
2984 DT->addNewBlock(NewBB, BB);
2985 if (L->getParentLoop())
2986 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2987 ReplaceInstWithInst(BB->getTerminator(),
2988 BranchInst::Create(Bypass, NewBB, SCEVCheck));
2989 LoopBypassBlocks.push_back(BB);
2990 AddedSafetyChecks = true;
2991 }
2992
2993 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
2994 BasicBlock *BB = L->getLoopPreheader();
2995
2996 // Generate the code that checks in runtime if arrays overlap. We put the
2997 // checks into a separate block to make the more common case of few elements
2998 // faster.
2999 Instruction *FirstCheckInst;
3000 Instruction *MemRuntimeCheck;
3001 std::tie(FirstCheckInst, MemRuntimeCheck) =
3002 Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
3003 if (!MemRuntimeCheck)
3004 return;
3005
3006 // Create a new block containing the memory check.
3007 BB->setName("vector.memcheck");
3008 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3009 // Update dominator tree immediately if the generated block is a
3010 // LoopBypassBlock because SCEV expansions to generate loop bypass
3011 // checks may query it before the current function is finished.
3012 DT->addNewBlock(NewBB, BB);
3013 if (L->getParentLoop())
3014 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3015 ReplaceInstWithInst(BB->getTerminator(),
3016 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
3017 LoopBypassBlocks.push_back(BB);
3018 AddedSafetyChecks = true;
3019
3020 // We currently don't use LoopVersioning for the actual loop cloning but we
3021 // still use it to add the noalias metadata.
3022 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
3023 PSE.getSE());
3024 LVer->prepareNoAliasMetadata();
3025 }
3026
3027 void InnerLoopVectorizer::createEmptyLoop() {
3028 /*
3029 In this function we generate a new loop. The new loop will contain
3030 the vectorized instructions while the old loop will continue to run the
3031 scalar remainder.
3032
3033 [ ] <-- loop iteration number check.
3034 / |
3035 / v
3036 | [ ] <-- vector loop bypass (may consist of multiple blocks).
3037 | / |
3038 | / v
3039 || [ ] <-- vector pre header.
3040 |/ |
3041 | v
3042 | [ ] \
3043 | [ ]_| <-- vector loop.
3044 | |
3045 | v
3046 | -[ ] <--- middle-block.
3047 | / |
3048 | / v
3049 -|- >[ ] <--- new preheader.
3050 | |
3051 | v
3052 | [ ] \
3053 | [ ]_| <-- old scalar loop to handle remainder.
3054 \ |
3055 \ v
3056 >[ ] <-- exit block.
3057 ...
3058 */
3059
3060 BasicBlock *OldBasicBlock = OrigLoop->getHeader();
3061 BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
3062 BasicBlock *ExitBlock = OrigLoop->getExitBlock();
3063 assert(VectorPH && "Invalid loop structure");
3064 assert(ExitBlock && "Must have an exit block");
3065
3066 // Some loops have a single integer induction variable, while other loops
3067 // don't. One example is C++ iterators that often have multiple pointer
3068 // induction variables. In the code below we also support a case where we
3069 // don't have a single induction variable.
3070 //
3071 // We try as hard as possible to obtain an induction variable from the
3072 // original loop. However, if we don't find one that:
3073 // - is an integer
3074 // - counts from zero, stepping by one
3075 // - is the size of the widest induction variable type
3076 // then we create a new one.
3077 OldInduction = Legal->getInduction();
3078 Type *IdxTy = Legal->getWidestInductionType();
3079
3080 // Split the single block loop into the two loop structure described above.
3081 BasicBlock *VecBody =
3082 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
3083 BasicBlock *MiddleBlock =
3084 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
3085 BasicBlock *ScalarPH =
3086 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
3087
3088 // Create and register the new vector loop.
3089 Loop *Lp = new Loop();
3090 Loop *ParentLoop = OrigLoop->getParentLoop();
3091
3092 // Insert the new loop into the loop nest and register the new basic blocks
3093 // before calling any utilities such as SCEV that require valid LoopInfo.
3094 if (ParentLoop) { 3095 ParentLoop->addChildLoop(Lp); 3096 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI); 3097 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI); 3098 } else { 3099 LI->addTopLevelLoop(Lp); 3100 } 3101 Lp->addBasicBlockToLoop(VecBody, *LI); 3102 3103 // Find the loop boundaries. 3104 Value *Count = getOrCreateTripCount(Lp); 3105 3106 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3107 3108 // We need to test whether the backedge-taken count is uint##_max. Adding one 3109 // to it will cause overflow and an incorrect loop trip count in the vector 3110 // body. In case of overflow we want to directly jump to the scalar remainder 3111 // loop. 3112 emitMinimumIterationCountCheck(Lp, ScalarPH); 3113 // Now, compare the new count to zero. If it is zero skip the vector loop and 3114 // jump to the scalar loop. 3115 emitVectorLoopEnteredCheck(Lp, ScalarPH); 3116 // Generate the code to check any assumptions that we've made for SCEV 3117 // expressions. 3118 emitSCEVChecks(Lp, ScalarPH); 3119 3120 // Generate the code that checks in runtime if arrays overlap. We put the 3121 // checks into a separate block to make the more common case of few elements 3122 // faster. 3123 emitMemRuntimeChecks(Lp, ScalarPH); 3124 3125 // Generate the induction variable. 3126 // The loop step is equal to the vectorization factor (num of SIMD elements) 3127 // times the unroll factor (num of SIMD instructions). 3128 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3129 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 3130 Induction = 3131 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3132 getDebugLocFromInstOrOperands(OldInduction)); 3133 3134 // We are going to resume the execution of the scalar loop. 3135 // Go over all of the induction variables that we found and fix the 3136 // PHIs that are left in the scalar version of the loop. 3137 // The starting values of PHI nodes depend on the counter of the last 3138 // iteration in the vectorized loop. 3139 // If we come from a bypass edge then we need to start from the original 3140 // start value. 3141 3142 // This variable saves the new starting index for the scalar loop. It is used 3143 // to test if there are any tail iterations left once the vector loop has 3144 // completed. 3145 LoopVectorizationLegality::InductionList::iterator I, E; 3146 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars(); 3147 for (I = List->begin(), E = List->end(); I != E; ++I) { 3148 PHINode *OrigPhi = I->first; 3149 InductionDescriptor II = I->second; 3150 3151 // Create phi nodes to merge from the backedge-taken check block. 3152 PHINode *BCResumeVal = PHINode::Create( 3153 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator()); 3154 Value *EndValue; 3155 if (OrigPhi == OldInduction) { 3156 // We know what the end value is. 3157 EndValue = CountRoundDown; 3158 } else { 3159 IRBuilder<> B(LoopBypassBlocks.back()->getTerminator()); 3160 Value *CRD = B.CreateSExtOrTrunc(CountRoundDown, 3161 II.getStep()->getType(), "cast.crd"); 3162 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 3163 EndValue = II.transform(B, CRD, PSE.getSE(), DL); 3164 EndValue->setName("ind.end"); 3165 } 3166 3167 // The new PHI merges the original incoming value, in case of a bypass, 3168 // or the value at the end of the vectorized loop. 3169 BCResumeVal->addIncoming(EndValue, MiddleBlock); 3170 3171 // Fix the scalar body counter (PHI node). 
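// For the primary induction with VF * UF = 8, the resume PHI built here
// looks roughly like (block names illustrative, matching the checks above):
//   %bc.resume.val = phi i64 [ %n.vec, %middle.block ],
//                            [ 0, %min.iters.checked ], ...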
3172 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
3173
3174 // When arriving from one of the bypass blocks, the resume PHI needs the
3175 // original start value.
3176 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
3177 BCResumeVal->addIncoming(II.getStartValue(), LoopBypassBlocks[I]);
3178 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
3179 }
3180
3181 // Add a check in the middle block to see if we have completed
3182 // all of the iterations in the first vector loop.
3183 // If (N - N%VF) == N, then we *don't* need to run the remainder.
3184 Value *CmpN =
3185 CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
3186 CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
3187 ReplaceInstWithInst(MiddleBlock->getTerminator(),
3188 BranchInst::Create(ExitBlock, ScalarPH, CmpN));
3189
3190 // Get ready to start creating new instructions into the vectorized body.
3191 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());
3192
3193 // Save the state.
3194 LoopVectorPreHeader = Lp->getLoopPreheader();
3195 LoopScalarPreHeader = ScalarPH;
3196 LoopMiddleBlock = MiddleBlock;
3197 LoopExitBlock = ExitBlock;
3198 LoopVectorBody.push_back(VecBody);
3199 LoopScalarBody = OldBasicBlock;
3200
3201 // Keep all loop hints from the original loop on the vector loop (we'll
3202 // replace the vectorizer-specific hints below).
3203 if (MDNode *LID = OrigLoop->getLoopID())
3204 Lp->setLoopID(LID);
3205
3206 LoopVectorizeHints Hints(Lp, true);
3207 Hints.setAlreadyVectorized();
3208 }
3209
3210 namespace {
3211 struct CSEDenseMapInfo {
3212 static bool canHandle(Instruction *I) {
3213 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3214 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3215 }
3216 static inline Instruction *getEmptyKey() {
3217 return DenseMapInfo<Instruction *>::getEmptyKey();
3218 }
3219 static inline Instruction *getTombstoneKey() {
3220 return DenseMapInfo<Instruction *>::getTombstoneKey();
3221 }
3222 static unsigned getHashValue(Instruction *I) {
3223 assert(canHandle(I) && "Unknown instruction!");
3224 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3225 I->value_op_end()));
3226 }
3227 static bool isEqual(Instruction *LHS, Instruction *RHS) {
3228 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3229 LHS == getTombstoneKey() || RHS == getTombstoneKey())
3230 return LHS == RHS;
3231 return LHS->isIdenticalTo(RHS);
3232 }
3233 };
3234 }
3235
3236 /// \brief Check whether this block is a predicated block.
3237 /// Due to if predication of stores we might create a sequence of "if(pred) a[i]
3238 /// = ...; " blocks. We start with one vectorized basic block. For every
3239 /// conditional block we split this vectorized block. Therefore, every second
3240 /// block will be a predicated one.
3241 static bool isPredicatedBlock(unsigned BlockNum) { return BlockNum % 2; }
3242
3243 /// \brief Perform CSE of induction variable instructions.
3244 static void cse(SmallVector<BasicBlock *, 4> &BBs) {
3245 // Perform simple CSE.
3246 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3247 for (unsigned i = 0, e = BBs.size(); i != e; ++i) {
3248 BasicBlock *BB = BBs[i];
3249 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3250 Instruction *In = &*I++;
3251
3252 if (!CSEDenseMapInfo::canHandle(In))
3253 continue;
3254
3255 // Check if we can replace this instruction with any of the
3256 // visited instructions.
3257 if (Instruction *V = CSEMap.lookup(In)) {
3258 In->replaceAllUsesWith(V);
3259 In->eraseFromParent();
3260 continue;
3261 }
3262 // Ignore instructions in conditional blocks. We create "if (pred) a[i] =
3263 // ...;" blocks for predicated stores. Every second block is a predicated
3264 // block.
3265 if (isPredicatedBlock(i))
3266 continue;
3267
3268 CSEMap[In] = In;
3269 }
3270 }
3271 }
3272
3273 /// \brief Adds a 'fast' flag to floating point operations.
3274 static Value *addFastMathFlag(Value *V) {
3275 if (isa<FPMathOperator>(V)) {
3276 FastMathFlags Flags;
3277 Flags.setUnsafeAlgebra();
3278 cast<Instruction>(V)->setFastMathFlags(Flags);
3279 }
3280 return V;
3281 }
3282
3283 /// Estimate the overhead of scalarizing a value. Insert and Extract are set if
3284 /// the result needs to be inserted and/or extracted from vectors.
3285 static unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract,
3286 const TargetTransformInfo &TTI) {
3287 if (Ty->isVoidTy())
3288 return 0;
3289
3290 assert(Ty->isVectorTy() && "Can only scalarize vectors");
3291 unsigned Cost = 0;
3292
3293 for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
3294 if (Insert)
3295 Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, i);
3296 if (Extract)
3297 Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, i);
3298 }
3299
3300 return Cost;
3301 }
3302
3303 // Estimate cost of a call instruction CI if it were vectorized with factor VF.
3304 // Return the cost of the instruction, including scalarization overhead if it's
3305 // needed. The flag NeedToScalarize shows if the call needs to be scalarized -
3306 // i.e., either a vector version isn't available, or it is too expensive.
3307 static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
3308 const TargetTransformInfo &TTI,
3309 const TargetLibraryInfo *TLI,
3310 bool &NeedToScalarize) {
3311 Function *F = CI->getCalledFunction();
3312 StringRef FnName = CI->getCalledFunction()->getName();
3313 Type *ScalarRetTy = CI->getType();
3314 SmallVector<Type *, 4> Tys, ScalarTys;
3315 for (auto &ArgOp : CI->arg_operands())
3316 ScalarTys.push_back(ArgOp->getType());
3317
3318 // Estimate cost of scalarized vector call. The source operands are assumed
3319 // to be vectors, so we need to extract individual elements from there,
3320 // execute VF scalar calls, and then gather the result into the vector return
3321 // value.
3322 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
3323 if (VF == 1)
3324 return ScalarCallCost;
3325
3326 // Compute corresponding vector type for return value and arguments.
3327 Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3328 for (unsigned i = 0, ie = ScalarTys.size(); i != ie; ++i)
3329 Tys.push_back(ToVectorTy(ScalarTys[i], VF));
3330
3331 // Compute costs of unpacking argument values for the scalar calls and
3332 // packing the return values to a vector.
3333 unsigned ScalarizationCost =
3334 getScalarizationOverhead(RetTy, true, false, TTI);
3335 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i)
3336 ScalarizationCost += getScalarizationOverhead(Tys[i], false, true, TTI);
3337
3338 unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
3339
3340 // If we can't emit a vector call for this function, then the currently found
3341 // cost is the cost we need to return.
3342 NeedToScalarize = true;
3343 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin())
3344 return Cost;
3345
3346 // If the corresponding vector cost is cheaper, return its cost.
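// As a worked example (numbers are illustrative only): with VF = 4, a
// scalar call cost of 10 and a total scalarization overhead of 12, the
// scalarized cost is 4 * 10 + 12 = 52; a vectorized library variant
// costing, say, 20 is cheaper, so it is chosen below and NeedToScalarize
// is cleared.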
3347 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3348 if (VectorCallCost < Cost) { 3349 NeedToScalarize = false; 3350 return VectorCallCost; 3351 } 3352 return Cost; 3353 } 3354 3355 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3356 // factor VF. Return the cost of the instruction, including scalarization 3357 // overhead if it's needed. 3358 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3359 const TargetTransformInfo &TTI, 3360 const TargetLibraryInfo *TLI) { 3361 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3362 assert(ID && "Expected intrinsic call!"); 3363 3364 Type *RetTy = ToVectorTy(CI->getType(), VF); 3365 SmallVector<Type *, 4> Tys; 3366 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) 3367 Tys.push_back(ToVectorTy(CI->getArgOperand(i)->getType(), VF)); 3368 3369 FastMathFlags FMF; 3370 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3371 FMF = FPMO->getFastMathFlags(); 3372 3373 return TTI.getIntrinsicInstrCost(ID, RetTy, Tys, FMF); 3374 } 3375 3376 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3377 IntegerType *I1 = cast<IntegerType>(T1->getVectorElementType()); 3378 IntegerType *I2 = cast<IntegerType>(T2->getVectorElementType()); 3379 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3380 } 3381 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3382 IntegerType *I1 = cast<IntegerType>(T1->getVectorElementType()); 3383 IntegerType *I2 = cast<IntegerType>(T2->getVectorElementType()); 3384 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3385 } 3386 3387 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3388 // For every instruction `I` in MinBWs, truncate the operands, create a 3389 // truncated version of `I` and reextend its result. InstCombine runs 3390 // later and will remove any ext/trunc pairs. 3391 // 3392 for (auto &KV : MinBWs) { 3393 VectorParts &Parts = WidenMap.get(KV.first); 3394 for (Value *&I : Parts) { 3395 if (I->use_empty()) 3396 continue; 3397 Type *OriginalTy = I->getType(); 3398 Type *ScalarTruncatedTy = 3399 IntegerType::get(OriginalTy->getContext(), KV.second); 3400 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3401 OriginalTy->getVectorNumElements()); 3402 if (TruncatedTy == OriginalTy) 3403 continue; 3404 3405 if (!isa<Instruction>(I)) 3406 continue; 3407 3408 IRBuilder<> B(cast<Instruction>(I)); 3409 auto ShrinkOperand = [&](Value *V) -> Value * { 3410 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3411 if (ZI->getSrcTy() == TruncatedTy) 3412 return ZI->getOperand(0); 3413 return B.CreateZExtOrTrunc(V, TruncatedTy); 3414 }; 3415 3416 // The actual instruction modification depends on the instruction type, 3417 // unfortunately. 
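// For instance, an i32 add that MinBWs proves needs only 8 bits becomes,
// with VF = 4, roughly (a sketch, not actual output):
//   %a.tr = trunc <4 x i32> %a to <4 x i8>
//   %b.tr = trunc <4 x i32> %b to <4 x i8>
//   %r.tr = add <4 x i8> %a.tr, %b.tr
//   %r = zext <4 x i8> %r.tr to <4 x i32>
// and InstCombine later removes whatever ext/trunc pairs are redundant.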
3418 Value *NewI = nullptr;
3419 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
3420 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3421 ShrinkOperand(BO->getOperand(1)));
3422 cast<BinaryOperator>(NewI)->copyIRFlags(I);
3423 } else if (ICmpInst *CI = dyn_cast<ICmpInst>(I)) {
3424 NewI =
3425 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3426 ShrinkOperand(CI->getOperand(1)));
3427 } else if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
3428 NewI = B.CreateSelect(SI->getCondition(),
3429 ShrinkOperand(SI->getTrueValue()),
3430 ShrinkOperand(SI->getFalseValue()));
3431 } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
3432 switch (CI->getOpcode()) {
3433 default:
3434 llvm_unreachable("Unhandled cast!");
3435 case Instruction::Trunc:
3436 NewI = ShrinkOperand(CI->getOperand(0));
3437 break;
3438 case Instruction::SExt:
3439 NewI = B.CreateSExtOrTrunc(
3440 CI->getOperand(0),
3441 smallestIntegerVectorType(OriginalTy, TruncatedTy));
3442 break;
3443 case Instruction::ZExt:
3444 NewI = B.CreateZExtOrTrunc(
3445 CI->getOperand(0),
3446 smallestIntegerVectorType(OriginalTy, TruncatedTy));
3447 break;
3448 }
3449 } else if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(I)) {
3450 auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
3451 auto *O0 = B.CreateZExtOrTrunc(
3452 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3453 auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
3454 auto *O1 = B.CreateZExtOrTrunc(
3455 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3456
3457 NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
3458 } else if (isa<LoadInst>(I)) {
3459 // Don't do anything with the operands, just extend the result.
3460 continue;
3461 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3462 auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
3463 auto *O0 = B.CreateZExtOrTrunc(
3464 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3465 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3466 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3467 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3468 auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
3469 auto *O0 = B.CreateZExtOrTrunc(
3470 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3471 NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3472 } else {
3473 llvm_unreachable("Unhandled instruction type!");
3474 }
3475
3476 // Lastly, extend the result.
3477 NewI->takeName(cast<Instruction>(I));
3478 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3479 I->replaceAllUsesWith(Res);
3480 cast<Instruction>(I)->eraseFromParent();
3481 I = Res;
3482 }
3483 }
3484
3485 // We'll have created a bunch of ZExts that are now unused. Clean them up.
3486 for (auto &KV : MinBWs) {
3487 VectorParts &Parts = WidenMap.get(KV.first);
3488 for (Value *&I : Parts) {
3489 ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3490 if (Inst && Inst->use_empty()) {
3491 Value *NewI = Inst->getOperand(0);
3492 Inst->eraseFromParent();
3493 I = NewI;
3494 }
3495 }
3496 }
3497 }
3498
3499 void InnerLoopVectorizer::vectorizeLoop() {
3500 //===------------------------------------------------===//
3501 //
3502 // Notice: any optimization or new instruction that goes
3503 // into the code below should also be implemented in
3504 // the cost-model.
3505 //
3506 //===------------------------------------------------===//
3507 Constant *Zero = Builder.getInt32(0);
3508
3509 // In order to support recurrences we need to be able to vectorize Phi nodes.
3510 // Phi nodes have cycles, so we need to vectorize them in two stages. First,
3511 // we create a new vector PHI node with no incoming edges. We use this value
3512 // when we vectorize all of the instructions that use the PHI. Next, after
3513 // all of the instructions in the block are complete we add the new incoming
3514 // edges to the PHI. At this point all of the instructions in the basic block
3515 // are vectorized, so we can use them to construct the PHI.
3516 PhiVector PHIsToFix;
3517
3518 // Scan the loop in a topological order to ensure that defs are vectorized
3519 // before users.
3520 LoopBlocksDFS DFS(OrigLoop);
3521 DFS.perform(LI);
3522
3523 // Vectorize all of the blocks in the original loop.
3524 for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(), be = DFS.endRPO();
3525 bb != be; ++bb)
3526 vectorizeBlockInLoop(*bb, &PHIsToFix);
3527
3528 // Insert truncates and extends for any truncated instructions as hints to
3529 // InstCombine.
3530 if (VF > 1)
3531 truncateToMinimalBitwidths();
3532
3533 // At this point every instruction in the original loop is widened to a
3534 // vector form. Now we need to fix the recurrences in PHIsToFix. These PHI
3535 // nodes are currently empty because we did not want to introduce cycles.
3536 // This is the second stage of vectorizing recurrences.
3537 for (PHINode *Phi : PHIsToFix) {
3538 assert(Phi && "Unable to recover vectorized PHI");
3539
3540 // Handle first-order recurrences that need to be fixed.
3541 if (Legal->isFirstOrderRecurrence(Phi)) {
3542 fixFirstOrderRecurrence(Phi);
3543 continue;
3544 }
3545
3546 // If the phi node is not a first-order recurrence, it must be a reduction.
3547 // Get its reduction variable descriptor.
3548 assert(Legal->isReductionVariable(Phi) &&
3549 "Unable to find the reduction variable");
3550 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
3551
3552 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3553 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3554 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3555 RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3556 RdxDesc.getMinMaxRecurrenceKind();
3557 setDebugLocFromInst(Builder, ReductionStartValue);
3558
3559 // We need to generate a reduction vector from the incoming scalar.
3560 // To do so, we need to generate the 'identity' vector and override
3561 // one of the elements with the incoming scalar reduction. We need
3562 // to do it in the vector-loop preheader.
3563 Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());
3564
3565 // This is the vector-clone of the value that leaves the loop.
3566 VectorParts &VectorExit = getVectorValue(LoopExitInst);
3567 Type *VecTy = VectorExit[0]->getType();
3568
3569 // Find the reduction identity variable. Zero for addition, or and xor;
3570 // one for multiplication; -1 for and.
3571 Value *Identity;
3572 Value *VectorStart;
3573 if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3574 RK == RecurrenceDescriptor::RK_FloatMinMax) {
3575 // MinMax reductions have the start value as their identity.
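// For example (illustrative): an smax reduction splats its start value %s
// into every lane, while an add reduction handled below uses the identity
// <i32 0, i32 0, i32 0, i32 0> and the vector start value
// <i32 %s, i32 0, i32 0, i32 0> for VF = 4.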
3576 if (VF == 1) {
3577 VectorStart = Identity = ReductionStartValue;
3578 } else {
3579 VectorStart = Identity =
3580 Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3581 }
3582 } else {
3583 // Handle other reduction kinds:
3584 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3585 RK, VecTy->getScalarType());
3586 if (VF == 1) {
3587 Identity = Iden;
3588 // This vector is the Identity vector where the first element is the
3589 // incoming scalar reduction.
3590 VectorStart = ReductionStartValue;
3591 } else {
3592 Identity = ConstantVector::getSplat(VF, Iden);
3593
3594 // This vector is the Identity vector where the first element is the
3595 // incoming scalar reduction.
3596 VectorStart =
3597 Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3598 }
3599 }
3600
3601 // Fix the vector-loop phi.
3602
3603 // Reductions do not have to start at zero. They can start with
3604 // any loop invariant values.
3605 VectorParts &VecRdxPhi = WidenMap.get(Phi);
3606 BasicBlock *Latch = OrigLoop->getLoopLatch();
3607 Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3608 VectorParts &Val = getVectorValue(LoopVal);
3609 for (unsigned part = 0; part < UF; ++part) {
3610 // Make sure to add the reduction start value only to the
3611 // first unroll part.
3612 Value *StartVal = (part == 0) ? VectorStart : Identity;
3613 cast<PHINode>(VecRdxPhi[part])
3614 ->addIncoming(StartVal, LoopVectorPreHeader);
3615 cast<PHINode>(VecRdxPhi[part])
3616 ->addIncoming(Val[part], LoopVectorBody.back());
3617 }
3618
3619 // Before each round, move the insertion point right between
3620 // the PHIs and the values we are going to write.
3621 // This allows us to write both PHINodes and the extractelement
3622 // instructions.
3623 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3624
3625 VectorParts RdxParts = getVectorValue(LoopExitInst);
3626 setDebugLocFromInst(Builder, LoopExitInst);
3627
3628 // If the vector reduction can be performed in a smaller type, we truncate
3629 // then extend the loop exit value to enable InstCombine to evaluate the
3630 // entire expression in the smaller type.
3631 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
3632 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3633 Builder.SetInsertPoint(LoopVectorBody.back()->getTerminator());
3634 for (unsigned part = 0; part < UF; ++part) {
3635 Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
3636 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3637 : Builder.CreateZExt(Trunc, VecTy);
3638 for (Value::user_iterator UI = RdxParts[part]->user_begin();
3639 UI != RdxParts[part]->user_end();)
3640 if (*UI != Trunc) {
3641 (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd);
3642 RdxParts[part] = Extnd;
3643 } else {
3644 ++UI;
3645 }
3646 }
3647 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3648 for (unsigned part = 0; part < UF; ++part)
3649 RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
3650 }
3651
3652 // Reduce all of the unrolled parts into a single vector.
3653 Value *ReducedPartRdx = RdxParts[0];
3654 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
3655 setDebugLocFromInst(Builder, ReducedPartRdx);
3656 for (unsigned part = 1; part < UF; ++part) {
3657 if (Op != Instruction::ICmp && Op != Instruction::FCmp)
3658 // Floating point operations had to be 'fast' to enable the reduction.
3659 ReducedPartRdx = addFastMathFlag( 3660 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part], 3661 ReducedPartRdx, "bin.rdx")); 3662 else 3663 ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp( 3664 Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]); 3665 } 3666 3667 if (VF > 1) { 3668 // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles 3669 // and vector ops, reducing the set of values being computed by half each 3670 // round. 3671 assert(isPowerOf2_32(VF) && 3672 "Reduction emission only supported for pow2 vectors!"); 3673 Value *TmpVec = ReducedPartRdx; 3674 SmallVector<Constant *, 32> ShuffleMask(VF, nullptr); 3675 for (unsigned i = VF; i != 1; i >>= 1) { 3676 // Move the upper half of the vector to the lower half. 3677 for (unsigned j = 0; j != i / 2; ++j) 3678 ShuffleMask[j] = Builder.getInt32(i / 2 + j); 3679 3680 // Fill the rest of the mask with undef. 3681 std::fill(&ShuffleMask[i / 2], ShuffleMask.end(), 3682 UndefValue::get(Builder.getInt32Ty())); 3683 3684 Value *Shuf = Builder.CreateShuffleVector( 3685 TmpVec, UndefValue::get(TmpVec->getType()), 3686 ConstantVector::get(ShuffleMask), "rdx.shuf"); 3687 3688 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3689 // Floating point operations had to be 'fast' to enable the reduction. 3690 TmpVec = addFastMathFlag(Builder.CreateBinOp( 3691 (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx")); 3692 else 3693 TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind, 3694 TmpVec, Shuf); 3695 } 3696 3697 // The result is in the first element of the vector. 3698 ReducedPartRdx = 3699 Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 3700 3701 // If the reduction can be performed in a smaller type, we need to extend 3702 // the reduction to the wider type before we branch to the original loop. 3703 if (Phi->getType() != RdxDesc.getRecurrenceType()) 3704 ReducedPartRdx = 3705 RdxDesc.isSigned() 3706 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 3707 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 3708 } 3709 3710 // Create a phi node that merges control-flow from the backedge-taken check 3711 // block and the middle block. 3712 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 3713 LoopScalarPreHeader->getTerminator()); 3714 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 3715 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 3716 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3717 3718 // Now, we need to fix the users of the reduction variable 3719 // inside and outside of the scalar remainder loop. 3720 // We know that the loop is in LCSSA form. We need to update the 3721 // PHI nodes in the exit blocks. 3722 for (BasicBlock::iterator LEI = LoopExitBlock->begin(), 3723 LEE = LoopExitBlock->end(); 3724 LEI != LEE; ++LEI) { 3725 PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI); 3726 if (!LCSSAPhi) 3727 break; 3728 3729 // All PHINodes need to have a single entry edge, or two if 3730 // we already fixed them. 3731 assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 3732 3733 // We found our reduction value exit-PHI. Update it with the 3734 // incoming bypass edge. 3735 if (LCSSAPhi->getIncomingValue(0) == LoopExitInst) { 3736 // Add an edge coming from the bypass. 3737 LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3738 break; 3739 } 3740 } // end of the LCSSA phi scan. 
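// For an add reduction with VF = 4 and UF = 1, the code built above in the
// middle block and scalar preheader has roughly this shape (names are
// illustrative):
//   %rdx.shuf = shufflevector <4 x i32> %r, <4 x i32> undef,
//                             <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
//   %bin.rdx = add <4 x i32> %r, %rdx.shuf
//   ; one more shuffle/add round with mask <1, undef, undef, undef>
//   %sum = extractelement <4 x i32> %bin.rdx2, i32 0
//   %bc.merge.rdx = phi i32 [ %start, %bypass ], [ %sum, %middle.block ]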
3741
3742 // Fix the scalar loop reduction variable with the incoming reduction sum
3743 // from the vector body and from the backedge value.
3744 int IncomingEdgeBlockIdx =
3745 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
3746 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
3747 // Pick the other block.
3748 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
3749 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
3750 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
3751 } // end of for each Phi in PHIsToFix.
3752
3753 fixLCSSAPHIs();
3754
3755 // Make sure DomTree is updated.
3756 updateAnalysis();
3757
3758 // Predicate any stores.
3759 for (auto KV : PredicatedStores) {
3760 BasicBlock::iterator I(KV.first);
3761 auto *BB = SplitBlock(I->getParent(), &*std::next(I), DT, LI);
3762 auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false,
3763 /*BranchWeights=*/nullptr, DT, LI);
3764 I->moveBefore(T);
3765 I->getParent()->setName("pred.store.if");
3766 BB->setName("pred.store.continue");
3767 }
3768 DEBUG(DT->verifyDomTree());
3769 // Remove redundant induction instructions.
3770 cse(LoopVectorBody);
3771 }
3772
3773 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3774
3775 // This is the second phase of vectorizing first-order recurrences. An
3776 // overview of the transformation is described below. Suppose we have the
3777 // following loop.
3778 //
3779 // for (int i = 0; i < n; ++i)
3780 // b[i] = a[i] - a[i - 1];
3781 //
3782 // There is a first-order recurrence on "a". For this loop, the shorthand
3783 // scalar IR looks like:
3784 //
3785 // scalar.ph:
3786 // s_init = a[-1]
3787 // br scalar.body
3788 //
3789 // scalar.body:
3790 // i = phi [0, scalar.ph], [i+1, scalar.body]
3791 // s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3792 // s2 = a[i]
3793 // b[i] = s2 - s1
3794 // br cond, scalar.body, ...
3795 //
3796 // In this example, s1 is a recurrence because its value depends on the
3797 // previous iteration. In the first phase of vectorization, we created a
3798 // temporary value for s1. We now complete the vectorization and produce the
3799 // shorthand vector IR shown below (for VF = 4, UF = 1).
3800 //
3801 // vector.ph:
3802 // v_init = vector(..., ..., ..., a[-1])
3803 // br vector.body
3804 //
3805 // vector.body
3806 // i = phi [0, vector.ph], [i+4, vector.body]
3807 // v1 = phi [v_init, vector.ph], [v2, vector.body]
3808 // v2 = a[i, i+1, i+2, i+3];
3809 // v3 = vector(v1(3), v2(0, 1, 2))
3810 // b[i, i+1, i+2, i+3] = v2 - v3
3811 // br cond, vector.body, middle.block
3812 //
3813 // middle.block:
3814 // x = v2(3)
3815 // br scalar.ph
3816 //
3817 // scalar.ph:
3818 // s_init = phi [x, middle.block], [a[-1], otherwise]
3819 // br scalar.body
3820 //
3821 // After the vector loop completes execution, we extract the next value of
3822 // the recurrence (x) to use as the initial value in the scalar loop.
3823
3824 // Get the original loop preheader and single loop latch.
3825 auto *Preheader = OrigLoop->getLoopPreheader();
3826 auto *Latch = OrigLoop->getLoopLatch();
3827
3828 // Get the initial and previous values of the scalar recurrence.
3829 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3830 auto *Previous = Phi->getIncomingValueForBlock(Latch);
3831
3832 // Create a vector from the initial value.
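// For VF = 4 this is a single insert into the last lane, e.g. (using the
// s_init notation from the overview above):
//   %vector.recur.init = insertelement <4 x i32> undef, i32 %s_init, i32 3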
3833 auto *VectorInit = ScalarInit;
3834 if (VF > 1) {
3835 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3836 VectorInit = Builder.CreateInsertElement(
3837 UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
3838 Builder.getInt32(VF - 1), "vector.recur.init");
3839 }
3840
3841 // We constructed a temporary phi node in the first phase of vectorization.
3842 // This phi node will eventually be deleted.
3843 auto &PhiParts = getVectorValue(Phi);
3844 Builder.SetInsertPoint(cast<Instruction>(PhiParts[0]));
3845
3846 // Create a phi node for the new recurrence. The current value will either be
3847 // the initial value inserted into a vector or loop-varying vector value.
3848 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
3849 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
3850
3851 // Get the vectorized previous value. We ensured the previous value was an
3852 // instruction when detecting the recurrence.
3853 auto &PreviousParts = getVectorValue(Previous);
3854
3855 // Set the insertion point to be after this instruction. We ensured the
3856 // previous value dominated all uses of the phi when detecting the
3857 // recurrence.
3858 Builder.SetInsertPoint(
3859 &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1])));
3860
3861 // We will construct a vector for the recurrence by combining the values for
3862 // the current and previous iterations. This is the required shuffle mask.
3863 SmallVector<Constant *, 8> ShuffleMask(VF);
3864 ShuffleMask[0] = Builder.getInt32(VF - 1);
3865 for (unsigned I = 1; I < VF; ++I)
3866 ShuffleMask[I] = Builder.getInt32(I + VF - 1);
3867
3868 // The vector from which to take the initial value for the current iteration
3869 // (actual or unrolled). Initially, this is the vector phi node.
3870 Value *Incoming = VecPhi;
3871
3872 // Shuffle the current and previous vector and update the vector parts.
3873 for (unsigned Part = 0; Part < UF; ++Part) {
3874 auto *Shuffle =
3875 VF > 1
3876 ? Builder.CreateShuffleVector(Incoming, PreviousParts[Part],
3877 ConstantVector::get(ShuffleMask))
3878 : Incoming;
3879 PhiParts[Part]->replaceAllUsesWith(Shuffle);
3880 cast<Instruction>(PhiParts[Part])->eraseFromParent();
3881 PhiParts[Part] = Shuffle;
3882 Incoming = PreviousParts[Part];
3883 }
3884
3885 // Fix the latch value of the new recurrence in the vector loop.
3886 VecPhi->addIncoming(Incoming,
3887 LI->getLoopFor(LoopVectorBody[0])->getLoopLatch());
3888
3889 // Extract the last vector element in the middle block. This will be the
3890 // initial value for the recurrence when jumping to the scalar loop.
3891 auto *Extract = Incoming;
3892 if (VF > 1) {
3893 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3894 Extract = Builder.CreateExtractElement(Extract, Builder.getInt32(VF - 1),
3895 "vector.recur.extract");
3896 }
3897
3898 // Fix the initial value of the original recurrence in the scalar loop.
3899 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3900 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3901 for (auto *BB : predecessors(LoopScalarPreHeader)) {
3902 auto *Incoming = BB == LoopMiddleBlock ? Extract : ScalarInit;
3903 Start->addIncoming(Incoming, BB);
3904 }
3905
3906 Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
3907 Phi->setName("scalar.recur");
3908
3909 // Finally, fix users of the recurrence outside the loop.
The users will need
3910 // either the last value of the scalar recurrence or the last value of the
3911 // vector recurrence we extracted in the middle block. Since the loop is in
3912 // LCSSA form, we just need to find the phi node for the original scalar
3913 // recurrence in the exit block, and then add an edge for the middle block.
3914 for (auto &I : *LoopExitBlock) {
3915 auto *LCSSAPhi = dyn_cast<PHINode>(&I);
3916 if (!LCSSAPhi)
3917 break;
3918 if (LCSSAPhi->getIncomingValue(0) == Phi) {
3919 LCSSAPhi->addIncoming(Extract, LoopMiddleBlock);
3920 break;
3921 }
3922 }
3923 }
3924
3925 void InnerLoopVectorizer::fixLCSSAPHIs() {
3926 for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
3927 LEE = LoopExitBlock->end();
3928 LEI != LEE; ++LEI) {
3929 PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
3930 if (!LCSSAPhi)
3931 break;
3932 if (LCSSAPhi->getNumIncomingValues() == 1)
3933 LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()),
3934 LoopMiddleBlock);
3935 }
3936 }
3937
3938 InnerLoopVectorizer::VectorParts
3939 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
3940 assert(std::find(pred_begin(Dst), pred_end(Dst), Src) != pred_end(Dst) &&
3941 "Invalid edge");
3942
3943 // Look for cached value.
3944 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
3945 EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge);
3946 if (ECEntryIt != MaskCache.end())
3947 return ECEntryIt->second;
3948
3949 VectorParts SrcMask = createBlockInMask(Src);
3950
3951 // The terminator has to be a branch inst!
3952 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
3953 assert(BI && "Unexpected terminator found");
3954
3955 if (BI->isConditional()) {
3956 VectorParts EdgeMask = getVectorValue(BI->getCondition());
3957
3958 if (BI->getSuccessor(0) != Dst)
3959 for (unsigned part = 0; part < UF; ++part)
3960 EdgeMask[part] = Builder.CreateNot(EdgeMask[part]);
3961
3962 for (unsigned part = 0; part < UF; ++part)
3963 EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]);
3964
3965 MaskCache[Edge] = EdgeMask;
3966 return EdgeMask;
3967 }
3968
3969 MaskCache[Edge] = SrcMask;
3970 return SrcMask;
3971 }
3972
3973 InnerLoopVectorizer::VectorParts
3974 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
3975 assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
3976
3977 // Loop incoming mask is all-one.
3978 if (OrigLoop->getHeader() == BB) {
3979 Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1);
3980 return getVectorValue(C);
3981 }
3982
3983 // This is the block mask. We OR all incoming edge masks, starting with zero.
3984 Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0);
3985 VectorParts BlockMask = getVectorValue(Zero);
3986
3987 // For each pred:
3988 for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) {
3989 VectorParts EM = createEdgeMask(*it, BB);
3990 for (unsigned part = 0; part < UF; ++part)
3991 BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]);
3992 }
3993
3994 return BlockMask;
3995 }
3996
3997 void InnerLoopVectorizer::widenPHIInstruction(
3998 Instruction *PN, InnerLoopVectorizer::VectorParts &Entry, unsigned UF,
3999 unsigned VF, PhiVector *PV) {
4000 PHINode *P = cast<PHINode>(PN);
4001 // Handle recurrences.
4002 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4003 for (unsigned part = 0; part < UF; ++part) {
4004 // This is phase one of vectorizing PHIs.
4005 Type *VecTy =
4006 (VF == 1) ?
PN->getType() : VectorType::get(PN->getType(), VF); 4007 Entry[part] = PHINode::Create( 4008 VecTy, 2, "vec.phi", &*LoopVectorBody.back()->getFirstInsertionPt()); 4009 } 4010 PV->push_back(P); 4011 return; 4012 } 4013 4014 setDebugLocFromInst(Builder, P); 4015 // Check for PHI nodes that are lowered to vector selects. 4016 if (P->getParent() != OrigLoop->getHeader()) { 4017 // We know that all PHIs in non-header blocks are converted into 4018 // selects, so we don't have to worry about the insertion order and we 4019 // can just use the builder. 4020 // At this point we generate the predication tree. There may be 4021 // duplications since this is a simple recursive scan, but future 4022 // optimizations will clean it up. 4023 4024 unsigned NumIncoming = P->getNumIncomingValues(); 4025 4026 // Generate a sequence of selects of the form: 4027 // SELECT(Mask3, In3, 4028 // SELECT(Mask2, In2, 4029 // ( ...))) 4030 for (unsigned In = 0; In < NumIncoming; In++) { 4031 VectorParts Cond = 4032 createEdgeMask(P->getIncomingBlock(In), P->getParent()); 4033 VectorParts &In0 = getVectorValue(P->getIncomingValue(In)); 4034 4035 for (unsigned part = 0; part < UF; ++part) { 4036 // We might have single edge PHIs (blocks) - use an identity 4037 // 'select' for the first PHI operand. 4038 if (In == 0) 4039 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In0[part]); 4040 else 4041 // Select between the current value and the previous incoming edge 4042 // based on the incoming mask. 4043 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], Entry[part], 4044 "predphi"); 4045 } 4046 } 4047 return; 4048 } 4049 4050 // This PHINode must be an induction variable. 4051 // Make sure that we know about it. 4052 assert(Legal->getInductionVars()->count(P) && "Not an induction variable"); 4053 4054 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 4055 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4056 4057 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4058 // which can be found from the original scalar operations. 4059 switch (II.getKind()) { 4060 case InductionDescriptor::IK_NoInduction: 4061 llvm_unreachable("Unknown induction"); 4062 case InductionDescriptor::IK_IntInduction: { 4063 assert(P->getType() == II.getStartValue()->getType() && "Types must match"); 4064 // Handle other induction variables that are now based on the 4065 // canonical one. 4066 Value *V = Induction; 4067 if (P != OldInduction) { 4068 V = Builder.CreateSExtOrTrunc(Induction, P->getType()); 4069 V = II.transform(Builder, V, PSE.getSE(), DL); 4070 V->setName("offset.idx"); 4071 } 4072 Value *Broadcasted = getBroadcastInstrs(V); 4073 // After broadcasting the induction variable we need to make the vector 4074 // consecutive by adding 0, 1, 2, etc. 4075 for (unsigned part = 0; part < UF; ++part) 4076 Entry[part] = getStepVector(Broadcasted, VF * part, II.getStep()); 4077 return; 4078 } 4079 case InductionDescriptor::IK_PtrInduction: 4080 // Handle the pointer induction variable case. 4081 assert(P->getType()->isPointerTy() && "Unexpected type."); 4082 // This is the normalized GEP that starts counting at zero. 4083 Value *PtrInd = Induction; 4084 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType()); 4085 // This is the vector of results. Notice that we don't generate 4086 // vector geps because scalar geps result in better code. 
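// For VF = 2, UF = 1 this loop emits roughly (a sketch; names illustrative):
//   %idx0 = add i64 %ptr.ind, 0
//   %next.gep = getelementptr i32, i32* %base, i64 %idx0
//   %idx1 = add i64 %ptr.ind, 1
//   %next.gep1 = getelementptr i32, i32* %base, i64 %idx1
//   %veci = insertelement <2 x i32*> undef, i32* %next.gep, i32 0
//   %insert.gep = insertelement <2 x i32*> %veci, i32* %next.gep1, i32 1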
4087 for (unsigned part = 0; part < UF; ++part) { 4088 if (VF == 1) { 4089 int EltIndex = part; 4090 Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex); 4091 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4092 Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL); 4093 SclrGep->setName("next.gep"); 4094 Entry[part] = SclrGep; 4095 continue; 4096 } 4097 4098 Value *VecVal = UndefValue::get(VectorType::get(P->getType(), VF)); 4099 for (unsigned int i = 0; i < VF; ++i) { 4100 int EltIndex = i + part * VF; 4101 Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex); 4102 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4103 Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL); 4104 SclrGep->setName("next.gep"); 4105 VecVal = Builder.CreateInsertElement(VecVal, SclrGep, 4106 Builder.getInt32(i), "insert.gep"); 4107 } 4108 Entry[part] = VecVal; 4109 } 4110 return; 4111 } 4112 } 4113 4114 void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) { 4115 // For each instruction in the old loop. 4116 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 4117 VectorParts &Entry = WidenMap.get(&*it); 4118 4119 switch (it->getOpcode()) { 4120 case Instruction::Br: 4121 // Nothing to do for PHIs and BR, since we already took care of the 4122 // loop control flow instructions. 4123 continue; 4124 case Instruction::PHI: { 4125 // Vectorize PHINodes. 4126 widenPHIInstruction(&*it, Entry, UF, VF, PV); 4127 continue; 4128 } // End of PHI. 4129 4130 case Instruction::Add: 4131 case Instruction::FAdd: 4132 case Instruction::Sub: 4133 case Instruction::FSub: 4134 case Instruction::Mul: 4135 case Instruction::FMul: 4136 case Instruction::UDiv: 4137 case Instruction::SDiv: 4138 case Instruction::FDiv: 4139 case Instruction::URem: 4140 case Instruction::SRem: 4141 case Instruction::FRem: 4142 case Instruction::Shl: 4143 case Instruction::LShr: 4144 case Instruction::AShr: 4145 case Instruction::And: 4146 case Instruction::Or: 4147 case Instruction::Xor: { 4148 // Just widen binops. 4149 BinaryOperator *BinOp = dyn_cast<BinaryOperator>(it); 4150 setDebugLocFromInst(Builder, BinOp); 4151 VectorParts &A = getVectorValue(it->getOperand(0)); 4152 VectorParts &B = getVectorValue(it->getOperand(1)); 4153 4154 // Use this vector value for all users of the original instruction. 4155 for (unsigned Part = 0; Part < UF; ++Part) { 4156 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]); 4157 4158 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V)) 4159 VecOp->copyIRFlags(BinOp); 4160 4161 Entry[Part] = V; 4162 } 4163 4164 addMetadata(Entry, &*it); 4165 break; 4166 } 4167 case Instruction::Select: { 4168 // Widen selects. 4169 // If the selector is loop invariant we can create a select 4170 // instruction with a scalar condition. Otherwise, use vector-select. 4171 auto *SE = PSE.getSE(); 4172 bool InvariantCond = 4173 SE->isLoopInvariant(PSE.getSCEV(it->getOperand(0)), OrigLoop); 4174 setDebugLocFromInst(Builder, &*it); 4175 4176 // The condition can be loop invariant but still defined inside the 4177 // loop. This means that we can't just use the original 'cond' value. 4178 // We have to take the 'vectorized' value and pick the first lane. 4179 // Instcombine will make this a no-op. 4180 VectorParts &Cond = getVectorValue(it->getOperand(0)); 4181 VectorParts &Op0 = getVectorValue(it->getOperand(1)); 4182 VectorParts &Op1 = getVectorValue(it->getOperand(2)); 4183 4184 Value *ScalarCond = 4185 (VF == 1) 4186 ? 
Cond[0] 4187 : Builder.CreateExtractElement(Cond[0], Builder.getInt32(0)); 4188 4189 for (unsigned Part = 0; Part < UF; ++Part) { 4190 Entry[Part] = Builder.CreateSelect( 4191 InvariantCond ? ScalarCond : Cond[Part], Op0[Part], Op1[Part]); 4192 } 4193 4194 addMetadata(Entry, &*it); 4195 break; 4196 } 4197 4198 case Instruction::ICmp: 4199 case Instruction::FCmp: { 4200 // Widen compares. Generate vector compares. 4201 bool FCmp = (it->getOpcode() == Instruction::FCmp); 4202 CmpInst *Cmp = dyn_cast<CmpInst>(it); 4203 setDebugLocFromInst(Builder, &*it); 4204 VectorParts &A = getVectorValue(it->getOperand(0)); 4205 VectorParts &B = getVectorValue(it->getOperand(1)); 4206 for (unsigned Part = 0; Part < UF; ++Part) { 4207 Value *C = nullptr; 4208 if (FCmp) { 4209 C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]); 4210 cast<FCmpInst>(C)->copyFastMathFlags(&*it); 4211 } else { 4212 C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]); 4213 } 4214 Entry[Part] = C; 4215 } 4216 4217 addMetadata(Entry, &*it); 4218 break; 4219 } 4220 4221 case Instruction::Store: 4222 case Instruction::Load: 4223 vectorizeMemoryInstruction(&*it); 4224 break; 4225 case Instruction::ZExt: 4226 case Instruction::SExt: 4227 case Instruction::FPToUI: 4228 case Instruction::FPToSI: 4229 case Instruction::FPExt: 4230 case Instruction::PtrToInt: 4231 case Instruction::IntToPtr: 4232 case Instruction::SIToFP: 4233 case Instruction::UIToFP: 4234 case Instruction::Trunc: 4235 case Instruction::FPTrunc: 4236 case Instruction::BitCast: { 4237 CastInst *CI = dyn_cast<CastInst>(it); 4238 setDebugLocFromInst(Builder, &*it); 4239 /// Optimize the special case where the source is a constant integer 4240 /// induction variable. Notice that we can only optimize the 'trunc' case 4241 /// because: a. FP conversions lose precision, b. sext/zext may wrap, 4242 /// c. other casts depend on pointer size. 4243 4244 if (CI->getOperand(0) == OldInduction && 4245 it->getOpcode() == Instruction::Trunc) { 4246 InductionDescriptor II = 4247 Legal->getInductionVars()->lookup(OldInduction); 4248 if (auto StepValue = II.getConstIntStepValue()) { 4249 StepValue = ConstantInt::getSigned(cast<IntegerType>(CI->getType()), 4250 StepValue->getSExtValue()); 4251 Value *ScalarCast = Builder.CreateCast(CI->getOpcode(), Induction, 4252 CI->getType()); 4253 Value *Broadcasted = getBroadcastInstrs(ScalarCast); 4254 for (unsigned Part = 0; Part < UF; ++Part) 4255 Entry[Part] = getStepVector(Broadcasted, VF * Part, StepValue); 4256 addMetadata(Entry, &*it); 4257 break; 4258 } 4259 } 4260 /// Vectorize casts. 4261 Type *DestTy = 4262 (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF); 4263 4264 VectorParts &A = getVectorValue(it->getOperand(0)); 4265 for (unsigned Part = 0; Part < UF; ++Part) 4266 Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy); 4267 addMetadata(Entry, &*it); 4268 break; 4269 } 4270 4271 case Instruction::Call: { 4272 // Ignore dbg intrinsics. 
4273 if (isa<DbgInfoIntrinsic>(it)) 4274 break; 4275 setDebugLocFromInst(Builder, &*it); 4276 4277 Module *M = BB->getParent()->getParent(); 4278 CallInst *CI = cast<CallInst>(it); 4279 4280 StringRef FnName = CI->getCalledFunction()->getName(); 4281 Function *F = CI->getCalledFunction(); 4282 Type *RetTy = ToVectorTy(CI->getType(), VF); 4283 SmallVector<Type *, 4> Tys; 4284 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) 4285 Tys.push_back(ToVectorTy(CI->getArgOperand(i)->getType(), VF)); 4286 4287 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4288 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 4289 ID == Intrinsic::lifetime_start)) { 4290 scalarizeInstruction(&*it); 4291 break; 4292 } 4293 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4294 // version of the instruction. 4295 // Is it beneficial to perform intrinsic call compared to lib call? 4296 bool NeedToScalarize; 4297 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 4298 bool UseVectorIntrinsic = 4299 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 4300 if (!UseVectorIntrinsic && NeedToScalarize) { 4301 scalarizeInstruction(&*it); 4302 break; 4303 } 4304 4305 for (unsigned Part = 0; Part < UF; ++Part) { 4306 SmallVector<Value *, 4> Args; 4307 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) { 4308 Value *Arg = CI->getArgOperand(i); 4309 // Some intrinsics have a scalar argument - don't replace it with a 4310 // vector. 4311 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) { 4312 VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i)); 4313 Arg = VectorArg[Part]; 4314 } 4315 Args.push_back(Arg); 4316 } 4317 4318 Function *VectorF; 4319 if (UseVectorIntrinsic) { 4320 // Use vector version of the intrinsic. 4321 Type *TysForDecl[] = {CI->getType()}; 4322 if (VF > 1) 4323 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4324 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4325 } else { 4326 // Use vector version of the library call. 4327 StringRef VFnName = TLI->getVectorizedFunction(FnName, VF); 4328 assert(!VFnName.empty() && "Vector function name is empty."); 4329 VectorF = M->getFunction(VFnName); 4330 if (!VectorF) { 4331 // Generate a declaration 4332 FunctionType *FTy = FunctionType::get(RetTy, Tys, false); 4333 VectorF = 4334 Function::Create(FTy, Function::ExternalLinkage, VFnName, M); 4335 VectorF->copyAttributesFrom(F); 4336 } 4337 } 4338 assert(VectorF && "Can't create vector function."); 4339 4340 SmallVector<OperandBundleDef, 1> OpBundles; 4341 CI->getOperandBundlesAsDefs(OpBundles); 4342 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4343 4344 if (isa<FPMathOperator>(V)) 4345 V->copyFastMathFlags(CI); 4346 4347 Entry[Part] = V; 4348 } 4349 4350 addMetadata(Entry, &*it); 4351 break; 4352 } 4353 4354 default: 4355 // All other instructions are unsupported. Scalarize them. 4356 scalarizeInstruction(&*it); 4357 break; 4358 } // end of switch. 4359 } // end of for_each instr. 4360 } 4361 4362 void InnerLoopVectorizer::updateAnalysis() { 4363 // Forget the original basic block. 4364 PSE.getSE()->forgetLoop(OrigLoop); 4365 4366 // Update the dominator tree information. 4367 assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) && 4368 "Entry does not dominate exit."); 4369 4370 // We don't predicate stores by this point, so the vector body should be a 4371 // single loop. 
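  // A rough sketch (added for clarity; not from the original comments) of the
  // immediate-dominator updates performed below, under the single-block
  // vector-body assumption just stated. Block names follow the members used
  // in this function:
  //
  //   LoopBypassBlocks[0] idoms LoopScalarPreHeader idoms LoopScalarBody
  //   LoopBypassBlocks[0] idoms LoopExitBlock
  //   LoopVectorPreHeader idoms LoopVectorBody[0] idoms LoopMiddleBlock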
4372   assert(LoopVectorBody.size() == 1 && "Expected single block loop!");
4373   DT->addNewBlock(LoopVectorBody[0], LoopVectorPreHeader);
4374
4375   DT->addNewBlock(LoopMiddleBlock, LoopVectorBody.back());
4376   DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
4377   DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
4378   DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
4379
4380   DEBUG(DT->verifyDomTree());
4381 }
4382
4383 /// \brief Check whether it is safe to if-convert this phi node.
4384 ///
4385 /// Phi nodes with constant expressions that can trap are not safe to
4386 /// if-convert.
4387 static bool canIfConvertPHINodes(BasicBlock *BB) {
4388   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
4389     PHINode *Phi = dyn_cast<PHINode>(I);
4390     if (!Phi)
4391       return true;
4392     for (unsigned p = 0, e = Phi->getNumIncomingValues(); p != e; ++p)
4393       if (Constant *C = dyn_cast<Constant>(Phi->getIncomingValue(p)))
4394         if (C->canTrap())
4395           return false;
4396   }
4397   return true;
4398 }
4399
4400 bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
4401   if (!EnableIfConversion) {
4402     emitAnalysis(VectorizationReport() << "if-conversion is disabled");
4403     return false;
4404   }
4405
4406   assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");
4407
4408   // A list of pointers that we can safely read and write to.
4409   SmallPtrSet<Value *, 8> SafePointers;
4410
4411   // Collect safe addresses.
4412   for (Loop::block_iterator BI = TheLoop->block_begin(),
4413                             BE = TheLoop->block_end();
4414        BI != BE; ++BI) {
4415     BasicBlock *BB = *BI;
4416
4417     if (blockNeedsPredication(BB))
4418       continue;
4419
4420     for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
4421       if (LoadInst *LI = dyn_cast<LoadInst>(I))
4422         SafePointers.insert(LI->getPointerOperand());
4423       else if (StoreInst *SI = dyn_cast<StoreInst>(I))
4424         SafePointers.insert(SI->getPointerOperand());
4425     }
4426   }
4427
4428   // Collect the blocks that need predication.
4429   BasicBlock *Header = TheLoop->getHeader();
4430   for (Loop::block_iterator BI = TheLoop->block_begin(),
4431                             BE = TheLoop->block_end();
4432        BI != BE; ++BI) {
4433     BasicBlock *BB = *BI;
4434
4435     // We don't support switch statements inside loops.
4436     if (!isa<BranchInst>(BB->getTerminator())) {
4437       emitAnalysis(VectorizationReport(BB->getTerminator())
4438                    << "loop contains a switch statement");
4439       return false;
4440     }
4441
4442     // We must be able to predicate all blocks that need to be predicated.
4443     if (blockNeedsPredication(BB)) {
4444       if (!blockCanBePredicated(BB, SafePointers)) {
4445         emitAnalysis(VectorizationReport(BB->getTerminator())
4446                      << "control flow cannot be substituted for a select");
4447         return false;
4448       }
4449     } else if (BB != Header && !canIfConvertPHINodes(BB)) {
4450       emitAnalysis(VectorizationReport(BB->getTerminator())
4451                    << "control flow cannot be substituted for a select");
4452       return false;
4453     }
4454   }
4455
4456   // We can if-convert this loop.
4457   return true;
4458 }
4459
4460 bool LoopVectorizationLegality::canVectorize() {
4461   // We must have a loop in canonical form. Loops with indirectbr in them cannot
4462   // be canonicalized.
4463   if (!TheLoop->getLoopPreheader()) {
4464     emitAnalysis(VectorizationReport()
4465                  << "loop control flow is not understood by vectorizer");
4466     return false;
4467   }
4468
4469   // We can only vectorize innermost loops.
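  // For instance (illustrative C source, not from this file):
  //
  //   for (i = 0; i < n; ++i)      // rejected here: it has a subloop
  //     for (j = 0; j < m; ++j)    // innermost: a candidate for this pass
  //       a[i][j] += 1;
  //
  // Loop::empty() is true iff the loop has no subloops, so the check below
  // rejects any loop that is not innermost.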
4470 if (!TheLoop->empty()) { 4471 emitAnalysis(VectorizationReport() << "loop is not the innermost loop"); 4472 return false; 4473 } 4474 4475 // We must have a single backedge. 4476 if (TheLoop->getNumBackEdges() != 1) { 4477 emitAnalysis(VectorizationReport() 4478 << "loop control flow is not understood by vectorizer"); 4479 return false; 4480 } 4481 4482 // We must have a single exiting block. 4483 if (!TheLoop->getExitingBlock()) { 4484 emitAnalysis(VectorizationReport() 4485 << "loop control flow is not understood by vectorizer"); 4486 return false; 4487 } 4488 4489 // We only handle bottom-tested loops, i.e. loop in which the condition is 4490 // checked at the end of each iteration. With that we can assume that all 4491 // instructions in the loop are executed the same number of times. 4492 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 4493 emitAnalysis(VectorizationReport() 4494 << "loop control flow is not understood by vectorizer"); 4495 return false; 4496 } 4497 4498 // We need to have a loop header. 4499 DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName() 4500 << '\n'); 4501 4502 // Check if we can if-convert non-single-bb loops. 4503 unsigned NumBlocks = TheLoop->getNumBlocks(); 4504 if (NumBlocks != 1 && !canVectorizeWithIfConvert()) { 4505 DEBUG(dbgs() << "LV: Can't if-convert the loop.\n"); 4506 return false; 4507 } 4508 4509 // ScalarEvolution needs to be able to find the exit count. 4510 const SCEV *ExitCount = PSE.getBackedgeTakenCount(); 4511 if (ExitCount == PSE.getSE()->getCouldNotCompute()) { 4512 emitAnalysis(VectorizationReport() 4513 << "could not determine number of loop iterations"); 4514 DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n"); 4515 return false; 4516 } 4517 4518 // Check if we can vectorize the instructions and CFG in this loop. 4519 if (!canVectorizeInstrs()) { 4520 DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n"); 4521 return false; 4522 } 4523 4524 // Go over each instruction and look at memory deps. 4525 if (!canVectorizeMemory()) { 4526 DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n"); 4527 return false; 4528 } 4529 4530 // Collect all of the variables that remain uniform after vectorization. 4531 collectLoopUniforms(); 4532 4533 DEBUG(dbgs() << "LV: We can vectorize this loop" 4534 << (LAI->getRuntimePointerChecking()->Need 4535 ? " (with a runtime bound check)" 4536 : "") 4537 << "!\n"); 4538 4539 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 4540 4541 // If an override option has been passed in for interleaved accesses, use it. 4542 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 4543 UseInterleaved = EnableInterleavedMemAccesses; 4544 4545 // Analyze interleaved memory accesses. 4546 if (UseInterleaved) 4547 InterleaveInfo.analyzeInterleaving(Strides); 4548 4549 unsigned SCEVThreshold = VectorizeSCEVCheckThreshold; 4550 if (Hints->getForce() == LoopVectorizeHints::FK_Enabled) 4551 SCEVThreshold = PragmaVectorizeSCEVCheckThreshold; 4552 4553 if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) { 4554 emitAnalysis(VectorizationReport() 4555 << "Too many SCEV assumptions need to be made and checked " 4556 << "at runtime"); 4557 DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n"); 4558 return false; 4559 } 4560 4561 // Okay! We can vectorize. At this point we don't have any other mem analysis 4562 // which may limit our maximum vectorization factor, so just return true with 4563 // no restrictions. 
4564 return true; 4565 } 4566 4567 static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) { 4568 if (Ty->isPointerTy()) 4569 return DL.getIntPtrType(Ty); 4570 4571 // It is possible that char's or short's overflow when we ask for the loop's 4572 // trip count, work around this by changing the type size. 4573 if (Ty->getScalarSizeInBits() < 32) 4574 return Type::getInt32Ty(Ty->getContext()); 4575 4576 return Ty; 4577 } 4578 4579 static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) { 4580 Ty0 = convertPointerToIntegerType(DL, Ty0); 4581 Ty1 = convertPointerToIntegerType(DL, Ty1); 4582 if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits()) 4583 return Ty0; 4584 return Ty1; 4585 } 4586 4587 /// \brief Check that the instruction has outside loop users and is not an 4588 /// identified reduction variable. 4589 static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst, 4590 SmallPtrSetImpl<Value *> &Reductions) { 4591 // Reduction instructions are allowed to have exit users. All other 4592 // instructions must not have external users. 4593 if (!Reductions.count(Inst)) 4594 // Check that all of the users of the loop are inside the BB. 4595 for (User *U : Inst->users()) { 4596 Instruction *UI = cast<Instruction>(U); 4597 // This user may be a reduction exit value. 4598 if (!TheLoop->contains(UI)) { 4599 DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n'); 4600 return true; 4601 } 4602 } 4603 return false; 4604 } 4605 4606 bool LoopVectorizationLegality::addInductionPhi(PHINode *Phi, 4607 InductionDescriptor ID) { 4608 Inductions[Phi] = ID; 4609 Type *PhiTy = Phi->getType(); 4610 const DataLayout &DL = Phi->getModule()->getDataLayout(); 4611 4612 // Get the widest type. 4613 if (!WidestIndTy) 4614 WidestIndTy = convertPointerToIntegerType(DL, PhiTy); 4615 else 4616 WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy); 4617 4618 // Int inductions are special because we only allow one IV. 4619 if (ID.getKind() == InductionDescriptor::IK_IntInduction && 4620 ID.getConstIntStepValue() && 4621 ID.getConstIntStepValue()->isOne() && 4622 isa<Constant>(ID.getStartValue()) && 4623 cast<Constant>(ID.getStartValue())->isNullValue()) { 4624 4625 // Use the phi node with the widest type as induction. Use the last 4626 // one if there are multiple (no good reason for doing this other 4627 // than it is expedient). We've checked that it begins at zero and 4628 // steps by one, so this is a canonical induction variable. 4629 if (!Induction || PhiTy == WidestIndTy) 4630 Induction = Phi; 4631 } 4632 4633 DEBUG(dbgs() << "LV: Found an induction variable.\n"); 4634 4635 // Until we explicitly handle the case of an induction variable with 4636 // an outside loop user we have to give up vectorizing this loop. 4637 if (hasOutsideLoopUser(TheLoop, Phi, AllowedExit)) { 4638 emitAnalysis(VectorizationReport(Phi) << 4639 "use of induction value outside of the " 4640 "loop is not handled by vectorizer"); 4641 return false; 4642 } 4643 4644 return true; 4645 } 4646 4647 bool LoopVectorizationLegality::canVectorizeInstrs() { 4648 BasicBlock *Header = TheLoop->getHeader(); 4649 4650 // Look for the attribute signaling the absence of NaNs. 4651 Function &F = *Header->getParent(); 4652 HasFunNoNaNAttr = 4653 F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true"; 4654 4655 // For each block in the loop. 
4656   for (Loop::block_iterator bb = TheLoop->block_begin(),
4657                             be = TheLoop->block_end();
4658        bb != be; ++bb) {
4659
4660     // Scan the instructions in the block and look for hazards.
4661     for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
4662          ++it) {
4663
4664       if (PHINode *Phi = dyn_cast<PHINode>(it)) {
4665         Type *PhiTy = Phi->getType();
4666         // Check that this PHI type is allowed.
4667         if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
4668             !PhiTy->isPointerTy()) {
4669           emitAnalysis(VectorizationReport(&*it)
4670                        << "loop control flow is not understood by vectorizer");
4671           DEBUG(dbgs() << "LV: Found a non-int, non-pointer PHI.\n");
4672           return false;
4673         }
4674
4675         // If this PHINode is not in the header block, then we know that we
4676         // can convert it to select during if-conversion. No need to check if
4677         // the PHIs in this block are induction or reduction variables.
4678         if (*bb != Header) {
4679           // Check that this instruction has no outside users or is an
4680           // identified reduction value with an outside user.
4681           if (!hasOutsideLoopUser(TheLoop, &*it, AllowedExit))
4682             continue;
4683           emitAnalysis(VectorizationReport(&*it)
4684                        << "value could not be identified as "
4685                           "an induction or reduction variable");
4686           return false;
4687         }
4688
4689         // We only allow if-converted PHIs with exactly two incoming values.
4690         if (Phi->getNumIncomingValues() != 2) {
4691           emitAnalysis(VectorizationReport(&*it)
4692                        << "control flow not understood by vectorizer");
4693           DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
4694           return false;
4695         }
4696
4697         RecurrenceDescriptor RedDes;
4698         if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
4699           if (RedDes.hasUnsafeAlgebra())
4700             Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
4701           AllowedExit.insert(RedDes.getLoopExitInstr());
4702           Reductions[Phi] = RedDes;
4703           continue;
4704         }
4705
4706         InductionDescriptor ID;
4707         if (InductionDescriptor::isInductionPHI(Phi, PSE, ID)) {
4708           if (!addInductionPhi(Phi, ID))
4709             return false;
4710           continue;
4711         }
4712
4713         if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) {
4714           FirstOrderRecurrences.insert(Phi);
4715           continue;
4716         }
4717
4718         // As a last resort, coerce the PHI to an AddRec expression
4719         // and retry classifying it as an induction PHI.
4720         if (InductionDescriptor::isInductionPHI(Phi, PSE, ID, true)) {
4721           if (!addInductionPhi(Phi, ID))
4722             return false;
4723           continue;
4724         }
4725
4726         emitAnalysis(VectorizationReport(&*it)
4727                      << "value that could not be identified as "
4728                         "a reduction is used outside the loop");
4729         DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
4730         return false;
4731       } // end of PHI handling
4732
4733       // We handle calls that:
4734       //   * Are debug info intrinsics.
4735       //   * Have a mapping to an IR intrinsic.
4736       //   * Have a vector version available.
4737       CallInst *CI = dyn_cast<CallInst>(it);
4738       if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
4739           !isa<DbgInfoIntrinsic>(CI) &&
4740           !(CI->getCalledFunction() && TLI &&
4741             TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
4742         emitAnalysis(VectorizationReport(&*it)
4743                      << "call instruction cannot be vectorized");
4744         DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
4745         return false;
4746       }
4747
4748       // Intrinsics such as powi, cttz, and ctlz are legal to vectorize if the
4749       // second argument is the same (i.e.
loop invariant) 4750 if (CI && hasVectorInstrinsicScalarOpd( 4751 getVectorIntrinsicIDForCall(CI, TLI), 1)) { 4752 auto *SE = PSE.getSE(); 4753 if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) { 4754 emitAnalysis(VectorizationReport(&*it) 4755 << "intrinsic instruction cannot be vectorized"); 4756 DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n"); 4757 return false; 4758 } 4759 } 4760 4761 // Check that the instruction return type is vectorizable. 4762 // Also, we can't vectorize extractelement instructions. 4763 if ((!VectorType::isValidElementType(it->getType()) && 4764 !it->getType()->isVoidTy()) || 4765 isa<ExtractElementInst>(it)) { 4766 emitAnalysis(VectorizationReport(&*it) 4767 << "instruction return type cannot be vectorized"); 4768 DEBUG(dbgs() << "LV: Found unvectorizable type.\n"); 4769 return false; 4770 } 4771 4772 // Check that the stored type is vectorizable. 4773 if (StoreInst *ST = dyn_cast<StoreInst>(it)) { 4774 Type *T = ST->getValueOperand()->getType(); 4775 if (!VectorType::isValidElementType(T)) { 4776 emitAnalysis(VectorizationReport(ST) 4777 << "store instruction cannot be vectorized"); 4778 return false; 4779 } 4780 if (EnableMemAccessVersioning) 4781 collectStridedAccess(ST); 4782 4783 } else if (LoadInst *LI = dyn_cast<LoadInst>(it)) { 4784 if (EnableMemAccessVersioning) 4785 collectStridedAccess(LI); 4786 4787 // FP instructions can allow unsafe algebra, thus vectorizable by 4788 // non-IEEE-754 compliant SIMD units. 4789 // This applies to floating-point math operations and calls, not memory 4790 // operations, shuffles, or casts, as they don't change precision or 4791 // semantics. 4792 } else if (it->getType()->isFloatingPointTy() && 4793 (CI || it->isBinaryOp()) && !it->hasUnsafeAlgebra()) { 4794 DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n"); 4795 Hints->setPotentiallyUnsafe(); 4796 } 4797 4798 // Reduction instructions are allowed to have exit users. 4799 // All other instructions must not have external users. 4800 if (hasOutsideLoopUser(TheLoop, &*it, AllowedExit)) { 4801 emitAnalysis(VectorizationReport(&*it) 4802 << "value cannot be used outside the loop"); 4803 return false; 4804 } 4805 4806 } // next instr. 4807 } 4808 4809 if (!Induction) { 4810 DEBUG(dbgs() << "LV: Did not find one integer induction var.\n"); 4811 if (Inductions.empty()) { 4812 emitAnalysis(VectorizationReport() 4813 << "loop induction variable could not be identified"); 4814 return false; 4815 } 4816 } 4817 4818 // Now we know the widest induction type, check if our found induction 4819 // is the same size. If it's not, unset it here and InnerLoopVectorizer 4820 // will create another. 
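  // E.g. (illustrative): with an i32 counter phi and an i64 pointer induction
  // in the same loop, WidestIndTy is the 64-bit integer type, so an i32
  // Induction chosen above is discarded here and a fresh, wider canonical
  // induction is created during vectorization.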
4821 if (Induction && WidestIndTy != Induction->getType()) 4822 Induction = nullptr; 4823 4824 return true; 4825 } 4826 4827 void LoopVectorizationLegality::collectStridedAccess(Value *MemAccess) { 4828 Value *Ptr = nullptr; 4829 if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess)) 4830 Ptr = LI->getPointerOperand(); 4831 else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess)) 4832 Ptr = SI->getPointerOperand(); 4833 else 4834 return; 4835 4836 Value *Stride = getStrideFromPointer(Ptr, PSE.getSE(), TheLoop); 4837 if (!Stride) 4838 return; 4839 4840 DEBUG(dbgs() << "LV: Found a strided access that we can version"); 4841 DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n"); 4842 Strides[Ptr] = Stride; 4843 StrideSet.insert(Stride); 4844 } 4845 4846 void LoopVectorizationLegality::collectLoopUniforms() { 4847 // We now know that the loop is vectorizable! 4848 // Collect variables that will remain uniform after vectorization. 4849 std::vector<Value *> Worklist; 4850 BasicBlock *Latch = TheLoop->getLoopLatch(); 4851 4852 // Start with the conditional branch and walk up the block. 4853 Worklist.push_back(Latch->getTerminator()->getOperand(0)); 4854 4855 // Also add all consecutive pointer values; these values will be uniform 4856 // after vectorization (and subsequent cleanup) and, until revectorization is 4857 // supported, all dependencies must also be uniform. 4858 for (Loop::block_iterator B = TheLoop->block_begin(), 4859 BE = TheLoop->block_end(); 4860 B != BE; ++B) 4861 for (BasicBlock::iterator I = (*B)->begin(), IE = (*B)->end(); I != IE; ++I) 4862 if (I->getType()->isPointerTy() && isConsecutivePtr(&*I)) 4863 Worklist.insert(Worklist.end(), I->op_begin(), I->op_end()); 4864 4865 while (!Worklist.empty()) { 4866 Instruction *I = dyn_cast<Instruction>(Worklist.back()); 4867 Worklist.pop_back(); 4868 4869 // Look at instructions inside this loop. 4870 // Stop when reaching PHI nodes. 4871 // TODO: we need to follow values all over the loop, not only in this block. 4872 if (!I || !TheLoop->contains(I) || isa<PHINode>(I)) 4873 continue; 4874 4875 // This is a known uniform. 4876 Uniforms.insert(I); 4877 4878 // Insert all operands. 
4879 Worklist.insert(Worklist.end(), I->op_begin(), I->op_end()); 4880 } 4881 } 4882 4883 bool LoopVectorizationLegality::canVectorizeMemory() { 4884 LAI = &LAA->getInfo(TheLoop, Strides); 4885 auto &OptionalReport = LAI->getReport(); 4886 if (OptionalReport) 4887 emitAnalysis(VectorizationReport(*OptionalReport)); 4888 if (!LAI->canVectorizeMemory()) 4889 return false; 4890 4891 if (LAI->hasStoreToLoopInvariantAddress()) { 4892 emitAnalysis( 4893 VectorizationReport() 4894 << "write to a loop invariant address could not be vectorized"); 4895 DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n"); 4896 return false; 4897 } 4898 4899 Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks()); 4900 PSE.addPredicate(LAI->PSE.getUnionPredicate()); 4901 4902 return true; 4903 } 4904 4905 bool LoopVectorizationLegality::isInductionVariable(const Value *V) { 4906 Value *In0 = const_cast<Value *>(V); 4907 PHINode *PN = dyn_cast_or_null<PHINode>(In0); 4908 if (!PN) 4909 return false; 4910 4911 return Inductions.count(PN); 4912 } 4913 4914 bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) { 4915 return FirstOrderRecurrences.count(Phi); 4916 } 4917 4918 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) { 4919 return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT); 4920 } 4921 4922 bool LoopVectorizationLegality::blockCanBePredicated( 4923 BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) { 4924 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel(); 4925 4926 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 4927 // Check that we don't have a constant expression that can trap as operand. 4928 for (Instruction::op_iterator OI = it->op_begin(), OE = it->op_end(); 4929 OI != OE; ++OI) { 4930 if (Constant *C = dyn_cast<Constant>(*OI)) 4931 if (C->canTrap()) 4932 return false; 4933 } 4934 // We might be able to hoist the load. 4935 if (it->mayReadFromMemory()) { 4936 LoadInst *LI = dyn_cast<LoadInst>(it); 4937 if (!LI) 4938 return false; 4939 if (!SafePtrs.count(LI->getPointerOperand())) { 4940 if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) || 4941 isLegalMaskedGather(LI->getType())) { 4942 MaskedOp.insert(LI); 4943 continue; 4944 } 4945 // !llvm.mem.parallel_loop_access implies if-conversion safety. 4946 if (IsAnnotatedParallel) 4947 continue; 4948 return false; 4949 } 4950 } 4951 4952 // We don't predicate stores at the moment. 4953 if (it->mayWriteToMemory()) { 4954 StoreInst *SI = dyn_cast<StoreInst>(it); 4955 // We only support predication of stores in basic blocks with one 4956 // predecessor. 4957 if (!SI) 4958 return false; 4959 4960 // Build a masked store if it is legal for the target. 4961 if (isLegalMaskedStore(SI->getValueOperand()->getType(), 4962 SI->getPointerOperand()) || 4963 isLegalMaskedScatter(SI->getValueOperand()->getType())) { 4964 MaskedOp.insert(SI); 4965 continue; 4966 } 4967 4968 bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0); 4969 bool isSinglePredecessor = SI->getParent()->getSinglePredecessor(); 4970 4971 if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr || 4972 !isSinglePredecessor) 4973 return false; 4974 } 4975 if (it->mayThrow()) 4976 return false; 4977 4978 // The instructions below can trap. 
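    // E.g. (illustrative): in
    //
    //   for (i = 0; i < n; ++i)
    //     if (b[i] != 0)
    //       q[i] = a[i] / b[i];
    //
    // if-converting the guarded block would execute the division for lanes
    // where b[i] == 0 in the original program, so such blocks are rejected.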
4979 switch (it->getOpcode()) { 4980 default: 4981 continue; 4982 case Instruction::UDiv: 4983 case Instruction::SDiv: 4984 case Instruction::URem: 4985 case Instruction::SRem: 4986 return false; 4987 } 4988 } 4989 4990 return true; 4991 } 4992 4993 void InterleavedAccessInfo::collectConstStridedAccesses( 4994 MapVector<Instruction *, StrideDescriptor> &StrideAccesses, 4995 const ValueToValueMap &Strides) { 4996 // Holds load/store instructions in program order. 4997 SmallVector<Instruction *, 16> AccessList; 4998 4999 for (auto *BB : TheLoop->getBlocks()) { 5000 bool IsPred = LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT); 5001 5002 for (auto &I : *BB) { 5003 if (!isa<LoadInst>(&I) && !isa<StoreInst>(&I)) 5004 continue; 5005 // FIXME: Currently we can't handle mixed accesses and predicated accesses 5006 if (IsPred) 5007 return; 5008 5009 AccessList.push_back(&I); 5010 } 5011 } 5012 5013 if (AccessList.empty()) 5014 return; 5015 5016 auto &DL = TheLoop->getHeader()->getModule()->getDataLayout(); 5017 for (auto I : AccessList) { 5018 LoadInst *LI = dyn_cast<LoadInst>(I); 5019 StoreInst *SI = dyn_cast<StoreInst>(I); 5020 5021 Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand(); 5022 int Stride = getPtrStride(PSE, Ptr, TheLoop, Strides); 5023 5024 // The factor of the corresponding interleave group. 5025 unsigned Factor = std::abs(Stride); 5026 5027 // Ignore the access if the factor is too small or too large. 5028 if (Factor < 2 || Factor > MaxInterleaveGroupFactor) 5029 continue; 5030 5031 const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 5032 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); 5033 unsigned Size = DL.getTypeAllocSize(PtrTy->getElementType()); 5034 5035 // An alignment of 0 means target ABI alignment. 5036 unsigned Align = LI ? LI->getAlignment() : SI->getAlignment(); 5037 if (!Align) 5038 Align = DL.getABITypeAlignment(PtrTy->getElementType()); 5039 5040 StrideAccesses[I] = StrideDescriptor(Stride, Scev, Size, Align); 5041 } 5042 } 5043 5044 // Analyze interleaved accesses and collect them into interleave groups. 5045 // 5046 // Notice that the vectorization on interleaved groups will change instruction 5047 // orders and may break dependences. But the memory dependence check guarantees 5048 // that there is no overlap between two pointers of different strides, element 5049 // sizes or underlying bases. 5050 // 5051 // For pointers sharing the same stride, element size and underlying base, no 5052 // need to worry about Read-After-Write dependences and Write-After-Read 5053 // dependences. 5054 // 5055 // E.g. The RAW dependence: A[i] = a; 5056 // b = A[i]; 5057 // This won't exist as it is a store-load forwarding conflict, which has 5058 // already been checked and forbidden in the dependence check. 5059 // 5060 // E.g. The WAR dependence: a = A[i]; // (1) 5061 // A[i] = b; // (2) 5062 // The store group of (2) is always inserted at or below (2), and the load group 5063 // of (1) is always inserted at or above (1). The dependence is safe. 5064 void InterleavedAccessInfo::analyzeInterleaving( 5065 const ValueToValueMap &Strides) { 5066 DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n"); 5067 5068 // Holds all the stride accesses. 5069 MapVector<Instruction *, StrideDescriptor> StrideAccesses; 5070 collectConstStridedAccesses(StrideAccesses, Strides); 5071 5072 if (StrideAccesses.empty()) 5073 return; 5074 5075 // Holds all interleaved store groups temporarily. 
5076   SmallSetVector<InterleaveGroup *, 4> StoreGroups;
5077   // Holds all interleaved load groups temporarily.
5078   SmallSetVector<InterleaveGroup *, 4> LoadGroups;
5079
5080   // Search for load-load/store-store pairs B-A in bottom-up order and try to
5081   // insert B into the interleave group of A according to 3 rules:
5082   //   1. A and B have the same stride.
5083   //   2. A and B have the same memory object size.
5084   //   3. B belongs to the group according to the distance.
5085   //
5086   // The bottom-up order can avoid breaking the Write-After-Write dependences
5087   // between two pointers of the same base.
5088   // E.g.  A[i]   = a;   (1)
5089   //       A[i]   = b;   (2)
5090   //       A[i+1] = c;   (3)
5091   // We form the group (2)+(3) first, so (1) has to form groups with accesses
5092   // above (1), which guarantees that (1) is always above (2).
5093   for (auto I = StrideAccesses.rbegin(), E = StrideAccesses.rend(); I != E;
5094        ++I) {
5095     Instruction *A = I->first;
5096     StrideDescriptor DesA = I->second;
5097
5098     InterleaveGroup *Group = getInterleaveGroup(A);
5099     if (!Group) {
5100       DEBUG(dbgs() << "LV: Creating an interleave group with:" << *A << '\n');
5101       Group = createInterleaveGroup(A, DesA.Stride, DesA.Align);
5102     }
5103
5104     if (A->mayWriteToMemory())
5105       StoreGroups.insert(Group);
5106     else
5107       LoadGroups.insert(Group);
5108
5109     for (auto II = std::next(I); II != E; ++II) {
5110       Instruction *B = II->first;
5111       StrideDescriptor DesB = II->second;
5112
5113       // Ignore if B is already in a group or B is a different memory operation.
5114       if (isInterleaved(B) || A->mayReadFromMemory() != B->mayReadFromMemory())
5115         continue;
5116
5117       // Check rules 1 and 2.
5118       if (DesB.Stride != DesA.Stride || DesB.Size != DesA.Size)
5119         continue;
5120
5121       // Calculate the distance and prepare for rule 3.
5122       const SCEVConstant *DistToA = dyn_cast<SCEVConstant>(
5123           PSE.getSE()->getMinusSCEV(DesB.Scev, DesA.Scev));
5124       if (!DistToA)
5125         continue;
5126
5127       int DistanceToA = DistToA->getAPInt().getSExtValue();
5128
5129       // Skip if the distance is not a multiple of the size, as then B and A
5130       // cannot be in the same group.
5131       if (DistanceToA % static_cast<int>(DesA.Size))
5132         continue;
5133
5134       // The index of B is the index of A plus B's distance to A in access-size units.
5135       int IndexB =
5136           Group->getIndex(A) + DistanceToA / static_cast<int>(DesA.Size);
5137
5138       // Try to insert B into the group.
5139       if (Group->insertMember(B, IndexB, DesB.Align)) {
5140         DEBUG(dbgs() << "LV: Inserted:" << *B << '\n'
5141                      << "    into the interleave group with" << *A << '\n');
5142         InterleaveGroupMap[B] = Group;
5143
5144         // Set the first load in program order as the insert position.
5145         if (B->mayReadFromMemory())
5146           Group->setInsertPos(B);
5147       }
5148     } // Iteration on instruction B
5149   } // Iteration on instruction A
5150
5151   // Remove interleaved store groups with gaps.
5152   for (InterleaveGroup *Group : StoreGroups)
5153     if (Group->getNumMembers() != Group->getFactor())
5154       releaseGroup(Group);
5155
5156   // If there is a non-reversed interleaved load group with gaps, we will need
5157   // to execute at least one scalar epilogue iteration. This will ensure that
5158   // we don't speculatively access memory out-of-bounds. Note that we only need
5159   // to look for a member at index factor - 1, since every group must have a
5160   // member at index zero.
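  // For example (factor 2, illustrative):
  //
  //   a = A[2*i];        // member at index 0
  //   /* no A[2*i+1] */  // gap at index factor - 1 == 1
  //
  // The widened group would load A[2*i] and A[2*i+1] together; on the last
  // vector iteration A[2*i+1] may lie past the end of A, hence the scalar
  // epilogue requirement for non-reversed load groups below.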
5161 for (InterleaveGroup *Group : LoadGroups) 5162 if (!Group->getMember(Group->getFactor() - 1)) { 5163 if (Group->isReverse()) { 5164 releaseGroup(Group); 5165 } else { 5166 DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n"); 5167 RequiresScalarEpilogue = true; 5168 } 5169 } 5170 } 5171 5172 LoopVectorizationCostModel::VectorizationFactor 5173 LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) { 5174 // Width 1 means no vectorize 5175 VectorizationFactor Factor = {1U, 0U}; 5176 if (OptForSize && Legal->getRuntimePointerChecking()->Need) { 5177 emitAnalysis( 5178 VectorizationReport() 5179 << "runtime pointer checks needed. Enable vectorization of this " 5180 "loop with '#pragma clang loop vectorize(enable)' when " 5181 "compiling with -Os/-Oz"); 5182 DEBUG(dbgs() 5183 << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); 5184 return Factor; 5185 } 5186 5187 if (!EnableCondStoresVectorization && Legal->getNumPredStores()) { 5188 emitAnalysis( 5189 VectorizationReport() 5190 << "store that is conditionally executed prevents vectorization"); 5191 DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n"); 5192 return Factor; 5193 } 5194 5195 // Find the trip count. 5196 unsigned TC = SE->getSmallConstantTripCount(TheLoop); 5197 DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 5198 5199 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5200 unsigned SmallestType, WidestType; 5201 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5202 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 5203 unsigned MaxSafeDepDist = -1U; 5204 if (Legal->getMaxSafeDepDistBytes() != -1U) 5205 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 5206 WidestRegister = 5207 ((WidestRegister < MaxSafeDepDist) ? WidestRegister : MaxSafeDepDist); 5208 unsigned MaxVectorSize = WidestRegister / WidestType; 5209 5210 DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / " 5211 << WidestType << " bits.\n"); 5212 DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister 5213 << " bits.\n"); 5214 5215 if (MaxVectorSize == 0) { 5216 DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 5217 MaxVectorSize = 1; 5218 } 5219 5220 assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements" 5221 " into one vector!"); 5222 5223 unsigned VF = MaxVectorSize; 5224 if (MaximizeBandwidth && !OptForSize) { 5225 // Collect all viable vectorization factors. 5226 SmallVector<unsigned, 8> VFs; 5227 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 5228 for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2) 5229 VFs.push_back(VS); 5230 5231 // For each VF calculate its register usage. 5232 auto RUs = calculateRegisterUsage(VFs); 5233 5234 // Select the largest VF which doesn't require more registers than existing 5235 // ones. 5236 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true); 5237 for (int i = RUs.size() - 1; i >= 0; --i) { 5238 if (RUs[i].MaxLocalUsers <= TargetNumRegisters) { 5239 VF = VFs[i]; 5240 break; 5241 } 5242 } 5243 } 5244 5245 // If we optimize the program for size, avoid creating the tail loop. 5246 if (OptForSize) { 5247 // If we are unable to calculate the trip count then don't try to vectorize. 5248 if (TC < 2) { 5249 emitAnalysis( 5250 VectorizationReport() 5251 << "unable to calculate the loop count due to complex control flow"); 5252 DEBUG(dbgs() << "LV: Aborting. 
A tail loop is required with -Os/-Oz.\n"); 5253 return Factor; 5254 } 5255 5256 // Find the maximum SIMD width that can fit within the trip count. 5257 VF = TC % MaxVectorSize; 5258 5259 if (VF == 0) 5260 VF = MaxVectorSize; 5261 else { 5262 // If the trip count that we found modulo the vectorization factor is not 5263 // zero then we require a tail. 5264 emitAnalysis(VectorizationReport() 5265 << "cannot optimize for size and vectorize at the " 5266 "same time. Enable vectorization of this loop " 5267 "with '#pragma clang loop vectorize(enable)' " 5268 "when compiling with -Os/-Oz"); 5269 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); 5270 return Factor; 5271 } 5272 } 5273 5274 int UserVF = Hints->getWidth(); 5275 if (UserVF != 0) { 5276 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 5277 DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 5278 5279 Factor.Width = UserVF; 5280 return Factor; 5281 } 5282 5283 float Cost = expectedCost(1).first; 5284 #ifndef NDEBUG 5285 const float ScalarCost = Cost; 5286 #endif /* NDEBUG */ 5287 unsigned Width = 1; 5288 DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 5289 5290 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5291 // Ignore scalar width, because the user explicitly wants vectorization. 5292 if (ForceVectorization && VF > 1) { 5293 Width = 2; 5294 Cost = expectedCost(Width).first / (float)Width; 5295 } 5296 5297 for (unsigned i = 2; i <= VF; i *= 2) { 5298 // Notice that the vector loop needs to be executed less times, so 5299 // we need to divide the cost of the vector loops by the width of 5300 // the vector elements. 5301 VectorizationCostTy C = expectedCost(i); 5302 float VectorCost = C.first / (float)i; 5303 DEBUG(dbgs() << "LV: Vector loop of width " << i 5304 << " costs: " << (int)VectorCost << ".\n"); 5305 if (!C.second && !ForceVectorization) { 5306 DEBUG( 5307 dbgs() << "LV: Not considering vector loop of width " << i 5308 << " because it will not generate any vector instructions.\n"); 5309 continue; 5310 } 5311 if (VectorCost < Cost) { 5312 Cost = VectorCost; 5313 Width = i; 5314 } 5315 } 5316 5317 DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 5318 << "LV: Vectorization seems to be not beneficial, " 5319 << "but was forced by a user.\n"); 5320 DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 5321 Factor.Width = Width; 5322 Factor.Cost = Width * Cost; 5323 return Factor; 5324 } 5325 5326 std::pair<unsigned, unsigned> 5327 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5328 unsigned MinWidth = -1U; 5329 unsigned MaxWidth = 8; 5330 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5331 5332 // For each block. 5333 for (Loop::block_iterator bb = TheLoop->block_begin(), 5334 be = TheLoop->block_end(); 5335 bb != be; ++bb) { 5336 BasicBlock *BB = *bb; 5337 5338 // For each instruction in the loop. 5339 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 5340 Type *T = it->getType(); 5341 5342 // Skip ignored values. 5343 if (ValuesToIgnore.count(&*it)) 5344 continue; 5345 5346 // Only examine Loads, Stores and PHINodes. 5347 if (!isa<LoadInst>(it) && !isa<StoreInst>(it) && !isa<PHINode>(it)) 5348 continue; 5349 5350 // Examine PHI nodes that are reduction variables. Update the type to 5351 // account for the recurrence type. 
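    // E.g. (illustrative, assuming the recurrence analysis can prove the
    // narrower width): an i32 phi accumulating values known to fit in i8 can
    // have a recurrence type of i8; using that type here keeps the
    // smallest/widest-type computation from being skewed by the wide phi.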
5352 if (PHINode *PN = dyn_cast<PHINode>(it)) { 5353 if (!Legal->isReductionVariable(PN)) 5354 continue; 5355 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN]; 5356 T = RdxDesc.getRecurrenceType(); 5357 } 5358 5359 // Examine the stored values. 5360 if (StoreInst *ST = dyn_cast<StoreInst>(it)) 5361 T = ST->getValueOperand()->getType(); 5362 5363 // Ignore loaded pointer types and stored pointer types that are not 5364 // consecutive. However, we do want to take consecutive stores/loads of 5365 // pointer vectors into account. 5366 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&*it)) 5367 continue; 5368 5369 MinWidth = std::min(MinWidth, 5370 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5371 MaxWidth = std::max(MaxWidth, 5372 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5373 } 5374 } 5375 5376 return {MinWidth, MaxWidth}; 5377 } 5378 5379 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, 5380 unsigned VF, 5381 unsigned LoopCost) { 5382 5383 // -- The interleave heuristics -- 5384 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5385 // There are many micro-architectural considerations that we can't predict 5386 // at this level. For example, frontend pressure (on decode or fetch) due to 5387 // code size, or the number and capabilities of the execution ports. 5388 // 5389 // We use the following heuristics to select the interleave count: 5390 // 1. If the code has reductions, then we interleave to break the cross 5391 // iteration dependency. 5392 // 2. If the loop is really small, then we interleave to reduce the loop 5393 // overhead. 5394 // 3. We don't interleave if we think that we will spill registers to memory 5395 // due to the increased register pressure. 5396 5397 // When we optimize for size, we don't interleave. 5398 if (OptForSize) 5399 return 1; 5400 5401 // We used the distance for the interleave count. 5402 if (Legal->getMaxSafeDepDistBytes() != -1U) 5403 return 1; 5404 5405 // Do not interleave loops with a relatively small trip count. 5406 unsigned TC = SE->getSmallConstantTripCount(TheLoop); 5407 if (TC > 1 && TC < TinyTripCountInterleaveThreshold) 5408 return 1; 5409 5410 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1); 5411 DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 5412 << " registers\n"); 5413 5414 if (VF == 1) { 5415 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 5416 TargetNumRegisters = ForceTargetNumScalarRegs; 5417 } else { 5418 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 5419 TargetNumRegisters = ForceTargetNumVectorRegs; 5420 } 5421 5422 RegisterUsage R = calculateRegisterUsage({VF})[0]; 5423 // We divide by these constants so assume that we have at least one 5424 // instruction that uses at least one register. 5425 R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U); 5426 R.NumInstructions = std::max(R.NumInstructions, 1U); 5427 5428 // We calculate the interleave count using the following formula. 5429 // Subtract the number of loop invariants from the number of available 5430 // registers. These registers are used by all of the interleaved instances. 5431 // Next, divide the remaining registers by the number of registers that is 5432 // required by the loop, in order to estimate how many parallel instances 5433 // fit without causing spills. All of this is rounded down if necessary to be 5434 // a power of two. We want power of two interleave count to simplify any 5435 // addressing operations or alignment considerations. 
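  // Worked example (illustrative numbers): with 16 target registers, 3 of
  // them tied up by loop-invariant values, and at most 5 registers live at
  // once inside the loop,
  //   IC = PowerOf2Floor((16 - 3) / 5) = PowerOf2Floor(2) = 2.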
5436 unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) / 5437 R.MaxLocalUsers); 5438 5439 // Don't count the induction variable as interleaved. 5440 if (EnableIndVarRegisterHeur) 5441 IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) / 5442 std::max(1U, (R.MaxLocalUsers - 1))); 5443 5444 // Clamp the interleave ranges to reasonable counts. 5445 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF); 5446 5447 // Check if the user has overridden the max. 5448 if (VF == 1) { 5449 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5450 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5451 } else { 5452 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5453 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5454 } 5455 5456 // If we did not calculate the cost for VF (because the user selected the VF) 5457 // then we calculate the cost of VF here. 5458 if (LoopCost == 0) 5459 LoopCost = expectedCost(VF).first; 5460 5461 // Clamp the calculated IC to be between the 1 and the max interleave count 5462 // that the target allows. 5463 if (IC > MaxInterleaveCount) 5464 IC = MaxInterleaveCount; 5465 else if (IC < 1) 5466 IC = 1; 5467 5468 // Interleave if we vectorized this loop and there is a reduction that could 5469 // benefit from interleaving. 5470 if (VF > 1 && Legal->getReductionVars()->size()) { 5471 DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5472 return IC; 5473 } 5474 5475 // Note that if we've already vectorized the loop we will have done the 5476 // runtime check and so interleaving won't require further checks. 5477 bool InterleavingRequiresRuntimePointerCheck = 5478 (VF == 1 && Legal->getRuntimePointerChecking()->Need); 5479 5480 // We want to interleave small loops in order to reduce the loop overhead and 5481 // potentially expose ILP opportunities. 5482 DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'); 5483 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 5484 // We assume that the cost overhead is 1 and we use the cost model 5485 // to estimate the cost of the loop and interleave until the cost of the 5486 // loop overhead is about 5% of the cost of the loop. 5487 unsigned SmallIC = 5488 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5489 5490 // Interleave until store/load ports (estimated by max interleave count) are 5491 // saturated. 5492 unsigned NumStores = Legal->getNumStores(); 5493 unsigned NumLoads = Legal->getNumLoads(); 5494 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5495 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5496 5497 // If we have a scalar reduction (vector reductions are already dealt with 5498 // by this point), we can increase the critical path length if the loop 5499 // we're interleaving is inside another loop. Limit, by default to 2, so the 5500 // critical path only gets increased by one reduction operation. 
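    // For instance (illustrative): IC = 8 with two stores and one load gives
    // StoresIC = 4 and LoadsIC = 8; if this loop also carries a scalar
    // reduction and sits inside another loop, all three counts are clamped
    // below to MaxNestedScalarReductionIC (2 by default).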
5501     if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) {
5502       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5503       SmallIC = std::min(SmallIC, F);
5504       StoresIC = std::min(StoresIC, F);
5505       LoadsIC = std::min(LoadsIC, F);
5506     }
5507
5508     if (EnableLoadStoreRuntimeInterleave &&
5509         std::max(StoresIC, LoadsIC) > SmallIC) {
5510       DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5511       return std::max(StoresIC, LoadsIC);
5512     }
5513
5514     DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5515     return SmallIC;
5516   }
5517
5518   // Interleave if this is a large loop (small loops are already dealt with by
5519   // this point) that could benefit from interleaving.
5520   bool HasReductions = (Legal->getReductionVars()->size() > 0);
5521   if (TTI.enableAggressiveInterleaving(HasReductions)) {
5522     DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5523     return IC;
5524   }
5525
5526   DEBUG(dbgs() << "LV: Not Interleaving.\n");
5527   return 1;
5528 }
5529
5530 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5531 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
5532   // This function calculates the register usage by measuring the highest number
5533   // of values that are alive at a single location. Obviously, this is a very
5534   // rough estimation. We scan the loop in topological order and
5535   // assign a number to each instruction. We use RPO to ensure that defs are
5536   // met before their users. We assume that each instruction that has in-loop
5537   // users starts an interval. We record every time that an in-loop value is
5538   // used, so we have a list of the first and last occurrences of each
5539   // instruction. Next, we transpose this data structure into a multi map that
5540   // holds the list of intervals that *end* at a specific location. This multi
5541   // map allows us to perform a linear search. We scan the instructions linearly
5542   // and record each time that a new interval starts, by placing it in a set.
5543   // If we find this value in the multi-map then we remove it from the set.
5544   // The max register usage is the maximum size of the set.
5545   // We also search for instructions that are defined outside the loop, but are
5546   // used inside the loop. We need this number separately from the max-interval
5547   // usage number because when we unroll, loop-invariant values do not take
5548   // more registers.
5549   LoopBlocksDFS DFS(TheLoop);
5550   DFS.perform(LI);
5551
5552   RegisterUsage RU;
5553   RU.NumInstructions = 0;
5554
5555   // Each 'key' in the map opens a new interval. The values
5556   // of the map are the index of the 'last seen' usage of the
5557   // instruction that is the key.
5558   typedef DenseMap<Instruction *, unsigned> IntervalMap;
5559   // Maps an instruction index to the instruction itself.
5560   DenseMap<unsigned, Instruction *> IdxToInstr;
5561   // Marks the end of each interval.
5562   IntervalMap EndPoint;
5563   // Saves the set of instructions that are used in the loop.
5564   SmallSet<Instruction *, 8> Ends;
5565   // Saves the list of values that are used in the loop but are
5566   // defined outside the loop, such as arguments and constants.
5567   SmallPtrSet<Value *, 8> LoopInvariants;
5568
5569   unsigned Index = 0;
5570   for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(), be = DFS.endRPO();
5571        bb != be; ++bb) {
5572     RU.NumInstructions += (*bb)->size();
5573     for (Instruction &I : **bb) {
5574       IdxToInstr[Index++] = &I;
5575
5576       // Save the end location of each USE.
5577 for (unsigned i = 0; i < I.getNumOperands(); ++i) { 5578 Value *U = I.getOperand(i); 5579 Instruction *Instr = dyn_cast<Instruction>(U); 5580 5581 // Ignore non-instruction values such as arguments, constants, etc. 5582 if (!Instr) 5583 continue; 5584 5585 // If this instruction is outside the loop then record it and continue. 5586 if (!TheLoop->contains(Instr)) { 5587 LoopInvariants.insert(Instr); 5588 continue; 5589 } 5590 5591 // Overwrite previous end points. 5592 EndPoint[Instr] = Index; 5593 Ends.insert(Instr); 5594 } 5595 } 5596 } 5597 5598 // Saves the list of intervals that end with the index in 'key'. 5599 typedef SmallVector<Instruction *, 2> InstrList; 5600 DenseMap<unsigned, InstrList> TransposeEnds; 5601 5602 // Transpose the EndPoints to a list of values that end at each index. 5603 for (IntervalMap::iterator it = EndPoint.begin(), e = EndPoint.end(); it != e; 5604 ++it) 5605 TransposeEnds[it->second].push_back(it->first); 5606 5607 SmallSet<Instruction *, 8> OpenIntervals; 5608 5609 // Get the size of the widest register. 5610 unsigned MaxSafeDepDist = -1U; 5611 if (Legal->getMaxSafeDepDistBytes() != -1U) 5612 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 5613 unsigned WidestRegister = 5614 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist); 5615 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5616 5617 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5618 SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0); 5619 5620 DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5621 5622 // A lambda that gets the register usage for the given type and VF. 5623 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 5624 if (Ty->isTokenTy()) 5625 return 0U; 5626 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 5627 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 5628 }; 5629 5630 for (unsigned int i = 0; i < Index; ++i) { 5631 Instruction *I = IdxToInstr[i]; 5632 // Ignore instructions that are never used within the loop. 5633 if (!Ends.count(I)) 5634 continue; 5635 5636 // Skip ignored values. 5637 if (ValuesToIgnore.count(I)) 5638 continue; 5639 5640 // Remove all of the instructions that end at this location. 5641 InstrList &List = TransposeEnds[i]; 5642 for (unsigned int j = 0, e = List.size(); j < e; ++j) 5643 OpenIntervals.erase(List[j]); 5644 5645 // For each VF find the maximum usage of registers. 5646 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 5647 if (VFs[j] == 1) { 5648 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 5649 continue; 5650 } 5651 5652 // Count the number of live intervals. 5653 unsigned RegUsage = 0; 5654 for (auto Inst : OpenIntervals) 5655 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 5656 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 5657 } 5658 5659 DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 5660 << OpenIntervals.size() << '\n'); 5661 5662 // Add the current instruction to the list of open intervals. 
5663 OpenIntervals.insert(I); 5664 } 5665 5666 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 5667 unsigned Invariant = 0; 5668 if (VFs[i] == 1) 5669 Invariant = LoopInvariants.size(); 5670 else { 5671 for (auto Inst : LoopInvariants) 5672 Invariant += GetRegUsage(Inst->getType(), VFs[i]); 5673 } 5674 5675 DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); 5676 DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); 5677 DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n'); 5678 DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n'); 5679 5680 RU.LoopInvariantRegs = Invariant; 5681 RU.MaxLocalUsers = MaxUsages[i]; 5682 RUs[i] = RU; 5683 } 5684 5685 return RUs; 5686 } 5687 5688 LoopVectorizationCostModel::VectorizationCostTy 5689 LoopVectorizationCostModel::expectedCost(unsigned VF) { 5690 VectorizationCostTy Cost; 5691 5692 // For each block. 5693 for (Loop::block_iterator bb = TheLoop->block_begin(), 5694 be = TheLoop->block_end(); 5695 bb != be; ++bb) { 5696 VectorizationCostTy BlockCost; 5697 BasicBlock *BB = *bb; 5698 5699 // For each instruction in the old loop. 5700 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 5701 // Skip dbg intrinsics. 5702 if (isa<DbgInfoIntrinsic>(it)) 5703 continue; 5704 5705 // Skip ignored values. 5706 if (ValuesToIgnore.count(&*it)) 5707 continue; 5708 5709 VectorizationCostTy C = getInstructionCost(&*it, VF); 5710 5711 // Check if we should override the cost. 5712 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 5713 C.first = ForceTargetInstructionCost; 5714 5715 BlockCost.first += C.first; 5716 BlockCost.second |= C.second; 5717 DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF " 5718 << VF << " For instruction: " << *it << '\n'); 5719 } 5720 5721 // We assume that if-converted blocks have a 50% chance of being executed. 5722 // When the code is scalar then some of the blocks are avoided due to CF. 5723 // When the code is vectorized we execute all code paths. 5724 if (VF == 1 && Legal->blockNeedsPredication(*bb)) 5725 BlockCost.first /= 2; 5726 5727 Cost.first += BlockCost.first; 5728 Cost.second |= BlockCost.second; 5729 } 5730 5731 return Cost; 5732 } 5733 5734 /// \brief Check if the load/store instruction \p I may be translated into 5735 /// gather/scatter during vectorization. 5736 /// 5737 /// Pointer \p Ptr specifies address in memory for the given scalar memory 5738 /// instruction. We need it to retrieve data type. 5739 /// Using gather/scatter is possible when it is supported by target. 5740 static bool isGatherOrScatterLegal(Instruction *I, Value *Ptr, 5741 LoopVectorizationLegality *Legal) { 5742 Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType(); 5743 return (isa<LoadInst>(I) && Legal->isLegalMaskedGather(DataTy)) || 5744 (isa<StoreInst>(I) && Legal->isLegalMaskedScatter(DataTy)); 5745 } 5746 5747 /// \brief Check whether the address computation for a non-consecutive memory 5748 /// access looks like an unlikely candidate for being merged into the indexing 5749 /// mode. 5750 /// 5751 /// We look for a GEP which has one index that is an induction variable and all 5752 /// other indices are loop invariant. If the stride of this access is also 5753 /// within a small bound we decide that this address computation can likely be 5754 /// merged into the addressing mode. 5755 /// In all other cases, we identify the address computation as complex. 
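/// For example (illustrative, using the notation from the body below): a GEP
/// of the form (ptr, %inv0, %ind, %inv1), where only %ind varies with the
/// loop and the resulting pointer strides by a small constant (at most
/// MaxMergeDistance bytes), is considered cheap enough to fold into the
/// addressing mode; a pointer whose SCEV is not an add recurrence with a
/// small constant step is classified as complex.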
5756 static bool isLikelyComplexAddressComputation(Value *Ptr, 5757 LoopVectorizationLegality *Legal, 5758 ScalarEvolution *SE, 5759 const Loop *TheLoop) { 5760 GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr); 5761 if (!Gep) 5762 return true; 5763 5764 // We are looking for a gep with all loop invariant indices except for one 5765 // which should be an induction variable. 5766 unsigned NumOperands = Gep->getNumOperands(); 5767 for (unsigned i = 1; i < NumOperands; ++i) { 5768 Value *Opd = Gep->getOperand(i); 5769 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 5770 !Legal->isInductionVariable(Opd)) 5771 return true; 5772 } 5773 5774 // Now we know we have a GEP ptr, %inv, %ind, %inv. Make sure that the step 5775 // can likely be merged into the address computation. 5776 unsigned MaxMergeDistance = 64; 5777 5778 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Ptr)); 5779 if (!AddRec) 5780 return true; 5781 5782 // Check the step is constant. 5783 const SCEV *Step = AddRec->getStepRecurrence(*SE); 5784 // Calculate the pointer stride and check if it is consecutive. 5785 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step); 5786 if (!C) 5787 return true; 5788 5789 const APInt &APStepVal = C->getAPInt(); 5790 5791 // Huge step value - give up. 5792 if (APStepVal.getBitWidth() > 64) 5793 return true; 5794 5795 int64_t StepVal = APStepVal.getSExtValue(); 5796 5797 return StepVal > MaxMergeDistance; 5798 } 5799 5800 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 5801 return Legal->hasStride(I->getOperand(0)) || 5802 Legal->hasStride(I->getOperand(1)); 5803 } 5804 5805 LoopVectorizationCostModel::VectorizationCostTy 5806 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 5807 // If we know that this instruction will remain uniform, check the cost of 5808 // the scalar version. 5809 if (Legal->isUniformAfterVectorization(I)) 5810 VF = 1; 5811 5812 Type *VectorTy; 5813 unsigned C = getInstructionCost(I, VF, VectorTy); 5814 5815 bool TypeNotScalarized = 5816 VF > 1 && !VectorTy->isVoidTy() && TTI.getNumberOfParts(VectorTy) < VF; 5817 return VectorizationCostTy(C, TypeNotScalarized); 5818 } 5819 5820 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 5821 unsigned VF, 5822 Type *&VectorTy) { 5823 Type *RetTy = I->getType(); 5824 if (VF > 1 && MinBWs.count(I)) 5825 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 5826 VectorTy = ToVectorTy(RetTy, VF); 5827 5828 // TODO: We need to estimate the cost of intrinsic calls. 5829 switch (I->getOpcode()) { 5830 case Instruction::GetElementPtr: 5831 // We mark this instruction as zero-cost because the cost of GEPs in 5832 // vectorized code depends on whether the corresponding memory instruction 5833 // is scalarized or not. Therefore, we handle GEPs with the memory 5834 // instruction cost. 5835 return 0; 5836 case Instruction::Br: { 5837 return TTI.getCFInstrCost(I->getOpcode()); 5838 } 5839 case Instruction::PHI: { 5840 auto *Phi = cast<PHINode>(I); 5841 5842 // First-order recurrences are replaced by vector shuffles inside the loop. 5843 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 5844 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 5845 VectorTy, VF - 1, VectorTy); 5846 5847 // TODO: IF-converted IFs become selects. 
    return 0;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go
    // away.
    if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
      return 0;
    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueProperties Op1VP =
        TargetTransformInfo::OP_None;
    TargetTransformInfo::OperandValueProperties Op2VP =
        TargetTransformInfo::OP_None;
    Value *Op2 = I->getOperand(1);

    // Check for a splat of a constant or for a non-uniform vector of
    // constants.
    if (isa<ConstantInt>(Op2)) {
      ConstantInt *CInt = cast<ConstantInt>(Op2);
      if (CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;
      Op2VK = TargetTransformInfo::OK_UniformConstantValue;
    } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
      Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      Constant *SplatValue = cast<Constant>(Op2)->getSplatValue();
      if (SplatValue) {
        ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue);
        if (CInt && CInt->getValue().isPowerOf2())
          Op2VP = TargetTransformInfo::OP_PowerOf2;
        Op2VK = TargetTransformInfo::OK_UniformConstantValue;
      }
    }

    return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, Op2VK,
                                      Op1VP, Op2VP);
  }
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
    bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
    Type *CondTy = SI->getCondition()->getType();
    if (!ScalarCond)
      CondTy = VectorType::get(CondTy, VF);

    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy);
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ValTy = I->getOperand(0)->getType();
    Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
    auto It = MinBWs.find(Op0AsInstruction);
    if (VF > 1 && It != MinBWs.end())
      ValTy = IntegerType::get(ValTy->getContext(), It->second);
    VectorTy = ToVectorTy(ValTy, VF);
    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy);
  }
  case Instruction::Store:
  case Instruction::Load: {
    StoreInst *SI = dyn_cast<StoreInst>(I);
    LoadInst *LI = dyn_cast<LoadInst>(I);
    Type *ValTy = (SI ? SI->getValueOperand()->getType() : LI->getType());
    VectorTy = ToVectorTy(ValTy, VF);

    unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment();
    unsigned AS =
        SI ? SI->getPointerAddressSpace() : LI->getPointerAddressSpace();
    Value *Ptr = SI ? SI->getPointerOperand() : LI->getPointerOperand();
    // We add the cost of address computation here instead of with the GEP
    // instruction because only here we know whether the operation is
    // scalarized.
    if (VF == 1)
      return TTI.getAddressComputationCost(VectorTy) +
             TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);

    if (LI && Legal->isUniform(Ptr)) {
      // Scalar load + broadcast.
      unsigned Cost = TTI.getAddressComputationCost(ValTy->getScalarType());
      Cost += TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
                                  Alignment, AS);
      return Cost +
             TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, ValTy);
    }

    // For an interleaved access, calculate the total cost of the whole
    // interleave group.
    if (Legal->isAccessInterleaved(I)) {
      auto Group = Legal->getInterleavedAccessGroup(I);
      assert(Group && "Failed to get an interleaved access group.");

      // Only calculate the cost once at the insert position.
      if (Group->getInsertPos() != I)
        return 0;

      unsigned InterleaveFactor = Group->getFactor();
      Type *WideVecTy =
          VectorType::get(VectorTy->getVectorElementType(),
                          VectorTy->getVectorNumElements() * InterleaveFactor);

      // Holds the indices of existing members in an interleaved load group.
      // An interleaved store group doesn't need this as it doesn't allow gaps.
      SmallVector<unsigned, 4> Indices;
      if (LI) {
        for (unsigned i = 0; i < InterleaveFactor; i++)
          if (Group->getMember(i))
            Indices.push_back(i);
      }

      // Calculate the cost of the whole interleaved group.
      unsigned Cost = TTI.getInterleavedMemoryOpCost(
          I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
          Group->getAlignment(), AS);

      if (Group->isReverse())
        Cost +=
            Group->getNumMembers() *
            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);

      // FIXME: An interleaved load group with a huge gap could be even more
      // expensive than scalar operations. We could then ignore such a group
      // and use scalar operations instead.
      return Cost;
    }

    // Scalarized loads/stores.
    int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
    bool UseGatherOrScatter =
        (ConsecutiveStride == 0) && isGatherOrScatterLegal(I, Ptr, Legal);

    bool Reverse = ConsecutiveStride < 0;
    const DataLayout &DL = I->getModule()->getDataLayout();
    unsigned ScalarAllocatedSize = DL.getTypeAllocSize(ValTy);
    unsigned VectorElementSize = DL.getTypeStoreSize(VectorTy) / VF;
    if ((!ConsecutiveStride && !UseGatherOrScatter) ||
        ScalarAllocatedSize != VectorElementSize) {
      bool IsComplexComputation =
          isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop);
      unsigned Cost = 0;
      // The cost of extracting from the value vector and pointer vector.
      Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
      for (unsigned i = 0; i < VF; ++i) {
        // The cost of extracting the pointer operand.
        Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, PtrTy, i);
        // In case of a STORE, the cost of ExtractElement from the vector.
        // In case of a LOAD, the cost of InsertElement into the returned
        // vector.
        Cost += TTI.getVectorInstrCost(SI ? Instruction::ExtractElement
                                          : Instruction::InsertElement,
                                       VectorTy, i);
      }

      // The cost of the scalar loads/stores.
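      // For example (illustrative arithmetic), with VF = 4 this adds four
      // address computations and four scalar memory operations on top of the
      // extract/insert costs accumulated above.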
      Cost += VF * TTI.getAddressComputationCost(PtrTy, IsComplexComputation);
      Cost += VF *
              TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
                                  Alignment, AS);
      return Cost;
    }

    unsigned Cost = TTI.getAddressComputationCost(VectorTy);
    if (UseGatherOrScatter) {
      assert(ConsecutiveStride == 0 &&
             "Gather/Scatter are not used for consecutive stride");
      return Cost +
             TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
                                        Legal->isMaskRequired(I), Alignment);
    }
    // Wide loads/stores.
    if (Legal->isMaskRequired(I))
      Cost +=
          TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
    else
      Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);

    if (Reverse)
      Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
    return Cost;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    // We optimize the truncation of induction variables; the cost of such a
    // truncation is the same as the cost of the scalar operation.
    if (I->getOpcode() == Instruction::Trunc &&
        Legal->isInductionVariable(I->getOperand(0)))
      return TTI.getCastInstrCost(I->getOpcode(), I->getType(),
                                  I->getOperand(0)->getType());

    Type *SrcScalarTy = I->getOperand(0)->getType();
    Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF);
    if (VF > 1 && MinBWs.count(I)) {
      // This cast is going to be shrunk. This may remove the cast or turn it
      // into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
      //
      // Calculate the modified src and dest types.
      Type *MinVecTy = VectorTy;
      if (I->getOpcode() == Instruction::Trunc) {
        SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      } else if (I->getOpcode() == Instruction::ZExt ||
                 I->getOpcode() == Instruction::SExt) {
        SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      }
    }

    return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy);
  }
  case Instruction::Call: {
    bool NeedToScalarize;
    CallInst *CI = cast<CallInst>(I);
    unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
    if (getVectorIntrinsicIDForCall(CI, TLI))
      return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
    return CallCost;
  }
  default: {
    // We are scalarizing the instruction. Return the cost of the scalar
    // instruction, plus the cost of inserting the results into and extracting
    // the operands from vectors, times the vector width.
    unsigned Cost = 0;

    if (!RetTy->isVoidTy() && VF != 1) {
      unsigned InsCost =
          TTI.getVectorInstrCost(Instruction::InsertElement, VectorTy);
      unsigned ExtCost =
          TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy);

      // The cost of inserting the results plus extracting each one of the
      // operands.
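      // For example (illustrative arithmetic), with VF = 4 and an instruction
      // with two operands this adds 4 * (InsCost + 2 * ExtCost).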
      Cost += VF * (InsCost + ExtCost * I->getNumOperands());
    }

    // The cost of executing VF copies of the scalar instruction. This opcode
    // is unknown. Assume that it is the same as 'mul'.
    Cost += VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy);
    return Cost;
  }
  } // end of switch.
}

char LoopVectorize::ID = 0;
static const char lv_name[] = "Loop Vectorization";
INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LoopAccessAnalysis)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)

namespace llvm {
Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
  return new LoopVectorize(NoUnrolling, AlwaysVectorize);
}
}

bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
  // Check for a store.
  if (StoreInst *ST = dyn_cast<StoreInst>(Inst))
    return Legal->isConsecutivePtr(ST->getPointerOperand()) != 0;

  // Check for a load.
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
    return Legal->isConsecutivePtr(LI->getPointerOperand()) != 0;

  return false;
}

void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
                                             bool IfPredicateStore) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
  // Holds vector parameters or scalars, in the case of uniform values.
  SmallVector<VectorParts, 4> Params;

  setDebugLocFromInst(Builder, Instr);

  // Find all of the vectorized parameters.
  for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
    Value *SrcOp = Instr->getOperand(op);

    // If we are accessing the old induction variable, use the new one.
    if (SrcOp == OldInduction) {
      Params.push_back(getVectorValue(SrcOp));
      continue;
    }

    // Try using previously calculated values.
    Instruction *SrcInst = dyn_cast<Instruction>(SrcOp);

    // If the src is an instruction that appeared earlier in the basic block,
    // then it should already be vectorized.
    if (SrcInst && OrigLoop->contains(SrcInst)) {
      assert(WidenMap.has(SrcInst) && "Source operand is unavailable");
      // The parameter is a vector value from earlier.
      Params.push_back(WidenMap.get(SrcInst));
    } else {
      // The parameter is a scalar from outside the loop. Maybe even a
      // constant.
      VectorParts Scalars;
      Scalars.append(UF, SrcOp);
      Params.push_back(Scalars);
    }
  }

  assert(Params.size() == Instr->getNumOperands() &&
         "Invalid number of operands");

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Value *UndefVec = IsVoidRetTy ? nullptr : UndefValue::get(Instr->getType());
  // Create a new entry in the WidenMap and initialize it to Undef or Null.
  VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);

  VectorParts Cond;
  if (IfPredicateStore) {
    assert(Instr->getParent()->getSinglePredecessor() &&
           "Only support single predecessor blocks");
    Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(),
                          Instr->getParent());
  }

  // For each vector unroll 'part':
  for (unsigned Part = 0; Part < UF; ++Part) {
    // For each scalar that we create:

    // Start an "if (pred) a[i] = ..." block.
    Value *Cmp = nullptr;
    if (IfPredicateStore) {
      if (Cond[Part]->getType()->isVectorTy())
        Cond[Part] =
            Builder.CreateExtractElement(Cond[Part], Builder.getInt32(0));
      Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cond[Part],
                               ConstantInt::get(Cond[Part]->getType(), 1));
    }

    Instruction *Cloned = Instr->clone();
    if (!IsVoidRetTy)
      Cloned->setName(Instr->getName() + ".cloned");
    // Replace the operands of the cloned instruction with extracted scalars.
    for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
      Value *Op = Params[op][Part];
      Cloned->setOperand(op, Op);
    }

    // Place the cloned scalar in the new loop.
    Builder.Insert(Cloned);

    // If we just cloned a new assumption, add it to the assumption cache.
    if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
      if (II->getIntrinsicID() == Intrinsic::assume)
        AC->registerAssumption(II);

    // If the original scalar returns a value, we need to place it in a vector
    // so that future users will be able to use it.
    if (!IsVoidRetTy)
      VecResults[Part] = Cloned;

    // End if-block.
    if (IfPredicateStore)
      PredicatedStores.push_back(std::make_pair(cast<StoreInst>(Cloned), Cmp));
  }
}

void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) {
  StoreInst *SI = dyn_cast<StoreInst>(Instr);
  bool IfPredicateStore = (SI && Legal->blockNeedsPredication(SI->getParent()));

  return scalarizeInstruction(Instr, IfPredicateStore);
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx,
                                        const SCEV *StepSCEV) {
  const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
  SCEVExpander Exp(*PSE.getSE(), DL, "induction");
  Value *StepValue = Exp.expandCodeFor(StepSCEV, StepSCEV->getType(),
                                       &*Builder.GetInsertPoint());
  return getStepVector(Val, StartIdx, StepValue);
}

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
  Type *ITy = Val->getType();
  assert(!ITy->isVectorTy() && "Val must be a scalar");
  Constant *C = ConstantInt::get(ITy, StartIdx);
  return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
}
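
// For example (illustrative IR; %iv and %stride are hypothetical names), the
// scalar getStepVector above, called with Val = %iv, StartIdx = 2, and a
// non-constant Step = %stride of type i64, emits:
//   %0 = mul i64 2, %stride
//   %induction = add i64 %iv, %0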