//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//
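//
// A minimal illustration (the loop is hypothetical, not from any benchmark)
// of what the widening described above does, for VF = 4: the scalar loop
//
//   for (i = 0; i < n; ++i)
//     A[i] = B[i] + 42;
//
// is conceptually rewritten so that each iteration processes four elements,
//
//   for (i = 0; i < n; i += 4)
//     A[i:i+3] = B[i:i+3] + <42, 42, 42, 42>;
//
// with a scalar epilogue loop handling the remaining n % 4 iterations.
//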
#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <algorithm>
#include <functional>
#include <map>
#include <tuple>

using namespace llvm;
using namespace llvm::PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// We don't vectorize loops with a known constant trip count below this number.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Don't vectorize loops with a constant "
             "trip count that is smaller than this "
             "value."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));
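// These hidden cl::opts can be exercised through 'opt'. A sketch of such an
// invocation (the flag names are the ones defined in this file; the rest of
// the command line is illustrative):
//
//   opt -loop-vectorize -vectorizer-min-trip-count=4 \
//       -vectorizer-maximize-bandwidth -S input.ll -o output.ll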
/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///   if (Stride1 == 1 && Stride2 == 1) {
///     for (i = 0; i < N; i+=4)
///       A[i:i+3] += ...
///   } else
///     ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));
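// Interleaving here means unrolling of the *vector* loop. A sketch for
// VF = 4 and an interleave count of 2: each vector-loop iteration then covers
// eight scalar iterations, issuing two independent <4 x ...> operations so
// that more load/store ports can be kept busy.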
/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(false), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

namespace {

// Forward declarations.
class LoopVectorizeHints;
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;

/// \brief This modifies LoopAccessReport to initialize the message with the
/// loop-vectorizer-specific part.
class VectorizationReport : public LoopAccessReport {
public:
  VectorizationReport(Instruction *I = nullptr)
      : LoopAccessReport("loop not vectorized: ", I) {}

  /// \brief This allows promotion of the loop-access analysis report into the
  /// loop-vectorizer report. It modifies the message to add the
  /// loop-vectorizer-specific part of the message.
  explicit VectorizationReport(const LoopAccessReport &R)
      : LoopAccessReport(Twine("loop not vectorized: ") + R.str(),
                         R.getInstr()) {}
};

/// A helper function for converting scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}
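// For example, ToVectorTy(i32, 4) yields <4 x i32>, while ToVectorTy(i32, 1)
// and ToVectorTy(void, VF) return the input type unchanged.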
/// A helper function that returns the GEP instruction and knows how to skip a
/// 'bitcast'. The 'bitcast' may be skipped if the source and the destination
/// pointee types of the 'bitcast' have the same size.
/// For example:
///   bitcast double** %var to i64* - can be skipped
///   bitcast double** %var to i8*  - cannot
static GetElementPtrInst *getGEPInstruction(Value *Ptr) {
  if (isa<GetElementPtrInst>(Ptr))
    return cast<GetElementPtrInst>(Ptr);

  if (isa<BitCastInst>(Ptr) &&
      isa<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0))) {
    Type *BitcastTy = Ptr->getType();
    Type *GEPTy = cast<BitCastInst>(Ptr)->getSrcTy();
    if (!isa<PointerType>(BitcastTy) || !isa<PointerType>(GEPTy))
      return nullptr;
    Type *Pointee1Ty = cast<PointerType>(BitcastTy)->getPointerElementType();
    Type *Pointee2Ty = cast<PointerType>(GEPTy)->getPointerElementType();
    const DataLayout &DL = cast<BitCastInst>(Ptr)->getModule()->getDataLayout();
    if (DL.getTypeSizeInBits(Pointee1Ty) == DL.getTypeSizeInBits(Pointee2Ty))
      return cast<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0));
  }
  return nullptr;
}

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found, for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      unsigned VecWidth, unsigned UnrollFactor)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Induction(nullptr),
        OldInduction(nullptr), WidenMap(UnrollFactor), TripCount(nullptr),
        VectorTripCount(nullptr), Legal(nullptr), AddedSafetyChecks(false) {}

  // Perform the actual loop widening (vectorization).
  // MinimumBitWidths maps scalar integer values to the smallest bitwidth they
  // can be validly truncated to. The cost model has assumed this truncation
  // will happen when vectorizing.
  void vectorize(LoopVectorizationLegality *L,
                 MapVector<Instruction *, uint64_t> MinimumBitWidths) {
    MinBWs = MinimumBitWidths;
    Legal = L;
    // Create a new empty loop. Unlink the old loop and connect the new one.
    createEmptyLoop();
    // Widen each instruction in the old loop to a new one in the new loop.
    // Use the Legality module to find the induction and reduction variables.
    vectorizeLoop();
  }

  // Return true if any runtime check is added.
  bool IsSafetyChecksAdded() { return AddedSafetyChecks; }

  virtual ~InnerLoopVectorizer() {}

protected:
  /// A small list of PHINodes.
  typedef SmallVector<PHINode *, 4> PhiVector;
  /// When we unroll loops we have multiple vector values for each scalar.
  /// This data structure holds the unrolled and vectorized values that
  /// originated from one scalar instruction.
  typedef SmallVector<Value *, 2> VectorParts;

  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
      EdgeMaskCache;

  /// Create an empty loop, based on the loop ranges of the old loop.
  void createEmptyLoop();
  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);
  /// Copy and widen the instructions from the old loop.
  virtual void vectorizeLoop();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// \brief The Loop exit block may have single value PHI nodes where the
  /// incoming value is 'Undef'. While vectorizing we only handled real values
  /// that were defined inside the loop. Here we fix the 'undef case'.
  /// See PR14725.
  void fixLCSSAPHIs();

  /// Shrinks vector element sizes based on information in "MinBWs".
  void truncateToMinimalBitwidths();

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);
  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// A helper function to vectorize a single BB within the innermost loop.
  void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VectorParts &Entry, unsigned UF,
                           unsigned VF, PhiVector *PV);

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars. If \p IfPredicateStore is true we need to 'hide' each
  /// scalarized instruction behind an if block predicated on the control
  /// dependence of the instruction.
  virtual void scalarizeInstruction(Instruction *Instr,
                                    bool IfPredicateStore = false);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// This is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);
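  // A broadcast of a loop-invariant i32 %x at VF = 4 is typically emitted via
  // the usual insertelement/shufflevector splat idiom (illustrative IR):
  //   %bc.ins   = insertelement <4 x i32> undef, i32 %x, i32 0
  //   %bc.splat = shufflevector <4 x i32> %bc.ins, <4 x i32> undef,
  //                             <4 x i32> zeroinitializer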
  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step);

  /// When we go over instructions in the basic block we rely on previous
  /// values within the current basic block or on loop invariant values.
  /// When we widen (vectorize) values we place them in the map. If the values
  /// are not within the map, they have to be loop invariant, so we simply
  /// broadcast them into a vector.
  VectorParts &getVectorValue(Value *V);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Emit a bypass check to see if the trip count would overflow, or we
  /// wouldn't have enough iterations to execute one vector loop.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if the vector trip count is nonzero.
  void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, const Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(SmallVectorImpl<Value *> &To, const Instruction *From);

  /// This is a helper class that holds the vectorizer state. It maps scalar
  /// instructions to vector instructions. When the code is 'unrolled' then
  /// a single scalar value is mapped to multiple vector parts. The parts
  /// are stored in the VectorPart type.
  struct ValueMap {
    /// C'tor. UnrollFactor controls the number of vectors ('parts') that
    /// are mapped.
    ValueMap(unsigned UnrollFactor) : UF(UnrollFactor) {}

    /// \return True if 'Key' is saved in the Value Map.
    bool has(Value *Key) const { return MapStorage.count(Key); }

    /// Initializes a new entry in the map. Sets all of the vector parts to the
    /// same value 'Val'.
    /// \return A reference to a vector with splat values.
    VectorParts &splat(Value *Key, Value *Val) {
      VectorParts &Entry = MapStorage[Key];
      Entry.assign(UF, Val);
      return Entry;
    }
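    // Sketch of the intended use: with UF = 2, splat(K, V) maps K to the two
    // parts {V, V}; a later get(K) hands back the same two slots, so each
    // unrolled copy of an instruction can fill in (or read) its own part.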
    ///\return A reference to the value that is stored at 'Key'.
    VectorParts &get(Value *Key) {
      VectorParts &Entry = MapStorage[Key];
      if (Entry.empty())
        Entry.resize(UF);
      assert(Entry.size() == UF);
      return Entry;
    }

  private:
    /// The unroll factor. Each entry in the map stores this number of vector
    /// elements.
    unsigned UF;

    /// Map storage. We use std::map and not DenseMap because insertions into
    /// a dense map invalidate its iterators.
    std::map<Value *, VectorParts> MapStorage;
  };

  /// The original loop.
  Loop *OrigLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;
  /// Loop Info.
  LoopInfo *LI;
  /// Dominator Tree.
  DominatorTree *DT;
  /// Alias Analysis.
  AliasAnalysis *AA;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Assumption Cache.
  AssumptionCache *AC;

  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

protected:
  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;
  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;
  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;
  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;
  /// The vector loop body.
  SmallVector<BasicBlock *, 4> LoopVectorBody;
  /// The scalar loop body.
  BasicBlock *LoopScalarBody;
  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction;
  /// The induction variable of the old basic block.
  PHINode *OldInduction;
  /// Maps scalars to widened vectors.
  ValueMap WidenMap;
  /// Store instructions that should be predicated, as a pair
  /// <StoreInst, Predicate>.
  SmallVector<std::pair<StoreInst *, Value *>, 4> PredicatedStores;
  EdgeMaskCache MaskCache;
  /// Trip count of the original loop.
  Value *TripCount;
  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount;

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be
  /// truncated to this type.
  MapVector<Instruction *, uint64_t> MinBWs;
  LoopVectorizationLegality *Legal;

  // Record whether runtime check is added.
  bool AddedSafetyChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    unsigned UnrollFactor)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, 1,
                            UnrollFactor) {}

private:
  void scalarizeInstruction(Instruction *Instr,
                            bool IfPredicateStore = false) override;
  void vectorizeMemoryInstruction(Instruction *Instr) override;
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step) override;
  Value *reverseVector(Value *Vec) override;
};

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

/// \brief Set the debug location in the builder using the debug location in
/// the instruction.
static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr))
    B.SetCurrentDebugLocation(Inst->getDebugLoc());
  else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

/// \brief Propagate known metadata from one instruction to another.
static void propagateMetadata(Instruction *To, const Instruction *From) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  From->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto M : Metadata) {
    unsigned Kind = M.first;

    // These are safe to transfer (this is safe for TBAA, even when we
    // if-convert, because should that metadata have had a control dependency
    // on the condition, and thus actually aliased with some other
    // non-speculated memory access when the condition was false, this would be
    // caught by the runtime overlap checks).
    if (Kind != LLVMContext::MD_tbaa && Kind != LLVMContext::MD_alias_scope &&
        Kind != LLVMContext::MD_noalias && Kind != LLVMContext::MD_fpmath &&
        Kind != LLVMContext::MD_nontemporal)
      continue;

    To->setMetadata(Kind, M.second);
  }
}

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      const Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(SmallVectorImpl<Value *> &To,
                                      const Instruction *From) {
  for (Value *V : To)
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
}

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is related to the leader and it could be
  /// negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The largest index is always less than the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }
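  // A worked example of the key bookkeeping above, for a group of factor 4
  // whose leader is the access to A[i+1]: the leader gets key 0. Inserting
  // the access to A[i+2] with Index 1 sets LargestKey = 1; inserting the
  // access to A[i] with Index -1 (it precedes the current leader) passes the
  // LargestKey - Key < Factor check and sets SmallestKey = -1. getIndex()
  // later rebases the keys {-1, 0, 1} to the zero-based indices {0, 1, 2}.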
  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if the group contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey;
  int LargestKey;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32              // Insert Position
  //      %add = add i32 %even          // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32                // Def of %odd
  //      store i32 %odd                // Insert Position
  Instruction *InsertPos;
};

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise it's meaningless to do the analysis, as vectorization
/// of interleaved accesses is unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the members and the group in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT)
      : PSE(PSE), TheLoop(L), DT(DT) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr doesn't belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;
  Loop *TheLoop;
  DominatorTree *DT;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;
  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor(int Stride, const SCEV *Scev, unsigned Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    StrideDescriptor() : Stride(0), Scev(nullptr), Size(0), Align(0) {}

    int Stride;       // The access's stride. It is negative for a reverse access.
    const SCEV *Scev; // The scalar expression of this access.
    unsigned Size;    // The size of the memory object.
    unsigned Align;   // The alignment of this access.
  };

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStridedAccesses(
      MapVector<Instruction *, StrideDescriptor> &StrideAccesses,
      const ValueToValueMap &Strides);
};

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked us to.
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;
  /// Vectorization interleave factor.
  Hint Interleave;
  /// Vectorization forced.
  Hint Force;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };
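  // The hints read and written by this class live on the loop's !llvm.loop
  // metadata node. A sketch of what that looks like for width 4 and
  // interleave count 2 (the node numbers are illustrative):
  //
  //   br i1 %exitcond, label %exit, label %loop, !llvm.loop !0
  //   ...
  //   !0 = distinct !{!0, !1, !2}   ; first operand refers to the node itself
  //   !1 = !{!"llvm.loop.vectorize.width", i32 4}
  //   !2 = !{!"llvm.loop.interleave.count", i32 2}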

  LoopVectorizeHints(const Loop *L, bool DisableInterleaving)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE), TheLoop(L) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized() {
    Width.Value = Interleave.Value = 1;
    Hint Hints[] = {Width, Interleave};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      emitOptimizationRemarkAnalysis(F->getContext(),
                                     vectorizeAnalysisPassName(), *F,
                                     L->getStartLoc(), emitRemark());
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      emitOptimizationRemarkAnalysis(F->getContext(),
                                     vectorizeAnalysisPassName(), *F,
                                     L->getStartLoc(), emitRemark());
      return false;
    }

    if (getWidth() == 1 && getInterleave() == 1) {
      // FIXME: Add a separate metadata to indicate when the loop has already
      // been vectorized instead of setting width and count to 1.
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      emitOptimizationRemarkAnalysis(
          F->getContext(), vectorizeAnalysisPassName(), *F, L->getStartLoc(),
          "loop not vectorized: vectorization and interleaving are explicitly "
          "disabled, or vectorize width and interleave count are both set to "
          "1");
      return false;
    }

    return true;
  }

  /// Dumps all the hint information.
  std::string emitRemark() const {
    VectorizationReport R;
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      R << "vectorization is explicitly disabled";
    else {
      R << "use -Rpass-analysis=loop-vectorize for more info";
      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
        R << " (Force=true";
        if (Width.Value != 0)
          R << ", Vector Width=" << Width.Value;
        if (Interleave.Value != 0)
          R << ", Interleave Count=" << Interleave.Value;
        R << ")";
      }
    }

    return R.str();
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }
  const char *vectorizeAnalysisPassName() const {
    // If hints are provided that don't disable vectorization use the
    // AlwaysPrint pass name to force the frontend to print the diagnostic.
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return DiagnosticInfo::AlwaysPrint;
  }

  bool allowReordering() const {
    // When hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations that is given by the
    // scalar loop. This is not enabled by default because it can be unsafe
    // or inefficient. For example, reordering floating-point operations will
    // change the way round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either a MDString or a MDNode with the first
      // operand a MDString.
      if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        if (!MD || MD->getNumOperands() == 0)
          continue;
        S = dyn_cast<MDString>(MD->getOperand(0));
        for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
          Args.push_back(MD->getOperand(i));
      } else {
        S = dyn_cast<MDString>(LoopID->getOperand(i));
        assert(Args.size() == 0 && "too many arguments for MDString");
      }

      if (!S)
        continue;

      // Check if the hint starts with the loop metadata prefix.
      StringRef Name = S->getString();
      if (Args.size() == 1)
        setHint(Name, Args[0]);
    }
  }

  /// Checks a string hint with one operand and sets the value if valid.
  void setHint(StringRef Name, Metadata *Arg) {
    if (!Name.startswith(Prefix()))
      return;
    Name = Name.substr(Prefix().size(), StringRef::npos);

    const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
    if (!C)
      return;
    unsigned Val = C->getZExtValue();

    Hint *Hints[] = {&Width, &Interleave, &Force};
    for (auto H : Hints) {
      if (Name == H->Name) {
        if (H->validate(Val))
          H->Value = Val;
        else
          DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
        break;
      }
    }
  }

  /// Create a new hint from name / value pair.
  MDNode *createHintMetadata(StringRef Name, unsigned V) const {
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    Metadata *MDs[] = {MDString::get(Context, Name),
                       ConstantAsMetadata::get(
                           ConstantInt::get(Type::getInt32Ty(Context), V))};
    return MDNode::get(Context, MDs);
  }

  /// Matches metadata with hint name.
  bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
    MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
    if (!Name)
      return false;

    for (auto H : HintTypes)
      if (Name->getString().endswith(H.Name))
        return true;
    return false;
  }

  /// Sets current hints into loop metadata, keeping other values intact.
  void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
    if (HintTypes.size() == 0)
      return;

    // Reserve the first element to LoopID (see below).
    SmallVector<Metadata *, 4> MDs(1);
    // If the loop already has metadata, then ignore the existing operands.
    MDNode *LoopID = TheLoop->getLoopID();
    if (LoopID) {
      for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
        MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
        // If node in update list, ignore old value.
        if (!matchesHintMetadataName(Node, HintTypes))
          MDs.push_back(Node);
      }
    }

    // Now, add the missing hints.
    for (auto H : HintTypes)
      MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));

    // Replace current metadata node with new one.
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);

    TheLoop->setLoopID(NewLoopID);
  }

  /// The loop these hints belong to.
  const Loop *TheLoop;
};

static void emitAnalysisDiag(const Function *TheFunction, const Loop *TheLoop,
                             const LoopVectorizeHints &Hints,
                             const LoopAccessReport &Message) {
  const char *Name = Hints.vectorizeAnalysisPassName();
  LoopAccessReport::emitAnalysis(Message, TheFunction, TheLoop, Name);
}

static void emitMissedWarning(Function *F, Loop *L,
                              const LoopVectorizeHints &LH) {
  emitOptimizationRemarkMissed(F->getContext(), LV_NAME, *F, L->getStartLoc(),
                               LH.emitRemark());

  if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
    if (LH.getWidth() != 1)
      emitLoopVectorizeWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop vectorization");
    else if (LH.getInterleave() != 1)
      emitLoopInterleaveWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop interleaving");
  }
}

/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
/// to what vectorization factor.
/// This class does not look at the profitability of vectorization, only the
/// legality. This class has two main kinds of checks:
/// * Memory checks - The code in canVectorizeMemory checks if vectorization
///   will change the order of memory accesses in a way that will change the
///   correctness of the program.
/// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory
///   checks for a number of different conditions, such as the availability of
///   a single induction variable, and that all types are supported and
///   vectorizable. This code reflects the capabilities of InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying the
/// induction variable and the different reduction variables.
class LoopVectorizationLegality {
public:
  LoopVectorizationLegality(Loop *L, PredicatedScalarEvolution &PSE,
                            DominatorTree *DT, TargetLibraryInfo *TLI,
                            AliasAnalysis *AA, Function *F,
                            const TargetTransformInfo *TTI,
                            LoopAccessAnalysis *LAA,
                            LoopVectorizationRequirements *R,
                            const LoopVectorizeHints *H)
      : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TheFunction(F),
        TTI(TTI), DT(DT), LAA(LAA), LAI(nullptr), InterleaveInfo(PSE, L, DT),
        Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
        Requirements(R), Hints(H) {}

  /// ReductionList contains the reduction descriptors for all
  /// of the reductions that were found in the loop.
  typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;

  /// InductionList saves induction variables and maps them to the
  /// induction descriptor.
  typedef MapVector<PHINode *, InductionDescriptor> InductionList;

  /// RecurrenceSet contains the phi nodes that are recurrences other than
  /// inductions and reductions.
  typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet;

  /// Returns true if it is legal to vectorize this loop.
  /// This does not mean that it is profitable to vectorize this
  /// loop, only that it is legal to do so.
  bool canVectorize();

  /// Returns the induction variable.
  PHINode *getInduction() { return Induction; }

  /// Returns the reduction variables found in the loop.
  ReductionList *getReductionVars() { return &Reductions; }

  /// Returns the induction variables found in the loop.
  InductionList *getInductionVars() { return &Inductions; }

  /// Return the first-order recurrences found in the loop.
  RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }

  /// Returns the widest induction type.
  Type *getWidestInductionType() { return WidestIndTy; }

  /// Returns True if V is an induction variable in this loop.
  bool isInductionVariable(const Value *V);

  /// Returns True if PN is a reduction variable in this loop.
  bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }

  /// Returns True if Phi is a first-order recurrence in this loop.
  bool isFirstOrderRecurrence(const PHINode *Phi);

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  bool blockNeedsPredication(BasicBlock *BB);

  /// Check if this pointer is consecutive when vectorizing. This happens
  /// when the last index of the GEP is the induction variable, or the
  /// pointer itself is an induction variable.
  /// This check allows us to vectorize A[idx] into a wide load/store.
  /// Returns:
  /// 0 - Stride is unknown or non-consecutive.
  /// 1 - Address is consecutive.
  /// -1 - Address is consecutive, and decreasing.
  int isConsecutivePtr(Value *Ptr);

  /// Returns true if the value V is uniform within the loop.
  bool isUniform(Value *V);

  /// Returns true if this instruction will remain scalar after vectorization.
  bool isUniformAfterVectorization(Instruction *I) { return Uniforms.count(I); }
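  // To illustrate the consecutiveness/uniformity queries above (hypothetical
  // loop): in
  //   for (i = 0; i < n; ++i) A[i] = B[j];
  // the address &B[j] is uniform (the same value on every iteration, so a
  // single scalar load plus a broadcast suffices), while &A[i] is consecutive
  // with stride 1, and &A[n - i] would be consecutive and decreasing.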
  /// Returns the information that we collected about runtime memory check.
  const RuntimePointerChecking *getRuntimePointerChecking() const {
    return LAI->getRuntimePointerChecking();
  }

  const LoopAccessInfo *getLAI() const { return LAI; }

  /// \brief Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// \brief Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }

  bool hasStride(Value *V) { return StrideSet.count(V); }
  bool mustCheckStrides() { return !StrideSet.empty(); }
  SmallPtrSet<Value *, 8>::iterator strides_begin() {
    return StrideSet.begin();
  }
  SmallPtrSet<Value *, 8>::iterator strides_end() { return StrideSet.end(); }

  /// Returns true if the target machine supports masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
  }
  /// Returns true if the target machine supports masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
  }
  /// Returns true if the target machine supports masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType) {
    return TTI->isLegalMaskedScatter(DataType);
  }
  /// Returns true if the target machine supports masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType) {
    return TTI->isLegalMaskedGather(DataType);
  }

  /// Returns true if the vector representation of the instruction \p I
  /// requires a mask.
  bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
  unsigned getNumStores() const { return LAI->getNumStores(); }
  unsigned getNumLoads() const { return LAI->getNumLoads(); }
  unsigned getNumPredStores() const { return NumPredStores; }

private:
  /// Check if a single basic block loop is vectorizable.
  /// At this point we know that this is a loop with a constant trip count
  /// and we only need to check individual instructions.
  bool canVectorizeInstrs();

  /// When we vectorize loops we may change the order in which
  /// we read and write from memory. This method checks if it is
  /// legal to vectorize the code, considering only memory constraints.
  /// Returns true if the loop is vectorizable.
  bool canVectorizeMemory();

  /// Return true if we can vectorize this loop using the IF-conversion
  /// transformation.
  bool canVectorizeWithIfConvert();

  /// Collect the variables that need to stay uniform after vectorization.
  void collectLoopUniforms();

  /// Return true if all of the instructions in the block can be speculatively
  /// executed. \p SafePtrs is a list of addresses that are known to be legal
  /// and we know that we can read from them without segfault.
  bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);

  /// \brief Collect memory access with loop invariant strides.
  ///
  /// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
  /// invariant.
  void collectStridedAccess(Value *LoadOrStoreInst);

  /// Report an analysis message to assist the user in diagnosing loops that
  /// are not vectorized. These are handled as LoopAccessReport rather than
  /// VectorizationReport because the << operator of VectorizationReport
  /// returns LoopAccessReport.
  void emitAnalysis(const LoopAccessReport &Message) const {
    emitAnalysisDiag(TheFunction, TheLoop, *Hints, Message);
  }

  unsigned NumPredStores;

  /// The loop that we evaluate.
  Loop *TheLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
  /// Applies dynamic knowledge to simplify SCEV expressions in the context
  /// of existing SCEV assumptions. The analysis will also add a minimal set
  /// of new predicates if this is required to enable vectorization and
  /// unrolling.
  PredicatedScalarEvolution &PSE;
  /// Target Library Info.
  TargetLibraryInfo *TLI;
  /// Parent function.
  Function *TheFunction;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Dominator Tree.
  DominatorTree *DT;
  // LoopAccess analysis.
  LoopAccessAnalysis *LAA;
  // And the loop-accesses info corresponding to this loop. This pointer is
  // null until canVectorizeMemory sets it up.
  const LoopAccessInfo *LAI;

  /// The interleaved access information contains groups of interleaved
  /// accesses with the same stride and close to each other.
  InterleavedAccessInfo InterleaveInfo;

  // --- vectorization state --- //

  /// Holds the integer induction variable. This is the counter of the
  /// loop.
  PHINode *Induction;
  /// Holds the reduction variables.
  ReductionList Reductions;
  /// Holds all of the induction variables that we found in the loop.
  /// Notice that inductions don't need to start at zero and that induction
  /// variables can be pointers.
  InductionList Inductions;
  /// Holds the phi nodes that are first-order recurrences.
  RecurrenceSet FirstOrderRecurrences;
  /// Holds the widest induction type encountered.
  Type *WidestIndTy;

  /// Allowed outside users. This holds the reduction
  /// vars which can be accessed from outside the loop.
  SmallPtrSet<Value *, 4> AllowedExit;
  /// This set holds the variables which are known to be uniform after
  /// vectorization.
  SmallPtrSet<Instruction *, 4> Uniforms;

  /// Can we assume the absence of NaNs.
  bool HasFunNoNaNAttr;

  /// Vectorization requirements that will go through late-evaluation.
  LoopVectorizationRequirements *Requirements;

  /// Used to emit an analysis of any legality issues.
  const LoopVectorizeHints *Hints;

  ValueToValueMap Strides;
  SmallPtrSet<Value *, 8> StrideSet;

  /// While vectorizing these instructions we have to generate a
  /// call to the appropriate masked intrinsic.
  SmallPtrSet<const Instruction *, 8> MaskedOp;
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons.
In this class we mainly attempt to predict the 1477 /// expected speedup/slowdowns due to the supported instruction set. We use the 1478 /// TargetTransformInfo to query the different backends for the cost of 1479 /// different operations. 1480 class LoopVectorizationCostModel { 1481 public: 1482 LoopVectorizationCostModel(Loop *L, ScalarEvolution *SE, LoopInfo *LI, 1483 LoopVectorizationLegality *Legal, 1484 const TargetTransformInfo &TTI, 1485 const TargetLibraryInfo *TLI, DemandedBits *DB, 1486 AssumptionCache *AC, const Function *F, 1487 const LoopVectorizeHints *Hints, 1488 SmallPtrSetImpl<const Value *> &ValuesToIgnore) 1489 : TheLoop(L), SE(SE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB), 1490 TheFunction(F), Hints(Hints), ValuesToIgnore(ValuesToIgnore) {} 1491 1492 /// Information about vectorization costs 1493 struct VectorizationFactor { 1494 unsigned Width; // Vector width with best cost 1495 unsigned Cost; // Cost of the loop with that width 1496 }; 1497 /// \return The most profitable vectorization factor and the cost of that VF. 1498 /// This method checks every power of two up to VF. If UserVF is not ZERO 1499 /// then this vectorization factor will be selected if vectorization is 1500 /// possible. 1501 VectorizationFactor selectVectorizationFactor(bool OptForSize); 1502 1503 /// \return The size (in bits) of the smallest and widest types in the code 1504 /// that needs to be vectorized. We ignore values that remain scalar such as 1505 /// 64 bit loop indices. 1506 std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); 1507 1508 /// \return The desired interleave count. 1509 /// If interleave count has been specified by metadata it will be returned. 1510 /// Otherwise, the interleave count is computed and returned. VF and LoopCost 1511 /// are the selected vectorization factor and the cost of the selected VF. 1512 unsigned selectInterleaveCount(bool OptForSize, unsigned VF, 1513 unsigned LoopCost); 1514 1515 /// \return The most profitable unroll factor. 1516 /// This method finds the best unroll-factor based on register pressure and 1517 /// other parameters. VF and LoopCost are the selected vectorization factor 1518 /// and the cost of the selected VF. 1519 unsigned computeInterleaveCount(bool OptForSize, unsigned VF, 1520 unsigned LoopCost); 1521 1522 /// \brief A struct that represents some properties of the register usage 1523 /// of a loop. 1524 struct RegisterUsage { 1525 /// Holds the number of loop invariant values that are used in the loop. 1526 unsigned LoopInvariantRegs; 1527 /// Holds the maximum number of concurrent live intervals in the loop. 1528 unsigned MaxLocalUsers; 1529 /// Holds the number of instructions in the loop. 1530 unsigned NumInstructions; 1531 }; 1532 1533 /// \return Returns information about the register usages of the loop for the 1534 /// given vectorization factors. 1535 SmallVector<RegisterUsage, 8> 1536 calculateRegisterUsage(const SmallVector<unsigned, 8> &VFs); 1537 1538 private: 1539 /// The vectorization cost is a combination of the cost itself and a boolean 1540 /// indicating whether any of the contributing operations will actually operate on 1541 /// vector values after type legalization in the backend. If this latter value is 1542 /// false, then all operations will be scalarized (i.e. no vectorization has 1543 /// actually taken place). 1544 typedef std::pair<unsigned, bool> VectorizationCostTy; 1545 1546 /// Returns the expected execution cost. 
The unit of the cost does not matter because we use the 'cost' units to
  /// compare different vector widths. The cost that is returned is *not*
  /// normalized by the vectorization factor.
  VectorizationCostTy expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Report an analysis message to assist the user in diagnosing loops that are
  /// not vectorized. These are handled as LoopAccessReport rather than
  /// VectorizationReport because the << operator of VectorizationReport returns
  /// LoopAccessReport.
  void emitAnalysis(const LoopAccessReport &Message) const {
    emitAnalysisDiag(TheFunction, TheLoop, *Hints, Message);
  }

public:
  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// The loop that we evaluate.
  Loop *TheLoop;
  /// SCEV analysis.
  ScalarEvolution *SE;
  /// Loop Info analysis.
  LoopInfo *LI;
  /// Vectorization legality.
  LoopVectorizationLegality *Legal;
  /// Vector target information.
  const TargetTransformInfo &TTI;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Demanded bits analysis.
  DemandedBits *DB;
  const Function *TheFunction;
  // Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;
  // Values to ignore in the cost model.
  const SmallPtrSetImpl<const Value *> &ValuesToIgnore;
};

/// \brief This holds vectorization requirements that must be verified late in
/// the process. The requirements are set by the legality and cost-model
/// analyses. Once vectorization has been determined to be possible and
/// profitable the requirements can be verified by looking for metadata or
/// compiler options. For example, some loops require FP commutativity which is
/// only allowed if vectorization is explicitly specified or if the fast-math
/// compiler option has been provided.
/// Late evaluation of these requirements allows helpful diagnostics to be
/// composed that tell the user what needs to be done to vectorize the loop.
/// For example, by specifying #pragma clang loop vectorize or -ffast-math.
/// Late evaluation should be used only when diagnostics can be generated that
/// can be followed by a non-expert user.
class LoopVectorizationRequirements {
public:
  LoopVectorizationRequirements()
      : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr) {}

  void addUnsafeAlgebraInst(Instruction *I) {
    // First unsafe algebra instruction.
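    // Only the first such instruction is remembered, so a later diagnostic
    // can point at a single location. For illustration, in a hypothetical
    // reduction such as:
    //
    //   float sum = 0; for (i = 0; i < n; ++i) sum += a[i];
    //
    // the 'fadd' feeding 'sum' may only be reordered (vectorized) under
    // fast-math, and it is the instruction that would be recorded here.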
1618 if (!UnsafeAlgebraInst) 1619 UnsafeAlgebraInst = I; 1620 } 1621 1622 void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; } 1623 1624 bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) { 1625 const char *Name = Hints.vectorizeAnalysisPassName(); 1626 bool Failed = false; 1627 if (UnsafeAlgebraInst && !Hints.allowReordering()) { 1628 emitOptimizationRemarkAnalysisFPCommute( 1629 F->getContext(), Name, *F, UnsafeAlgebraInst->getDebugLoc(), 1630 VectorizationReport() << "cannot prove it is safe to reorder " 1631 "floating-point operations"); 1632 Failed = true; 1633 } 1634 1635 // Test if runtime memcheck thresholds are exceeded. 1636 bool PragmaThresholdReached = 1637 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 1638 bool ThresholdReached = 1639 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 1640 if ((ThresholdReached && !Hints.allowReordering()) || 1641 PragmaThresholdReached) { 1642 emitOptimizationRemarkAnalysisAliasing( 1643 F->getContext(), Name, *F, L->getStartLoc(), 1644 VectorizationReport() 1645 << "cannot prove it is safe to reorder memory operations"); 1646 DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 1647 Failed = true; 1648 } 1649 1650 return Failed; 1651 } 1652 1653 private: 1654 unsigned NumRuntimePointerChecks; 1655 Instruction *UnsafeAlgebraInst; 1656 }; 1657 1658 static void addInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) { 1659 if (L.empty()) 1660 return V.push_back(&L); 1661 1662 for (Loop *InnerL : L) 1663 addInnerLoop(*InnerL, V); 1664 } 1665 1666 /// The LoopVectorize Pass. 1667 struct LoopVectorize : public FunctionPass { 1668 /// Pass identification, replacement for typeid 1669 static char ID; 1670 1671 explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true) 1672 : FunctionPass(ID), 1673 DisableUnrolling(NoUnrolling), 1674 AlwaysVectorize(AlwaysVectorize) { 1675 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1676 } 1677 1678 ScalarEvolution *SE; 1679 LoopInfo *LI; 1680 TargetTransformInfo *TTI; 1681 DominatorTree *DT; 1682 BlockFrequencyInfo *BFI; 1683 TargetLibraryInfo *TLI; 1684 DemandedBits *DB; 1685 AliasAnalysis *AA; 1686 AssumptionCache *AC; 1687 LoopAccessAnalysis *LAA; 1688 bool DisableUnrolling; 1689 bool AlwaysVectorize; 1690 1691 BlockFrequency ColdEntryFreq; 1692 1693 bool runOnFunction(Function &F) override { 1694 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1695 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1696 TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1697 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1698 BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1699 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1700 TLI = TLIP ? &TLIP->getTLI() : nullptr; 1701 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1702 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1703 LAA = &getAnalysis<LoopAccessAnalysis>(); 1704 DB = &getAnalysis<DemandedBits>(); 1705 1706 // Compute some weights outside of the loop over the loops. Compute this 1707 // using a BranchProbability to re-use its scaling math. 1708 const BranchProbability ColdProb(1, 5); // 20% 1709 ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb; 1710 1711 // Don't attempt if 1712 // 1. the target claims to have no vector registers, and 1713 // 2. interleaving won't help ILP. 
1714 // 1715 // The second condition is necessary because, even if the target has no 1716 // vector registers, loop vectorization may still enable scalar 1717 // interleaving. 1718 if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2) 1719 return false; 1720 1721 // Build up a worklist of inner-loops to vectorize. This is necessary as 1722 // the act of vectorizing or partially unrolling a loop creates new loops 1723 // and can invalidate iterators across the loops. 1724 SmallVector<Loop *, 8> Worklist; 1725 1726 for (Loop *L : *LI) 1727 addInnerLoop(*L, Worklist); 1728 1729 LoopsAnalyzed += Worklist.size(); 1730 1731 // Now walk the identified inner loops. 1732 bool Changed = false; 1733 while (!Worklist.empty()) 1734 Changed |= processLoop(Worklist.pop_back_val()); 1735 1736 // Process each loop nest in the function. 1737 return Changed; 1738 } 1739 1740 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 1741 SmallVector<Metadata *, 4> MDs; 1742 // Reserve first location for self reference to the LoopID metadata node. 1743 MDs.push_back(nullptr); 1744 bool IsUnrollMetadata = false; 1745 MDNode *LoopID = L->getLoopID(); 1746 if (LoopID) { 1747 // First find existing loop unrolling disable metadata. 1748 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 1749 MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 1750 if (MD) { 1751 const MDString *S = dyn_cast<MDString>(MD->getOperand(0)); 1752 IsUnrollMetadata = 1753 S && S->getString().startswith("llvm.loop.unroll.disable"); 1754 } 1755 MDs.push_back(LoopID->getOperand(i)); 1756 } 1757 } 1758 1759 if (!IsUnrollMetadata) { 1760 // Add runtime unroll disable metadata. 1761 LLVMContext &Context = L->getHeader()->getContext(); 1762 SmallVector<Metadata *, 1> DisableOperands; 1763 DisableOperands.push_back( 1764 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 1765 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 1766 MDs.push_back(DisableNode); 1767 MDNode *NewLoopID = MDNode::get(Context, MDs); 1768 // Set operand 0 to refer to the loop id itself. 1769 NewLoopID->replaceOperandWith(0, NewLoopID); 1770 L->setLoopID(NewLoopID); 1771 } 1772 } 1773 1774 bool processLoop(Loop *L) { 1775 assert(L->empty() && "Only process inner loops."); 1776 1777 #ifndef NDEBUG 1778 const std::string DebugLocStr = getDebugLocString(L); 1779 #endif /* NDEBUG */ 1780 1781 DEBUG(dbgs() << "\nLV: Checking a loop in \"" 1782 << L->getHeader()->getParent()->getName() << "\" from " 1783 << DebugLocStr << "\n"); 1784 1785 LoopVectorizeHints Hints(L, DisableUnrolling); 1786 1787 DEBUG(dbgs() << "LV: Loop hints:" 1788 << " force=" 1789 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 1790 ? "disabled" 1791 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 1792 ? "enabled" 1793 : "?")) << " width=" << Hints.getWidth() 1794 << " unroll=" << Hints.getInterleave() << "\n"); 1795 1796 // Function containing loop 1797 Function *F = L->getHeader()->getParent(); 1798 1799 // Looking at the diagnostic output is the only way to determine if a loop 1800 // was vectorized (other than looking at the IR or machine code), so it 1801 // is important to generate an optimization remark for each loop. Most of 1802 // these messages are generated by emitOptimizationRemarkAnalysis. Remarks 1803 // generated by emitOptimizationRemark and emitOptimizationRemarkMissed are 1804 // less verbose reporting vectorized loops and unvectorized loops that may 1805 // benefit from vectorization, respectively. 
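    // For illustration, a run with -Rpass=loop-vectorize may print a remark
    // such as (assuming width 4 and interleave count 2 were chosen):
    //
    //   remark: vectorized loop (vectorization width: 4, interleaved count: 2)
    //
    // while -Rpass-missed=loop-vectorize and -Rpass-analysis=loop-vectorize
    // surface the missed loops and the analysis explaining why.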
1806 1807 if (!Hints.allowVectorization(F, L, AlwaysVectorize)) { 1808 DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 1809 return false; 1810 } 1811 1812 // Check the loop for a trip count threshold: 1813 // do not vectorize loops with a tiny trip count. 1814 const unsigned TC = SE->getSmallConstantTripCount(L); 1815 if (TC > 0u && TC < TinyTripCountVectorThreshold) { 1816 DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 1817 << "This loop is not worth vectorizing."); 1818 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 1819 DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 1820 else { 1821 DEBUG(dbgs() << "\n"); 1822 emitAnalysisDiag(F, L, Hints, VectorizationReport() 1823 << "vectorization is not beneficial " 1824 "and is not explicitly forced"); 1825 return false; 1826 } 1827 } 1828 1829 PredicatedScalarEvolution PSE(*SE, *L); 1830 1831 // Check if it is legal to vectorize the loop. 1832 LoopVectorizationRequirements Requirements; 1833 LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, LAA, 1834 &Requirements, &Hints); 1835 if (!LVL.canVectorize()) { 1836 DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 1837 emitMissedWarning(F, L, Hints); 1838 return false; 1839 } 1840 1841 // Collect values we want to ignore in the cost model. This includes 1842 // type-promoting instructions we identified during reduction detection. 1843 SmallPtrSet<const Value *, 32> ValuesToIgnore; 1844 CodeMetrics::collectEphemeralValues(L, AC, ValuesToIgnore); 1845 for (auto &Reduction : *LVL.getReductionVars()) { 1846 RecurrenceDescriptor &RedDes = Reduction.second; 1847 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 1848 ValuesToIgnore.insert(Casts.begin(), Casts.end()); 1849 } 1850 1851 // Use the cost model. 1852 LoopVectorizationCostModel CM(L, PSE.getSE(), LI, &LVL, *TTI, TLI, DB, AC, 1853 F, &Hints, ValuesToIgnore); 1854 1855 // Check the function attributes to find out if this function should be 1856 // optimized for size. 1857 bool OptForSize = Hints.getForce() != LoopVectorizeHints::FK_Enabled && 1858 F->optForSize(); 1859 1860 // Compute the weighted frequency of this loop being executed and see if it 1861 // is less than 20% of the function entry baseline frequency. Note that we 1862 // always have a canonical loop here because we think we *can* vectorize. 1863 // FIXME: This is hidden behind a flag due to pervasive problems with 1864 // exactly what block frequency models. 1865 if (LoopVectorizeWithBlockFrequency) { 1866 BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader()); 1867 if (Hints.getForce() != LoopVectorizeHints::FK_Enabled && 1868 LoopEntryFreq < ColdEntryFreq) 1869 OptForSize = true; 1870 } 1871 1872 // Check the function attributes to see if implicit floats are allowed. 1873 // FIXME: This check doesn't seem possibly correct -- what if the loop is 1874 // an integer loop and the vector instructions selected are purely integer 1875 // vector instructions? 1876 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 1877 DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat" 1878 "attribute is used.\n"); 1879 emitAnalysisDiag( 1880 F, L, Hints, 1881 VectorizationReport() 1882 << "loop not vectorized due to NoImplicitFloat attribute"); 1883 emitMissedWarning(F, L, Hints); 1884 return false; 1885 } 1886 1887 // Select the optimal vectorization factor. 
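    // For illustration: on a target with 256-bit vector registers and a loop
    // whose widest type is i32, the candidate widths tried below are the
    // powers of two 2, 4 and 8; a resulting Width of 1 means the cost model
    // found vectorization unprofitable.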
    const LoopVectorizationCostModel::VectorizationFactor VF =
        CM.selectVectorizationFactor(OptForSize);

    // Select the interleave count.
    unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);

    // Get the user interleave count.
    unsigned UserIC = Hints.getInterleave();

    // Identify the diagnostic messages that should be produced.
    std::string VecDiagMsg, IntDiagMsg;
    bool VectorizeLoop = true, InterleaveLoop = true;

    if (Requirements.doesNotMeet(F, L, Hints)) {
      DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                      "requirements.\n");
      emitMissedWarning(F, L, Hints);
      return false;
    }

    if (VF.Width == 1) {
      DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
      VecDiagMsg =
          "the cost-model indicates that vectorization is not beneficial";
      VectorizeLoop = false;
    }

    if (IC == 1 && UserIC <= 1) {
      // Tell the user interleaving is not beneficial.
      DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
      IntDiagMsg =
          "the cost-model indicates that interleaving is not beneficial";
      InterleaveLoop = false;
      if (UserIC == 1)
        IntDiagMsg +=
            " and is explicitly disabled or interleave count is set to 1";
    } else if (IC > 1 && UserIC == 1) {
      // Tell the user interleaving is beneficial, but it is explicitly
      // disabled.
      DEBUG(dbgs()
            << "LV: Interleaving is beneficial but is explicitly disabled.");
      IntDiagMsg = "the cost-model indicates that interleaving is beneficial "
                   "but is explicitly disabled or interleave count is set to 1";
      InterleaveLoop = false;
    }

    // Override IC if the user provided an interleave count.
    IC = UserIC > 0 ? UserIC : IC;

    // Emit diagnostic messages, if any.
    const char *VAPassName = Hints.vectorizeAnalysisPassName();
    if (!VectorizeLoop && !InterleaveLoop) {
      // Do not vectorize or interleave the loop.
      emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
                                     L->getStartLoc(), VecDiagMsg);
      emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
                                     L->getStartLoc(), IntDiagMsg);
      return false;
    } else if (!VectorizeLoop && InterleaveLoop) {
      DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
      emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
                                     L->getStartLoc(), VecDiagMsg);
    } else if (VectorizeLoop && !InterleaveLoop) {
      DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                   << DebugLocStr << '\n');
      emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
                                     L->getStartLoc(), IntDiagMsg);
    } else if (VectorizeLoop && InterleaveLoop) {
      DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                   << DebugLocStr << '\n');
      DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    }

    if (!VectorizeLoop) {
      assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided that it is not legal to vectorize the loop, then
      // interleave it.
      InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, IC);
      Unroller.vectorize(&LVL, CM.MinBWs);

      emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
                             Twine("interleaved loop (interleaved count: ") +
                                 Twine(IC) + ")");
    } else {
      // If we decided that it is *legal* to vectorize the loop, then do it.
      InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, VF.Width, IC);
      LB.vectorize(&LVL, CM.MinBWs);
      ++LoopsVectorized;

      // Add metadata to disable runtime unrolling of the scalar loop when
      // there are no runtime checks for strides and memory. In that situation
      // the scalar loop is rarely executed, so it is not worth unrolling.
      if (!LB.IsSafetyChecksAdded())
        AddRuntimeUnrollDisableMetaData(L);

      // Report the vectorization decision.
      emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
                             Twine("vectorized loop (vectorization width: ") +
                                 Twine(VF.Width) + ", interleaved count: " +
                                 Twine(IC) + ")");
    }

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();

    DEBUG(verifyFunction(*L->getHeader()->getParent()));
    return true;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequiredID(LoopSimplifyID);
    AU.addRequiredID(LCSSAID);
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<LoopAccessAnalysis>();
    AU.addRequired<DemandedBits>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
  }

};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
// LoopVectorizationCostModel.
//===----------------------------------------------------------------------===//

Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop.
  Instruction *Instr = dyn_cast<Instruction>(V);
  bool NewInstr =
      (Instr && std::find(LoopVectorBody.begin(), LoopVectorBody.end(),
                          Instr->getParent()) != LoopVectorBody.end());
  bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;

  // Place the code for broadcasting invariant variables in the new preheader.
  IRBuilder<>::InsertPointGuard Guard(Builder);
  if (Invariant)
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // Broadcast the scalar into all locations in the vector.
  Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

  return Shuf;
}

Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx,
                                          Value *Step) {
  assert(Val->getType()->isVectorTy() && "Must be a vector");
  assert(Val->getType()->getScalarType()->isIntegerTy() &&
         "Elem must be an integer");
  assert(Step->getType() == Val->getType()->getScalarType() &&
         "Step has wrong type");
  // Create the types.
  Type *ITy = Val->getType()->getScalarType();
  VectorType *Ty = cast<VectorType>(Val->getType());
  int VLen = Ty->getNumElements();
  SmallVector<Constant *, 8> Indices;

  // Create a vector of consecutive numbers starting from StartIdx.
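  // For example, with VLen = 4 and StartIdx = 0 the constant vector built
  // below is <0, 1, 2, 3>; after multiplying by the splatted Step and adding
  // Val, lane i of the result holds Val[i] + (StartIdx + i) * Step.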
  for (int i = 0; i < VLen; ++i)
    Indices.push_back(ConstantInt::get(ITy, StartIdx + i));

  // Add the consecutive indices to the vector value.
  Constant *Cv = ConstantVector::get(Indices);
  assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
  Step = Builder.CreateVectorSplat(VLen, Step);
  assert(Step->getType() == Val->getType() && "Invalid step vec");
  // FIXME: The newly created binary instructions should contain nsw/nuw flags,
  // which can be found from the original scalar operations.
  Step = Builder.CreateMul(Cv, Step);
  return Builder.CreateAdd(Val, Step, "induction");
}

int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr");
  auto *SE = PSE.getSE();
  // Make sure that the pointer does not point to structs.
  if (Ptr->getType()->getPointerElementType()->isAggregateType())
    return 0;

  // If this value is a pointer induction variable, we know it is consecutive.
  PHINode *Phi = dyn_cast_or_null<PHINode>(Ptr);
  if (Phi && Inductions.count(Phi)) {
    InductionDescriptor II = Inductions[Phi];
    return II.getConsecutiveDirection();
  }

  GetElementPtrInst *Gep = getGEPInstruction(Ptr);
  if (!Gep)
    return 0;

  unsigned NumOperands = Gep->getNumOperands();
  Value *GpPtr = Gep->getPointerOperand();
  // If this GEP value is a consecutive pointer induction variable and all of
  // the indices are loop invariant, then we know it is consecutive.
  Phi = dyn_cast<PHINode>(GpPtr);
  if (Phi && Inductions.count(Phi)) {

    // Make sure that the pointer does not point to structs.
    PointerType *GepPtrType = cast<PointerType>(GpPtr->getType());
    if (GepPtrType->getElementType()->isAggregateType())
      return 0;

    // Make sure that all of the index operands are loop invariant.
    for (unsigned i = 1; i < NumOperands; ++i)
      if (!SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop))
        return 0;

    InductionDescriptor II = Inductions[Phi];
    return II.getConsecutiveDirection();
  }

  unsigned InductionOperand = getGEPInductionOperand(Gep);

  // Check that all of the GEP indices are uniform except for our induction
  // operand.
  for (unsigned i = 0; i != NumOperands; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop))
      return 0;

  // We can emit wide loads/stores only if the last non-zero index is the
  // induction variable.
  const SCEV *Last = nullptr;
  if (!Strides.count(Gep))
    Last = PSE.getSCEV(Gep->getOperand(InductionOperand));
  else {
    // Because of the multiplication by a stride, we can have a s/zext cast.
    // We are going to replace this stride by 1, so the cast is safe to ignore.
    //
    //  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
    //  %0 = trunc i64 %indvars.iv to i32
    //  %mul = mul i32 %0, %Stride1
    //  %idxprom = zext i32 %mul to i64  << Safe cast.
    //  %arrayidx = getelementptr inbounds i32* %B, i64 %idxprom
    //
    Last = replaceSymbolicStrideSCEV(PSE, Strides,
                                     Gep->getOperand(InductionOperand), Gep);
    if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(Last))
      Last =
          (C->getSCEVType() == scSignExtend || C->getSCEVType() == scZeroExtend)
              ?
C->getOperand() 2140 : Last; 2141 } 2142 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Last)) { 2143 const SCEV *Step = AR->getStepRecurrence(*SE); 2144 2145 // The memory is consecutive because the last index is consecutive 2146 // and all other indices are loop invariant. 2147 if (Step->isOne()) 2148 return 1; 2149 if (Step->isAllOnesValue()) 2150 return -1; 2151 } 2152 2153 return 0; 2154 } 2155 2156 bool LoopVectorizationLegality::isUniform(Value *V) { 2157 return LAI->isUniform(V); 2158 } 2159 2160 InnerLoopVectorizer::VectorParts& 2161 InnerLoopVectorizer::getVectorValue(Value *V) { 2162 assert(V != Induction && "The new induction variable should not be used."); 2163 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2164 2165 // If we have a stride that is replaced by one, do it here. 2166 if (Legal->hasStride(V)) 2167 V = ConstantInt::get(V->getType(), 1); 2168 2169 // If we have this scalar in the map, return it. 2170 if (WidenMap.has(V)) 2171 return WidenMap.get(V); 2172 2173 // If this scalar is unknown, assume that it is a constant or that it is 2174 // loop invariant. Broadcast V and save the value for future uses. 2175 Value *B = getBroadcastInstrs(V); 2176 return WidenMap.splat(V, B); 2177 } 2178 2179 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2180 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2181 SmallVector<Constant*, 8> ShuffleMask; 2182 for (unsigned i = 0; i < VF; ++i) 2183 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 2184 2185 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 2186 ConstantVector::get(ShuffleMask), 2187 "reverse"); 2188 } 2189 2190 // Get a mask to interleave \p NumVec vectors into a wide vector. 2191 // I.e. <0, VF, VF*2, ..., VF*(NumVec-1), 1, VF+1, VF*2+1, ...> 2192 // E.g. For 2 interleaved vectors, if VF is 4, the mask is: 2193 // <0, 4, 1, 5, 2, 6, 3, 7> 2194 static Constant *getInterleavedMask(IRBuilder<> &Builder, unsigned VF, 2195 unsigned NumVec) { 2196 SmallVector<Constant *, 16> Mask; 2197 for (unsigned i = 0; i < VF; i++) 2198 for (unsigned j = 0; j < NumVec; j++) 2199 Mask.push_back(Builder.getInt32(j * VF + i)); 2200 2201 return ConstantVector::get(Mask); 2202 } 2203 2204 // Get the strided mask starting from index \p Start. 2205 // I.e. <Start, Start + Stride, ..., Start + Stride*(VF-1)> 2206 static Constant *getStridedMask(IRBuilder<> &Builder, unsigned Start, 2207 unsigned Stride, unsigned VF) { 2208 SmallVector<Constant *, 16> Mask; 2209 for (unsigned i = 0; i < VF; i++) 2210 Mask.push_back(Builder.getInt32(Start + i * Stride)); 2211 2212 return ConstantVector::get(Mask); 2213 } 2214 2215 // Get a mask of two parts: The first part consists of sequential integers 2216 // starting from 0, The second part consists of UNDEFs. 2217 // I.e. <0, 1, 2, ..., NumInt - 1, undef, ..., undef> 2218 static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned NumInt, 2219 unsigned NumUndef) { 2220 SmallVector<Constant *, 16> Mask; 2221 for (unsigned i = 0; i < NumInt; i++) 2222 Mask.push_back(Builder.getInt32(i)); 2223 2224 Constant *Undef = UndefValue::get(Builder.getInt32Ty()); 2225 for (unsigned i = 0; i < NumUndef; i++) 2226 Mask.push_back(Undef); 2227 2228 return ConstantVector::get(Mask); 2229 } 2230 2231 // Concatenate two vectors with the same element type. The 2nd vector should 2232 // not have more elements than the 1st vector. If the 2nd vector has less 2233 // elements, extend it with UNDEFs. 
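// For example, concatenating a <4 x i32> V1 with a <2 x i32> V2: V2 is first
// widened to <4 x i32> with the mask <0, 1, undef, undef>, and the two values
// are then shuffled with the mask <0, 1, 2, 3, 4, 5>, yielding a <6 x i32>
// whose lanes are V1[0..3] followed by V2[0..1].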
2234 static Value *ConcatenateTwoVectors(IRBuilder<> &Builder, Value *V1, 2235 Value *V2) { 2236 VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType()); 2237 VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType()); 2238 assert(VecTy1 && VecTy2 && 2239 VecTy1->getScalarType() == VecTy2->getScalarType() && 2240 "Expect two vectors with the same element type"); 2241 2242 unsigned NumElts1 = VecTy1->getNumElements(); 2243 unsigned NumElts2 = VecTy2->getNumElements(); 2244 assert(NumElts1 >= NumElts2 && "Unexpect the first vector has less elements"); 2245 2246 if (NumElts1 > NumElts2) { 2247 // Extend with UNDEFs. 2248 Constant *ExtMask = 2249 getSequentialMask(Builder, NumElts2, NumElts1 - NumElts2); 2250 V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask); 2251 } 2252 2253 Constant *Mask = getSequentialMask(Builder, NumElts1 + NumElts2, 0); 2254 return Builder.CreateShuffleVector(V1, V2, Mask); 2255 } 2256 2257 // Concatenate vectors in the given list. All vectors have the same type. 2258 static Value *ConcatenateVectors(IRBuilder<> &Builder, 2259 ArrayRef<Value *> InputList) { 2260 unsigned NumVec = InputList.size(); 2261 assert(NumVec > 1 && "Should be at least two vectors"); 2262 2263 SmallVector<Value *, 8> ResList; 2264 ResList.append(InputList.begin(), InputList.end()); 2265 do { 2266 SmallVector<Value *, 8> TmpList; 2267 for (unsigned i = 0; i < NumVec - 1; i += 2) { 2268 Value *V0 = ResList[i], *V1 = ResList[i + 1]; 2269 assert((V0->getType() == V1->getType() || i == NumVec - 2) && 2270 "Only the last vector may have a different type"); 2271 2272 TmpList.push_back(ConcatenateTwoVectors(Builder, V0, V1)); 2273 } 2274 2275 // Push the last vector if the total number of vectors is odd. 2276 if (NumVec % 2 != 0) 2277 TmpList.push_back(ResList[NumVec - 1]); 2278 2279 ResList = TmpList; 2280 NumVec = ResList.size(); 2281 } while (NumVec > 1); 2282 2283 return ResList[0]; 2284 } 2285 2286 // Try to vectorize the interleave group that \p Instr belongs to. 2287 // 2288 // E.g. Translate following interleaved load group (factor = 3): 2289 // for (i = 0; i < N; i+=3) { 2290 // R = Pic[i]; // Member of index 0 2291 // G = Pic[i+1]; // Member of index 1 2292 // B = Pic[i+2]; // Member of index 2 2293 // ... // do something to R, G, B 2294 // } 2295 // To: 2296 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2297 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 2298 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 2299 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 2300 // 2301 // Or translate following interleaved store group (factor = 3): 2302 // for (i = 0; i < N; i+=3) { 2303 // ... do something to R, G, B 2304 // Pic[i] = R; // Member of index 0 2305 // Pic[i+1] = G; // Member of index 1 2306 // Pic[i+2] = B; // Member of index 2 2307 // } 2308 // To: 2309 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2310 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 2311 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2312 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2313 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2314 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) { 2315 const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr); 2316 assert(Group && "Fail to get an interleaved access group."); 2317 2318 // Skip if current instruction is not the insert position. 
  if (Instr != Group->getInsertPos())
    return;

  LoadInst *LI = dyn_cast<LoadInst>(Instr);
  StoreInst *SI = dyn_cast<StoreInst>(Instr);
  Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();

  // Prepare for the vector type of the interleaved load/store.
  Type *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  unsigned InterleaveFactor = Group->getFactor();
  Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
  Type *PtrTy = VecTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());

  // Prepare for the new pointers.
  setDebugLocFromInst(Builder, Ptr);
  VectorParts &PtrParts = getVectorValue(Ptr);
  SmallVector<Value *, 2> NewPtrs;
  unsigned Index = Group->getIndex(Instr);
  for (unsigned Part = 0; Part < UF; Part++) {
    // Extract the pointer for the current instruction from the pointer
    // vector. A reverse access uses the pointer in the last lane.
    Value *NewPtr = Builder.CreateExtractElement(
        PtrParts[Part],
        Group->isReverse() ? Builder.getInt32(VF - 1) : Builder.getInt32(0));

    // Notice that the current instruction may be at any index in the group.
    // We need to adjust the address to that of the member at index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
    NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));

    // Cast to the vector pointer type.
    NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
  }

  setDebugLocFromInst(Builder, Instr);
  Value *UndefVec = UndefValue::get(VecTy);

  // Vectorize the interleaved load group.
  if (LI) {
    for (unsigned Part = 0; Part < UF; Part++) {
      Instruction *NewLoadInstr = Builder.CreateAlignedLoad(
          NewPtrs[Part], Group->getAlignment(), "wide.vec");

      for (unsigned i = 0; i < InterleaveFactor; i++) {
        Instruction *Member = Group->getMember(i);

        // Skip the gaps in the group.
        if (!Member)
          continue;

        Constant *StrideMask = getStridedMask(Builder, i, InterleaveFactor, VF);
        Value *StridedVec = Builder.CreateShuffleVector(
            NewLoadInstr, UndefVec, StrideMask, "strided.vec");

        // If this member has a different type, cast the result type.
        if (Member->getType() != ScalarTy) {
          VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
          StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy);
        }

        VectorParts &Entry = WidenMap.get(Member);
        Entry[Part] =
            Group->isReverse() ? reverseVector(StridedVec) : StridedVec;
      }

      addMetadata(NewLoadInstr, Instr);
    }
    return;
  }

  // The sub-vector type for the current instruction.
  VectorType *SubVT = VectorType::get(ScalarTy, VF);

  // Vectorize the interleaved store group.
  for (unsigned Part = 0; Part < UF; Part++) {
    // Collect the stored vector from each member.
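    // For illustration, with the R,G,B store group from the example above and
    // VF = 4: %R.vec, %G.vec and %B.vec are collected here, concatenated into
    // a single <12 x i32> wide vector, interleaved lane-wise, and written back
    // with one wide store per unroll part.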
    SmallVector<Value *, 4> StoredVecs;
    for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow gaps, so each index has a
      // member.
      Instruction *Member = Group->getMember(i);
      assert(Member && "Failed to get a member from an interleaved store group");

      Value *StoredVec =
          getVectorValue(dyn_cast<StoreInst>(Member)->getValueOperand())[Part];
      if (Group->isReverse())
        StoredVec = reverseVector(StoredVec);

      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
        StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT);

      StoredVecs.push_back(StoredVec);
    }

    // Concatenate all vectors into a wide vector.
    Value *WideVec = ConcatenateVectors(Builder, StoredVecs);

    // Interleave the elements in the wide vector.
    Constant *IMask = getInterleavedMask(Builder, VF, InterleaveFactor);
    Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
                                              "interleaved.vec");

    Instruction *NewStoreInstr =
        Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
    addMetadata(NewStoreInstr, Instr);
  }
}

void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
  // Attempt to issue a wide load.
  LoadInst *LI = dyn_cast<LoadInst>(Instr);
  StoreInst *SI = dyn_cast<StoreInst>(Instr);

  assert((LI || SI) && "Invalid Load/Store instruction");

  // Try to vectorize the interleave group if this access is interleaved.
  if (Legal->isAccessInterleaved(Instr))
    return vectorizeInterleaveGroup(Instr);

  Type *ScalarDataTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  Type *DataTy = VectorType::get(ScalarDataTy, VF);
  Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();
  unsigned Alignment = LI ? LI->getAlignment() : SI->getAlignment();
  // An alignment of 0 means target ABI alignment. We need to use the scalar's
  // target ABI alignment in such a case.
  const DataLayout &DL = Instr->getModule()->getDataLayout();
  if (!Alignment)
    Alignment = DL.getABITypeAlignment(ScalarDataTy);
  unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
  unsigned ScalarAllocatedSize = DL.getTypeAllocSize(ScalarDataTy);
  unsigned VectorElementSize = DL.getTypeStoreSize(DataTy) / VF;

  if (SI && Legal->blockNeedsPredication(SI->getParent()) &&
      !Legal->isMaskRequired(SI))
    return scalarizeInstruction(Instr, true);

  if (ScalarAllocatedSize != VectorElementSize)
    return scalarizeInstruction(Instr);

  // If the pointer is loop invariant, scalarize the load.
  if (LI && Legal->isUniform(Ptr))
    return scalarizeInstruction(Instr);

  // If the pointer is non-consecutive and gather/scatter is not supported,
  // scalarize the instruction.
  int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
  bool Reverse = ConsecutiveStride < 0;
  bool CreateGatherScatter = !ConsecutiveStride &&
                             ((LI && Legal->isLegalMaskedGather(ScalarDataTy)) ||
                              (SI && Legal->isLegalMaskedScatter(ScalarDataTy)));

  if (!ConsecutiveStride && !CreateGatherScatter)
    return scalarizeInstruction(Instr);

  Constant *Zero = Builder.getInt32(0);
  VectorParts &Entry = WidenMap.get(Instr);
  VectorParts VectorGep;

  // Handle consecutive loads/stores.
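  // For illustration, a unit-stride access such as 'a[i]' takes this path and
  // becomes one wide load/store (e.g. "wide.load") per unroll part, while a
  // stride of -1 additionally reverses the lanes and starts each part at the
  // last vector element (see the Reverse handling below).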
2486 GetElementPtrInst *Gep = getGEPInstruction(Ptr); 2487 if (ConsecutiveStride) { 2488 if (Gep && Legal->isInductionVariable(Gep->getPointerOperand())) { 2489 setDebugLocFromInst(Builder, Gep); 2490 Value *PtrOperand = Gep->getPointerOperand(); 2491 Value *FirstBasePtr = getVectorValue(PtrOperand)[0]; 2492 FirstBasePtr = Builder.CreateExtractElement(FirstBasePtr, Zero); 2493 2494 // Create the new GEP with the new induction variable. 2495 GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone()); 2496 Gep2->setOperand(0, FirstBasePtr); 2497 Gep2->setName("gep.indvar.base"); 2498 Ptr = Builder.Insert(Gep2); 2499 } else if (Gep) { 2500 setDebugLocFromInst(Builder, Gep); 2501 assert(PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getPointerOperand()), 2502 OrigLoop) && 2503 "Base ptr must be invariant"); 2504 // The last index does not have to be the induction. It can be 2505 // consecutive and be a function of the index. For example A[I+1]; 2506 unsigned NumOperands = Gep->getNumOperands(); 2507 unsigned InductionOperand = getGEPInductionOperand(Gep); 2508 // Create the new GEP with the new induction variable. 2509 GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone()); 2510 2511 for (unsigned i = 0; i < NumOperands; ++i) { 2512 Value *GepOperand = Gep->getOperand(i); 2513 Instruction *GepOperandInst = dyn_cast<Instruction>(GepOperand); 2514 2515 // Update last index or loop invariant instruction anchored in loop. 2516 if (i == InductionOperand || 2517 (GepOperandInst && OrigLoop->contains(GepOperandInst))) { 2518 assert((i == InductionOperand || 2519 PSE.getSE()->isLoopInvariant(PSE.getSCEV(GepOperandInst), 2520 OrigLoop)) && 2521 "Must be last index or loop invariant"); 2522 2523 VectorParts &GEPParts = getVectorValue(GepOperand); 2524 Value *Index = GEPParts[0]; 2525 Index = Builder.CreateExtractElement(Index, Zero); 2526 Gep2->setOperand(i, Index); 2527 Gep2->setName("gep.indvar.idx"); 2528 } 2529 } 2530 Ptr = Builder.Insert(Gep2); 2531 } else { // No GEP 2532 // Use the induction element ptr. 
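      // For illustration, a pointer induction phi used directly as the access
      // address, as in a hypothetical 'while (p != e) *p++ = 0;', reaches
      // this point with no GEP at all; the scalar pointer for this iteration
      // is extracted from lane 0 of its widened value.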
      assert(isa<PHINode>(Ptr) && "Invalid induction ptr");
      setDebugLocFromInst(Builder, Ptr);
      VectorParts &PtrVal = getVectorValue(Ptr);
      Ptr = Builder.CreateExtractElement(PtrVal[0], Zero);
    }
  } else {
    // At this point we should have a vector version of the GEP for use by a
    // gather or scatter.
    assert(CreateGatherScatter && "The instruction should be scalarized");
    if (Gep) {
      SmallVector<VectorParts, 4> OpsV;
      // When vectorizing the GEP across UF parts, we want to keep each
      // loop-invariant base or index of the GEP scalar.
      for (Value *Op : Gep->operands()) {
        if (PSE.getSE()->isLoopInvariant(PSE.getSCEV(Op), OrigLoop))
          OpsV.push_back(VectorParts(UF, Op));
        else
          OpsV.push_back(getVectorValue(Op));
      }

      for (unsigned Part = 0; Part < UF; ++Part) {
        SmallVector<Value *, 4> Ops;
        Value *GEPBasePtr = OpsV[0][Part];
        for (unsigned i = 1; i < Gep->getNumOperands(); i++)
          Ops.push_back(OpsV[i][Part]);
        Value *NewGep = Builder.CreateGEP(nullptr, GEPBasePtr, Ops,
                                          "VectorGep");
        assert(NewGep->getType()->isVectorTy() && "Expected vector GEP");
        NewGep = Builder.CreateBitCast(NewGep,
                                       VectorType::get(Ptr->getType(), VF));
        VectorGep.push_back(NewGep);
      }
    } else
      VectorGep = getVectorValue(Ptr);
  }

  VectorParts Mask = createBlockInMask(Instr->getParent());
  // Handle Stores:
  if (SI) {
    assert(!Legal->isUniform(SI->getPointerOperand()) &&
           "We do not allow storing to uniform addresses");
    setDebugLocFromInst(Builder, SI);
    // We don't want to update the value in the map as it might be used in
    // another expression. So don't use a reference type for "StoredVal".
    VectorParts StoredVal = getVectorValue(SI->getValueOperand());

    for (unsigned Part = 0; Part < UF; ++Part) {
      Instruction *NewSI = nullptr;
      if (CreateGatherScatter) {
        Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr;
        NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part],
                                            Alignment, MaskPart);
      } else {
        // Calculate the pointer for the specific unroll-part.
        Value *PartPtr =
            Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));

        if (Reverse) {
          // If we store to reverse consecutive memory locations, then we need
          // to reverse the order of elements in the stored value.
          StoredVal[Part] = reverseVector(StoredVal[Part]);
          // If the address is consecutive but reversed, then the
          // wide store needs to start at the last vector element.
          PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
          PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
          Mask[Part] = reverseVector(Mask[Part]);
        }

        Value *VecPtr = Builder.CreateBitCast(PartPtr,
                                              DataTy->getPointerTo(AddressSpace));

        if (Legal->isMaskRequired(SI))
          NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment,
                                            Mask[Part]);
        else
          NewSI = Builder.CreateAlignedStore(StoredVal[Part], VecPtr,
                                             Alignment);
      }
      addMetadata(NewSI, SI);
    }
    return;
  }

  // Handle loads.
  assert(LI && "Must have a load instruction");
  setDebugLocFromInst(Builder, LI);
  for (unsigned Part = 0; Part < UF; ++Part) {
    Instruction *NewLI;
    if (CreateGatherScatter) {
      Value *MaskPart = Legal->isMaskRequired(LI) ?
Mask[Part] : nullptr; 2622 NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, 2623 MaskPart, 0, "wide.masked.gather"); 2624 Entry[Part] = NewLI; 2625 } else { 2626 // Calculate the pointer for the specific unroll-part. 2627 Value *PartPtr = 2628 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 2629 2630 if (Reverse) { 2631 // If the address is consecutive but reversed, then the 2632 // wide load needs to start at the last vector element. 2633 PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 2634 PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 2635 Mask[Part] = reverseVector(Mask[Part]); 2636 } 2637 2638 Value *VecPtr = Builder.CreateBitCast(PartPtr, 2639 DataTy->getPointerTo(AddressSpace)); 2640 if (Legal->isMaskRequired(LI)) 2641 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 2642 UndefValue::get(DataTy), 2643 "wide.masked.load"); 2644 else 2645 NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load"); 2646 Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI; 2647 } 2648 addMetadata(NewLI, LI); 2649 } 2650 } 2651 2652 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2653 bool IfPredicateStore) { 2654 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2655 // Holds vector parameters or scalars, in case of uniform vals. 2656 SmallVector<VectorParts, 4> Params; 2657 2658 setDebugLocFromInst(Builder, Instr); 2659 2660 // Find all of the vectorized parameters. 2661 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 2662 Value *SrcOp = Instr->getOperand(op); 2663 2664 // If we are accessing the old induction variable, use the new one. 2665 if (SrcOp == OldInduction) { 2666 Params.push_back(getVectorValue(SrcOp)); 2667 continue; 2668 } 2669 2670 // Try using previously calculated values. 2671 Instruction *SrcInst = dyn_cast<Instruction>(SrcOp); 2672 2673 // If the src is an instruction that appeared earlier in the basic block, 2674 // then it should already be vectorized. 2675 if (SrcInst && OrigLoop->contains(SrcInst)) { 2676 assert(WidenMap.has(SrcInst) && "Source operand is unavailable"); 2677 // The parameter is a vector value from earlier. 2678 Params.push_back(WidenMap.get(SrcInst)); 2679 } else { 2680 // The parameter is a scalar from outside the loop. Maybe even a constant. 2681 VectorParts Scalars; 2682 Scalars.append(UF, SrcOp); 2683 Params.push_back(Scalars); 2684 } 2685 } 2686 2687 assert(Params.size() == Instr->getNumOperands() && 2688 "Invalid number of operands"); 2689 2690 // Does this instruction return a value ? 2691 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2692 2693 Value *UndefVec = IsVoidRetTy ? nullptr : 2694 UndefValue::get(VectorType::get(Instr->getType(), VF)); 2695 // Create a new entry in the WidenMap and initialize it to Undef or Null. 2696 VectorParts &VecResults = WidenMap.splat(Instr, UndefVec); 2697 2698 VectorParts Cond; 2699 if (IfPredicateStore) { 2700 assert(Instr->getParent()->getSinglePredecessor() && 2701 "Only support single predecessor blocks"); 2702 Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(), 2703 Instr->getParent()); 2704 } 2705 2706 // For each vector unroll 'part': 2707 for (unsigned Part = 0; Part < UF; ++Part) { 2708 // For each scalar that we create: 2709 for (unsigned Width = 0; Width < VF; ++Width) { 2710 2711 // Start if-block. 
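      // For a predicated store, the guard for lane 'Width' is built from the
      // block mask computed above; conceptually, for each lane:
      //
      //   %c = extractelement <VF x i1> %Cond, i32 Width
      //   if (%c) store ...   ; materialized later from PredicatedStores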
      Value *Cmp = nullptr;
      if (IfPredicateStore) {
        Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Width));
        Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp,
                                 ConstantInt::get(Cmp->getType(), 1));
      }

      Instruction *Cloned = Instr->clone();
      if (!IsVoidRetTy)
        Cloned->setName(Instr->getName() + ".cloned");
      // Replace the operands of the cloned instructions with extracted scalars.
      for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
        Value *Op = Params[op][Part];
        // Param is a vector. Need to extract the right lane.
        if (Op->getType()->isVectorTy())
          Op = Builder.CreateExtractElement(Op, Builder.getInt32(Width));
        Cloned->setOperand(op, Op);
      }
      addNewMetadata(Cloned, Instr);

      // Place the cloned scalar in the new loop.
      Builder.Insert(Cloned);

      // If we just cloned a new assumption, add it to the assumption cache.
      if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
        if (II->getIntrinsicID() == Intrinsic::assume)
          AC->registerAssumption(II);

      // If the original scalar returns a value, we need to place it in a
      // vector so that future users will be able to use it.
      if (!IsVoidRetTy)
        VecResults[Part] = Builder.CreateInsertElement(VecResults[Part], Cloned,
                                                       Builder.getInt32(Width));
      // End if-block.
      if (IfPredicateStore)
        PredicatedStores.push_back(std::make_pair(cast<StoreInst>(Cloned),
                                                  Cmp));
    }
  }
}

PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
                                                      Value *End, Value *Step,
                                                      Instruction *DL) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible no latch exists
  // yet. If so, use the header as this will be a single block loop.
  if (!Latch)
    Latch = Header;

  IRBuilder<> Builder(&*Header->getFirstInsertionPt());
  setDebugLocFromInst(Builder, getDebugLocFromInstOrOperands(OldInduction));
  auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");

  Builder.SetInsertPoint(Latch->getTerminator());

  // Create i+1 and fill the PHINode.
  Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
  Induction->addIncoming(Start, L->getLoopPreheader());
  Induction->addIncoming(Next, Latch);
  // Create the compare.
  Value *ICmp = Builder.CreateICmpEQ(Next, End);
  Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);

  // Now we have two terminators. Remove the old one from the block.
  Latch->getTerminator()->eraseFromParent();

  return Induction;
}

Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
  if (TripCount)
    return TripCount;

  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  // Find the loop boundaries.
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
         "Invalid loop count");

  Type *IdxTy = Legal->getWidestInductionType();

  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way we can get a backedge-taken count in that case is
  // if the induction variable was signed, and as such it will not overflow,
  // so the truncation is legal.
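  // E.g., a backedge-taken count computed as an i64 SCEV is truncated here
  // when the widest induction type is i32; otherwise it is zero-extended up
  // to the induction type.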
2801 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() > 2802 IdxTy->getPrimitiveSizeInBits()) 2803 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2804 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2805 2806 // Get the total trip count from the count by adding 1. 2807 const SCEV *ExitCount = SE->getAddExpr( 2808 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2809 2810 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2811 2812 // Expand the trip count and place the new instructions in the preheader. 2813 // Notice that the pre-header does not change, only the loop body. 2814 SCEVExpander Exp(*SE, DL, "induction"); 2815 2816 // Count holds the overall loop count (N). 2817 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2818 L->getLoopPreheader()->getTerminator()); 2819 2820 if (TripCount->getType()->isPointerTy()) 2821 TripCount = 2822 CastInst::CreatePointerCast(TripCount, IdxTy, 2823 "exitcount.ptrcnt.to.int", 2824 L->getLoopPreheader()->getTerminator()); 2825 2826 return TripCount; 2827 } 2828 2829 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 2830 if (VectorTripCount) 2831 return VectorTripCount; 2832 2833 Value *TC = getOrCreateTripCount(L); 2834 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2835 2836 // Now we need to generate the expression for N - (N % VF), which is 2837 // the part that the vectorized body will execute. 2838 // The loop step is equal to the vectorization factor (num of SIMD elements) 2839 // times the unroll factor (num of SIMD instructions). 2840 Constant *Step = ConstantInt::get(TC->getType(), VF * UF); 2841 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2842 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2843 2844 return VectorTripCount; 2845 } 2846 2847 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 2848 BasicBlock *Bypass) { 2849 Value *Count = getOrCreateTripCount(L); 2850 BasicBlock *BB = L->getLoopPreheader(); 2851 IRBuilder<> Builder(BB->getTerminator()); 2852 2853 // Generate code to check that the loop's trip count that we computed by 2854 // adding one to the backedge-taken count will not overflow. 2855 Value *CheckMinIters = 2856 Builder.CreateICmpULT(Count, 2857 ConstantInt::get(Count->getType(), VF * UF), 2858 "min.iters.check"); 2859 2860 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), 2861 "min.iters.checked"); 2862 // Update dominator tree immediately if the generated block is a 2863 // LoopBypassBlock because SCEV expansions to generate loop bypass 2864 // checks may query it before the current function is finished. 2865 DT->addNewBlock(NewBB, BB); 2866 if (L->getParentLoop()) 2867 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2868 ReplaceInstWithInst(BB->getTerminator(), 2869 BranchInst::Create(Bypass, NewBB, CheckMinIters)); 2870 LoopBypassBlocks.push_back(BB); 2871 } 2872 2873 void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L, 2874 BasicBlock *Bypass) { 2875 Value *TC = getOrCreateVectorTripCount(L); 2876 BasicBlock *BB = L->getLoopPreheader(); 2877 IRBuilder<> Builder(BB->getTerminator()); 2878 2879 // Now, compare the new count to zero. If it is zero skip the vector loop and 2880 // jump to the scalar loop. 
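  // For illustration: with a trip count of 3 and VF * UF = 4, the vector trip
  // count is n.vec = 3 - (3 % 4) = 0, so this check sends execution straight
  // to the scalar loop.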
  Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
                                    "cmp.zero");

  BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(),
                                          "vector.ph");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, Cmp));
  LoopBypassBlocks.push_back(BB);
}

void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *BB = L->getLoopPreheader();

  // Generate the code to check the SCEV assumptions that we made.
  // We want the new basic block to start at the first instruction in a
  // sequence of instructions that form a check.
  SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
                   "scev.check");
  Value *SCEVCheck =
      Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());

  if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
    if (C->isZero())
      return;

  // Create a new block containing the SCEV check.
  BB->setName("vector.scevcheck");
  auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, SCEVCheck));
  LoopBypassBlocks.push_back(BB);
  AddedSafetyChecks = true;
}

void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
                                               BasicBlock *Bypass) {
  BasicBlock *BB = L->getLoopPreheader();

  // Generate the code that checks in runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  Instruction *FirstCheckInst;
  Instruction *MemRuntimeCheck;
  std::tie(FirstCheckInst, MemRuntimeCheck) =
      Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
  if (!MemRuntimeCheck)
    return;

  // Create a new block containing the memory check.
  BB->setName("vector.memcheck");
  auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
  LoopBypassBlocks.push_back(BB);
  AddedSafetyChecks = true;

  // We currently don't use LoopVersioning for the actual loop cloning but we
  // still use it to add the noalias metadata.
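  // (Sketch of the intended effect: once the runtime checks prove the pointer
  // groups disjoint, LoopVersioning can tag the vector loop's loads and
  // stores with !alias.scope / !noalias metadata, letting later passes
  // reorder them; the checked scalar fallback path carries none.)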
2959 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 2960 PSE.getSE()); 2961 LVer->prepareNoAliasMetadata(); 2962 } 2963 2964 2965 void InnerLoopVectorizer::createEmptyLoop() { 2966 /* 2967 In this function we generate a new loop. The new loop will contain 2968 the vectorized instructions while the old loop will continue to run the 2969 scalar remainder. 2970 2971 [ ] <-- loop iteration number check. 2972 / | 2973 / v 2974 | [ ] <-- vector loop bypass (may consist of multiple blocks). 2975 | / | 2976 | / v 2977 || [ ] <-- vector pre header. 2978 |/ | 2979 | v 2980 | [ ] \ 2981 | [ ]_| <-- vector loop. 2982 | | 2983 | v 2984 | -[ ] <--- middle-block. 2985 | / | 2986 | / v 2987 -|- >[ ] <--- new preheader. 2988 | | 2989 | v 2990 | [ ] \ 2991 | [ ]_| <-- old scalar loop to handle remainder. 2992 \ | 2993 \ v 2994 >[ ] <-- exit block. 2995 ... 2996 */ 2997 2998 BasicBlock *OldBasicBlock = OrigLoop->getHeader(); 2999 BasicBlock *VectorPH = OrigLoop->getLoopPreheader(); 3000 BasicBlock *ExitBlock = OrigLoop->getExitBlock(); 3001 assert(VectorPH && "Invalid loop structure"); 3002 assert(ExitBlock && "Must have an exit block"); 3003 3004 // Some loops have a single integer induction variable, while other loops 3005 // don't. One example is c++ iterators that often have multiple pointer 3006 // induction variables. In the code below we also support a case where we 3007 // don't have a single induction variable. 3008 // 3009 // We try to obtain an induction variable from the original loop as hard 3010 // as possible. However if we don't find one that: 3011 // - is an integer 3012 // - counts from zero, stepping by one 3013 // - is the size of the widest induction variable type 3014 // then we create a new one. 3015 OldInduction = Legal->getInduction(); 3016 Type *IdxTy = Legal->getWidestInductionType(); 3017 3018 // Split the single block loop into the two loop structure described above. 3019 BasicBlock *VecBody = 3020 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body"); 3021 BasicBlock *MiddleBlock = 3022 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block"); 3023 BasicBlock *ScalarPH = 3024 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph"); 3025 3026 // Create and register the new vector loop. 3027 Loop* Lp = new Loop(); 3028 Loop *ParentLoop = OrigLoop->getParentLoop(); 3029 3030 // Insert the new loop into the loop nest and register the new basic blocks 3031 // before calling any utilities such as SCEV that require valid LoopInfo. 3032 if (ParentLoop) { 3033 ParentLoop->addChildLoop(Lp); 3034 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI); 3035 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI); 3036 } else { 3037 LI->addTopLevelLoop(Lp); 3038 } 3039 Lp->addBasicBlockToLoop(VecBody, *LI); 3040 3041 // Find the loop boundaries. 3042 Value *Count = getOrCreateTripCount(Lp); 3043 3044 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3045 3046 // We need to test whether the backedge-taken count is uint##_max. Adding one 3047 // to it will cause overflow and an incorrect loop trip count in the vector 3048 // body. In case of overflow we want to directly jump to the scalar remainder 3049 // loop. 3050 emitMinimumIterationCountCheck(Lp, ScalarPH); 3051 // Now, compare the new count to zero. If it is zero skip the vector loop and 3052 // jump to the scalar loop. 3053 emitVectorLoopEnteredCheck(Lp, ScalarPH); 3054 // Generate the code to check any assumptions that we've made for SCEV 3055 // expressions. 
3056 emitSCEVChecks(Lp, ScalarPH); 3057 3058 // Generate the code that checks in runtime if arrays overlap. We put the 3059 // checks into a separate block to make the more common case of few elements 3060 // faster. 3061 emitMemRuntimeChecks(Lp, ScalarPH); 3062 3063 // Generate the induction variable. 3064 // The loop step is equal to the vectorization factor (num of SIMD elements) 3065 // times the unroll factor (num of SIMD instructions). 3066 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3067 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 3068 Induction = 3069 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3070 getDebugLocFromInstOrOperands(OldInduction)); 3071 3072 // We are going to resume the execution of the scalar loop. 3073 // Go over all of the induction variables that we found and fix the 3074 // PHIs that are left in the scalar version of the loop. 3075 // The starting values of PHI nodes depend on the counter of the last 3076 // iteration in the vectorized loop. 3077 // If we come from a bypass edge then we need to start from the original 3078 // start value. 3079 3080 // This variable saves the new starting index for the scalar loop. It is used 3081 // to test if there are any tail iterations left once the vector loop has 3082 // completed. 3083 LoopVectorizationLegality::InductionList::iterator I, E; 3084 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars(); 3085 for (I = List->begin(), E = List->end(); I != E; ++I) { 3086 PHINode *OrigPhi = I->first; 3087 InductionDescriptor II = I->second; 3088 3089 // Create phi nodes to merge from the backedge-taken check block. 3090 PHINode *BCResumeVal = PHINode::Create(OrigPhi->getType(), 3, 3091 "bc.resume.val", 3092 ScalarPH->getTerminator()); 3093 Value *EndValue; 3094 if (OrigPhi == OldInduction) { 3095 // We know what the end value is. 3096 EndValue = CountRoundDown; 3097 } else { 3098 IRBuilder<> B(LoopBypassBlocks.back()->getTerminator()); 3099 Value *CRD = B.CreateSExtOrTrunc(CountRoundDown, 3100 II.getStepValue()->getType(), 3101 "cast.crd"); 3102 EndValue = II.transform(B, CRD); 3103 EndValue->setName("ind.end"); 3104 } 3105 3106 // The new PHI merges the original incoming value, in case of a bypass, 3107 // or the value at the end of the vectorized loop. 3108 BCResumeVal->addIncoming(EndValue, MiddleBlock); 3109 3110 // Fix the scalar body counter (PHI node). 3111 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH); 3112 3113 // The old induction's phi node in the scalar body needs the truncated 3114 // value. 3115 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 3116 BCResumeVal->addIncoming(II.getStartValue(), LoopBypassBlocks[I]); 3117 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal); 3118 } 3119 3120 // Add a check in the middle block to see if we have completed 3121 // all of the iterations in the first vector loop. 3122 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3123 Value *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 3124 CountRoundDown, "cmp.n", 3125 MiddleBlock->getTerminator()); 3126 ReplaceInstWithInst(MiddleBlock->getTerminator(), 3127 BranchInst::Create(ExitBlock, ScalarPH, CmpN)); 3128 3129 // Get ready to start creating new instructions into the vectorized body. 3130 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt()); 3131 3132 // Save the state. 
3133 LoopVectorPreHeader = Lp->getLoopPreheader(); 3134 LoopScalarPreHeader = ScalarPH; 3135 LoopMiddleBlock = MiddleBlock; 3136 LoopExitBlock = ExitBlock; 3137 LoopVectorBody.push_back(VecBody); 3138 LoopScalarBody = OldBasicBlock; 3139 3140 LoopVectorizeHints Hints(Lp, true); 3141 Hints.setAlreadyVectorized(); 3142 } 3143 3144 namespace { 3145 struct CSEDenseMapInfo { 3146 static bool canHandle(Instruction *I) { 3147 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3148 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3149 } 3150 static inline Instruction *getEmptyKey() { 3151 return DenseMapInfo<Instruction *>::getEmptyKey(); 3152 } 3153 static inline Instruction *getTombstoneKey() { 3154 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3155 } 3156 static unsigned getHashValue(Instruction *I) { 3157 assert(canHandle(I) && "Unknown instruction!"); 3158 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3159 I->value_op_end())); 3160 } 3161 static bool isEqual(Instruction *LHS, Instruction *RHS) { 3162 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3163 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3164 return LHS == RHS; 3165 return LHS->isIdenticalTo(RHS); 3166 } 3167 }; 3168 } 3169 3170 /// \brief Check whether this block is a predicated block. 3171 /// Due to if predication of stores we might create a sequence of "if(pred) a[i] 3172 /// = ...; " blocks. We start with one vectorized basic block. For every 3173 /// conditional block we split this vectorized block. Therefore, every second 3174 /// block will be a predicated one. 3175 static bool isPredicatedBlock(unsigned BlockNum) { 3176 return BlockNum % 2; 3177 } 3178 3179 ///\brief Perform cse of induction variable instructions. 3180 static void cse(SmallVector<BasicBlock *, 4> &BBs) { 3181 // Perform simple cse. 3182 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3183 for (unsigned i = 0, e = BBs.size(); i != e; ++i) { 3184 BasicBlock *BB = BBs[i]; 3185 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3186 Instruction *In = &*I++; 3187 3188 if (!CSEDenseMapInfo::canHandle(In)) 3189 continue; 3190 3191 // Check if we can replace this instruction with any of the 3192 // visited instructions. 3193 if (Instruction *V = CSEMap.lookup(In)) { 3194 In->replaceAllUsesWith(V); 3195 In->eraseFromParent(); 3196 continue; 3197 } 3198 // Ignore instructions in conditional blocks. We create "if (pred) a[i] = 3199 // ...;" blocks for predicated stores. Every second block is a predicated 3200 // block. 3201 if (isPredicatedBlock(i)) 3202 continue; 3203 3204 CSEMap[In] = In; 3205 } 3206 } 3207 } 3208 3209 /// \brief Adds a 'fast' flag to floating point operations. 3210 static Value *addFastMathFlag(Value *V) { 3211 if (isa<FPMathOperator>(V)){ 3212 FastMathFlags Flags; 3213 Flags.setUnsafeAlgebra(); 3214 cast<Instruction>(V)->setFastMathFlags(Flags); 3215 } 3216 return V; 3217 } 3218 3219 /// Estimate the overhead of scalarizing a value. Insert and Extract are set if 3220 /// the result needs to be inserted and/or extracted from vectors. 
3221 static unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract, 3222 const TargetTransformInfo &TTI) { 3223 if (Ty->isVoidTy()) 3224 return 0; 3225 3226 assert(Ty->isVectorTy() && "Can only scalarize vectors"); 3227 unsigned Cost = 0; 3228 3229 for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) { 3230 if (Insert) 3231 Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, i); 3232 if (Extract) 3233 Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, i); 3234 } 3235 3236 return Cost; 3237 } 3238 3239 // Estimate cost of a call instruction CI if it were vectorized with factor VF. 3240 // Return the cost of the instruction, including scalarization overhead if it's 3241 // needed. The flag NeedToScalarize shows if the call needs to be scalarized - 3242 // i.e. either vector version isn't available, or is too expensive. 3243 static unsigned getVectorCallCost(CallInst *CI, unsigned VF, 3244 const TargetTransformInfo &TTI, 3245 const TargetLibraryInfo *TLI, 3246 bool &NeedToScalarize) { 3247 Function *F = CI->getCalledFunction(); 3248 StringRef FnName = CI->getCalledFunction()->getName(); 3249 Type *ScalarRetTy = CI->getType(); 3250 SmallVector<Type *, 4> Tys, ScalarTys; 3251 for (auto &ArgOp : CI->arg_operands()) 3252 ScalarTys.push_back(ArgOp->getType()); 3253 3254 // Estimate cost of scalarized vector call. The source operands are assumed 3255 // to be vectors, so we need to extract individual elements from there, 3256 // execute VF scalar calls, and then gather the result into the vector return 3257 // value. 3258 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3259 if (VF == 1) 3260 return ScalarCallCost; 3261 3262 // Compute corresponding vector type for return value and arguments. 3263 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3264 for (unsigned i = 0, ie = ScalarTys.size(); i != ie; ++i) 3265 Tys.push_back(ToVectorTy(ScalarTys[i], VF)); 3266 3267 // Compute costs of unpacking argument values for the scalar calls and 3268 // packing the return values to a vector. 3269 unsigned ScalarizationCost = 3270 getScalarizationOverhead(RetTy, true, false, TTI); 3271 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) 3272 ScalarizationCost += getScalarizationOverhead(Tys[i], false, true, TTI); 3273 3274 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3275 3276 // If we can't emit a vector call for this function, then the currently found 3277 // cost is the cost we need to return. 3278 NeedToScalarize = true; 3279 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3280 return Cost; 3281 3282 // If the corresponding vector cost is cheaper, return its cost. 3283 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3284 if (VectorCallCost < Cost) { 3285 NeedToScalarize = false; 3286 return VectorCallCost; 3287 } 3288 return Cost; 3289 } 3290 3291 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3292 // factor VF. Return the cost of the instruction, including scalarization 3293 // overhead if it's needed. 
3294 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3295 const TargetTransformInfo &TTI, 3296 const TargetLibraryInfo *TLI) { 3297 Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI); 3298 assert(ID && "Expected intrinsic call!"); 3299 3300 Type *RetTy = ToVectorTy(CI->getType(), VF); 3301 SmallVector<Type *, 4> Tys; 3302 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) 3303 Tys.push_back(ToVectorTy(CI->getArgOperand(i)->getType(), VF)); 3304 3305 return TTI.getIntrinsicInstrCost(ID, RetTy, Tys); 3306 } 3307 3308 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3309 IntegerType *I1 = cast<IntegerType>(T1->getVectorElementType()); 3310 IntegerType *I2 = cast<IntegerType>(T2->getVectorElementType()); 3311 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3312 } 3313 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3314 IntegerType *I1 = cast<IntegerType>(T1->getVectorElementType()); 3315 IntegerType *I2 = cast<IntegerType>(T2->getVectorElementType()); 3316 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3317 } 3318 3319 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3320 // For every instruction `I` in MinBWs, truncate the operands, create a 3321 // truncated version of `I` and reextend its result. InstCombine runs 3322 // later and will remove any ext/trunc pairs. 3323 // 3324 for (auto &KV : MinBWs) { 3325 VectorParts &Parts = WidenMap.get(KV.first); 3326 for (Value *&I : Parts) { 3327 if (I->use_empty()) 3328 continue; 3329 Type *OriginalTy = I->getType(); 3330 Type *ScalarTruncatedTy = IntegerType::get(OriginalTy->getContext(), 3331 KV.second); 3332 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3333 OriginalTy->getVectorNumElements()); 3334 if (TruncatedTy == OriginalTy) 3335 continue; 3336 3337 if (!isa<Instruction>(I)) 3338 continue; 3339 3340 IRBuilder<> B(cast<Instruction>(I)); 3341 auto ShrinkOperand = [&](Value *V) -> Value* { 3342 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3343 if (ZI->getSrcTy() == TruncatedTy) 3344 return ZI->getOperand(0); 3345 return B.CreateZExtOrTrunc(V, TruncatedTy); 3346 }; 3347 3348 // The actual instruction modification depends on the instruction type, 3349 // unfortunately. 
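      // A sketch of the intended rewrite (types and values assumed): if
      // MinBWs records that an i32 add only needs 8 bits, the code below
      // effectively turns
      //   %s = add <4 x i32> %a, %b
      // into
      //   %a8 = trunc <4 x i32> %a to <4 x i8>
      //   %b8 = trunc <4 x i32> %b to <4 x i8>
      //   %s8 = add <4 x i8> %a8, %b8
      //   %s  = zext <4 x i8> %s8 to <4 x i32>
      // counting on InstCombine to remove ext/trunc pairs that cancel out.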
      Value *NewI = nullptr;
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
        NewI = B.CreateBinOp(BO->getOpcode(),
                             ShrinkOperand(BO->getOperand(0)),
                             ShrinkOperand(BO->getOperand(1)));
        cast<BinaryOperator>(NewI)->copyIRFlags(I);
      } else if (ICmpInst *CI = dyn_cast<ICmpInst>(I)) {
        NewI = B.CreateICmp(CI->getPredicate(),
                            ShrinkOperand(CI->getOperand(0)),
                            ShrinkOperand(CI->getOperand(1)));
      } else if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
        NewI = B.CreateSelect(SI->getCondition(),
                              ShrinkOperand(SI->getTrueValue()),
                              ShrinkOperand(SI->getFalseValue()));
      } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
        switch (CI->getOpcode()) {
        default: llvm_unreachable("Unhandled cast!");
        case Instruction::Trunc:
          NewI = ShrinkOperand(CI->getOperand(0));
          break;
        case Instruction::SExt:
          NewI = B.CreateSExtOrTrunc(CI->getOperand(0),
                                     smallestIntegerVectorType(OriginalTy,
                                                               TruncatedTy));
          break;
        case Instruction::ZExt:
          NewI = B.CreateZExtOrTrunc(CI->getOperand(0),
                                     smallestIntegerVectorType(OriginalTy,
                                                               TruncatedTy));
          break;
        }
      } else if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(I)) {
        auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 =
            B.CreateZExtOrTrunc(SI->getOperand(0),
                                VectorType::get(ScalarTruncatedTy, Elements0));
        auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
        auto *O1 =
            B.CreateZExtOrTrunc(SI->getOperand(1),
                                VectorType::get(ScalarTruncatedTy, Elements1));

        NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
      } else if (isa<LoadInst>(I)) {
        // Don't do anything with the operands, just extend the result.
        continue;
      } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
        auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
        NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
      } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
        auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        NewI = B.CreateExtractElement(O0, EE->getOperand(2));
      } else {
        llvm_unreachable("Unhandled instruction type!");
      }

      // Lastly, extend the result.
      NewI->takeName(cast<Instruction>(I));
      Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
      I->replaceAllUsesWith(Res);
      cast<Instruction>(I)->eraseFromParent();
      I = Res;
    }
  }

  // We'll have created a bunch of ZExts that are now unused. Clean them up.
  for (auto &KV : MinBWs) {
    VectorParts &Parts = WidenMap.get(KV.first);
    for (Value *&I : Parts) {
      ZExtInst *Inst = dyn_cast<ZExtInst>(I);
      if (Inst && Inst->use_empty()) {
        Value *NewI = Inst->getOperand(0);
        Inst->eraseFromParent();
        I = NewI;
      }
    }
  }
}

void InnerLoopVectorizer::vectorizeLoop() {
  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
  //
  //===------------------------------------------------===//
  Constant *Zero = Builder.getInt32(0);

  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. First,
  // we create a new vector PHI node with no incoming edges. We use this value
  // when we vectorize all of the instructions that use the PHI. Next, after
  // all of the instructions in the block are complete we add the new incoming
  // edges to the PHI. At this point all of the instructions in the basic block
  // are vectorized, so we can use them to construct the PHI.
  PhiVector PHIsToFix;

  // Scan the loop in a topological order to ensure that defs are vectorized
  // before users.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  // Vectorize all of the blocks in the original loop.
  for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(),
       be = DFS.endRPO(); bb != be; ++bb)
    vectorizeBlockInLoop(*bb, &PHIsToFix);

  // Insert truncates and extends for any truncated instructions as hints to
  // InstCombine.
  if (VF > 1)
    truncateToMinimalBitwidths();

  // At this point every instruction in the original loop is widened to a
  // vector form. Now we need to fix the recurrences in PHIsToFix. These PHI
  // nodes are currently empty because we did not want to introduce cycles.
  // This is the second stage of vectorizing recurrences.
  for (PHINode *Phi : PHIsToFix) {
    assert(Phi && "Unable to recover vectorized PHI");

    // Handle first-order recurrences that need to be fixed.
    if (Legal->isFirstOrderRecurrence(Phi)) {
      fixFirstOrderRecurrence(Phi);
      continue;
    }

    // If the phi node is not a first-order recurrence, it must be a reduction.
    // Get its reduction variable descriptor.
    assert(Legal->isReductionVariable(Phi) &&
           "Unable to find the reduction variable");
    RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];

    RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
    TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
    Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
    RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
        RdxDesc.getMinMaxRecurrenceKind();
    setDebugLocFromInst(Builder, ReductionStartValue);

    // We need to generate a reduction vector from the incoming scalar.
    // To do so, we need to generate the 'identity' vector and override
    // one of the elements with the incoming scalar reduction. We need
    // to do it in the vector-loop preheader.
    Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());

    // This is the vector-clone of the value that leaves the loop.
    VectorParts &VectorExit = getVectorValue(LoopExitInst);
    Type *VecTy = VectorExit[0]->getType();

    // Find the reduction identity variable. Zero for addition, or, xor,
    // one for multiplication, -1 for And.
    Value *Identity;
    Value *VectorStart;
    if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
        RK == RecurrenceDescriptor::RK_FloatMinMax) {
      // MinMax reductions have the start value as their identity.
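      // For illustration (values assumed, VF = 4): an smin reduction with
      // start value %s splats %s into every lane for both Identity and
      // VectorStart, since repeating the start value cannot change the final
      // minimum; an integer add reduction (handled below) instead uses
      // Identity = zeroinitializer and VectorStart = <i32 %s, 0, 0, 0>.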
      if (VF == 1) {
        VectorStart = Identity = ReductionStartValue;
      } else {
        VectorStart = Identity =
            Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
      }
    } else {
      // Handle other reduction kinds:
      Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
          RK, VecTy->getScalarType());
      if (VF == 1) {
        Identity = Iden;
        // This vector is the Identity vector where the first element is the
        // incoming scalar reduction.
        VectorStart = ReductionStartValue;
      } else {
        Identity = ConstantVector::getSplat(VF, Iden);

        // This vector is the Identity vector where the first element is the
        // incoming scalar reduction.
        VectorStart =
            Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
      }
    }

    // Fix the vector-loop phi.

    // Reductions do not have to start at zero. They can start with
    // any loop invariant values.
    VectorParts &VecRdxPhi = WidenMap.get(Phi);
    BasicBlock *Latch = OrigLoop->getLoopLatch();
    Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
    VectorParts &Val = getVectorValue(LoopVal);
    for (unsigned part = 0; part < UF; ++part) {
      // Make sure to add the reduction start value only to the
      // first unroll part.
      Value *StartVal = (part == 0) ? VectorStart : Identity;
      cast<PHINode>(VecRdxPhi[part])->addIncoming(StartVal,
                                                  LoopVectorPreHeader);
      cast<PHINode>(VecRdxPhi[part])->addIncoming(Val[part],
                                                  LoopVectorBody.back());
    }

    // Before each round, move the insertion point right between
    // the PHIs and the values we are going to write.
    // This allows us to write both PHINodes and the extractelement
    // instructions.
    Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());

    VectorParts RdxParts = getVectorValue(LoopExitInst);
    setDebugLocFromInst(Builder, LoopExitInst);

    // If the vector reduction can be performed in a smaller type, we truncate
    // then extend the loop exit value to enable InstCombine to evaluate the
    // entire expression in the smaller type.
    if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
      Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
      Builder.SetInsertPoint(LoopVectorBody.back()->getTerminator());
      for (unsigned part = 0; part < UF; ++part) {
        Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
        Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
                                          : Builder.CreateZExt(Trunc, VecTy);
        for (Value::user_iterator UI = RdxParts[part]->user_begin();
             UI != RdxParts[part]->user_end();)
          if (*UI != Trunc) {
            (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd);
            RdxParts[part] = Extnd;
          } else {
            ++UI;
          }
      }
      Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
      for (unsigned part = 0; part < UF; ++part)
        RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
    }

    // Reduce all of the unrolled parts into a single vector.
    Value *ReducedPartRdx = RdxParts[0];
    unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
    setDebugLocFromInst(Builder, ReducedPartRdx);
    for (unsigned part = 1; part < UF; ++part) {
      if (Op != Instruction::ICmp && Op != Instruction::FCmp)
        // Floating point operations had to be 'fast' to enable the reduction.
3593 ReducedPartRdx = addFastMathFlag( 3594 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part], 3595 ReducedPartRdx, "bin.rdx")); 3596 else 3597 ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp( 3598 Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]); 3599 } 3600 3601 if (VF > 1) { 3602 // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles 3603 // and vector ops, reducing the set of values being computed by half each 3604 // round. 3605 assert(isPowerOf2_32(VF) && 3606 "Reduction emission only supported for pow2 vectors!"); 3607 Value *TmpVec = ReducedPartRdx; 3608 SmallVector<Constant*, 32> ShuffleMask(VF, nullptr); 3609 for (unsigned i = VF; i != 1; i >>= 1) { 3610 // Move the upper half of the vector to the lower half. 3611 for (unsigned j = 0; j != i/2; ++j) 3612 ShuffleMask[j] = Builder.getInt32(i/2 + j); 3613 3614 // Fill the rest of the mask with undef. 3615 std::fill(&ShuffleMask[i/2], ShuffleMask.end(), 3616 UndefValue::get(Builder.getInt32Ty())); 3617 3618 Value *Shuf = 3619 Builder.CreateShuffleVector(TmpVec, 3620 UndefValue::get(TmpVec->getType()), 3621 ConstantVector::get(ShuffleMask), 3622 "rdx.shuf"); 3623 3624 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3625 // Floating point operations had to be 'fast' to enable the reduction. 3626 TmpVec = addFastMathFlag(Builder.CreateBinOp( 3627 (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx")); 3628 else 3629 TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind, 3630 TmpVec, Shuf); 3631 } 3632 3633 // The result is in the first element of the vector. 3634 ReducedPartRdx = Builder.CreateExtractElement(TmpVec, 3635 Builder.getInt32(0)); 3636 3637 // If the reduction can be performed in a smaller type, we need to extend 3638 // the reduction to the wider type before we branch to the original loop. 3639 if (Phi->getType() != RdxDesc.getRecurrenceType()) 3640 ReducedPartRdx = 3641 RdxDesc.isSigned() 3642 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 3643 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 3644 } 3645 3646 // Create a phi node that merges control-flow from the backedge-taken check 3647 // block and the middle block. 3648 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 3649 LoopScalarPreHeader->getTerminator()); 3650 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 3651 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 3652 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3653 3654 // Now, we need to fix the users of the reduction variable 3655 // inside and outside of the scalar remainder loop. 3656 // We know that the loop is in LCSSA form. We need to update the 3657 // PHI nodes in the exit blocks. 3658 for (BasicBlock::iterator LEI = LoopExitBlock->begin(), 3659 LEE = LoopExitBlock->end(); LEI != LEE; ++LEI) { 3660 PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI); 3661 if (!LCSSAPhi) break; 3662 3663 // All PHINodes need to have a single entry edge, or two if 3664 // we already fixed them. 3665 assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 3666 3667 // We found our reduction value exit-PHI. Update it with the 3668 // incoming bypass edge. 3669 if (LCSSAPhi->getIncomingValue(0) == LoopExitInst) { 3670 // Add an edge coming from the bypass. 3671 LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3672 break; 3673 } 3674 }// end of the LCSSA phi scan. 
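    // To recap the shuffle-based reduction emitted above (illustrative,
    // VF = 4, integer add): starting from %r = <a, b, c, d>,
    //   %s1 = shufflevector %r,  undef, <2, 3, undef, undef>
    //   %r1 = add <4 x i32> %r, %s1        ; lanes: a+c, b+d, x, x
    //   %s2 = shufflevector %r1, undef, <1, undef, undef, undef>
    //   %r2 = add <4 x i32> %r1, %s2       ; lane 0: a+b+c+d
    // and the scalar result is read with extractelement %r2, i32 0.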

    // Fix the scalar loop reduction variable with the incoming reduction sum
    // from the vector body and from the backedge value.
    int IncomingEdgeBlockIdx =
        Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
    assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
    // Pick the other block.
    int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
    Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
    Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
  } // end of for each Phi in PHIsToFix.

  fixLCSSAPHIs();

  // Make sure DomTree is updated.
  updateAnalysis();

  // Predicate any stores.
  for (auto KV : PredicatedStores) {
    BasicBlock::iterator I(KV.first);
    auto *BB = SplitBlock(I->getParent(), &*std::next(I), DT, LI);
    auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false,
                                        /*BranchWeights=*/nullptr, DT, LI);
    I->moveBefore(T);
    I->getParent()->setName("pred.store.if");
    BB->setName("pred.store.continue");
  }
  DEBUG(DT->verifyDomTree());
  // Remove redundant induction instructions.
  cse(LoopVectorBody);
}

void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {

  // This is the second phase of vectorizing first-order recurrences. An
  // overview of the transformation is described below. Suppose we have the
  // following loop.
  //
  //   for (int i = 0; i < n; ++i)
  //     b[i] = a[i] - a[i - 1];
  //
  // There is a first-order recurrence on "a". For this loop, the shorthand
  // scalar IR looks like:
  //
  //   scalar.ph:
  //     s_init = a[-1]
  //     br scalar.body
  //
  //   scalar.body:
  //     i = phi [0, scalar.ph], [i+1, scalar.body]
  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //     s2 = a[i]
  //     b[i] = s2 - s1
  //     br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // temporary value for s1. We now complete the vectorization and produce the
  // shorthand vector IR shown below (for VF = 4, UF = 1).
  //
  //   vector.ph:
  //     v_init = vector(..., ..., ..., a[-1])
  //     br vector.body
  //
  //   vector.body
  //     i = phi [0, vector.ph], [i+4, vector.body]
  //     v1 = phi [v_init, vector.ph], [v2, vector.body]
  //     v2 = a[i, i+1, i+2, i+3];
  //     v3 = vector(v1(3), v2(0, 1, 2))
  //     b[i, i+1, i+2, i+3] = v2 - v3
  //     br cond, vector.body, middle.block
  //
  //   middle.block:
  //     x = v2(3)
  //     br scalar.ph
  //
  //   scalar.ph:
  //     s_init = phi [x, middle.block], [a[-1], otherwise]
  //     br scalar.body
  //
  // After the vector loop completes, we extract the next value of the
  // recurrence (x) to use as the initial value in the scalar loop.

  // Get the original loop preheader and single loop latch.
  auto *Preheader = OrigLoop->getLoopPreheader();
  auto *Latch = OrigLoop->getLoopLatch();

  // Get the initial and previous values of the scalar recurrence.
  auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
  auto *Previous = Phi->getIncomingValueForBlock(Latch);

  // Create a vector from the initial value.
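  // For example (assumed operand names, VF = 4): the scalar initial value
  // %s_init lands in the last lane of an otherwise-undef vector, since only
  // that lane feeds the first shuffle in the vector body:
  //   %vector.recur.init = insertelement <4 x i32> undef, i32 %s_init, i32 3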
  auto *VectorInit = ScalarInit;
  if (VF > 1) {
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
    VectorInit = Builder.CreateInsertElement(
        UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
        Builder.getInt32(VF - 1), "vector.recur.init");
  }

  // We constructed a temporary phi node in the first phase of vectorization.
  // This phi node will eventually be deleted.
  auto &PhiParts = getVectorValue(Phi);
  Builder.SetInsertPoint(cast<Instruction>(PhiParts[0]));

  // Create a phi node for the new recurrence. The current value will either be
  // the initial value inserted into a vector or a loop-varying vector value.
  auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
  VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);

  // Get the vectorized previous value. We ensured the previous value was an
  // instruction when detecting the recurrence.
  auto &PreviousParts = getVectorValue(Previous);

  // Set the insertion point to be after this instruction. We ensured the
  // previous value dominated all uses of the phi when detecting the
  // recurrence.
  Builder.SetInsertPoint(
      &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1])));

  // We will construct a vector for the recurrence by combining the values for
  // the current and previous iterations. This is the required shuffle mask.
  SmallVector<Constant *, 8> ShuffleMask(VF);
  ShuffleMask[0] = Builder.getInt32(VF - 1);
  for (unsigned I = 1; I < VF; ++I)
    ShuffleMask[I] = Builder.getInt32(I + VF - 1);

  // The vector from which to take the initial value for the current iteration
  // (actual or unrolled). Initially, this is the vector phi node.
  Value *Incoming = VecPhi;

  // Shuffle the current and previous vector and update the vector parts.
  for (unsigned Part = 0; Part < UF; ++Part) {
    auto *Shuffle =
        VF > 1
            ? Builder.CreateShuffleVector(Incoming, PreviousParts[Part],
                                          ConstantVector::get(ShuffleMask))
            : Incoming;
    PhiParts[Part]->replaceAllUsesWith(Shuffle);
    cast<Instruction>(PhiParts[Part])->eraseFromParent();
    PhiParts[Part] = Shuffle;
    Incoming = PreviousParts[Part];
  }

  // Fix the latch value of the new recurrence in the vector loop.
  VecPhi->addIncoming(Incoming,
                      LI->getLoopFor(LoopVectorBody[0])->getLoopLatch());

  // Extract the last vector element in the middle block. This will be the
  // initial value for the recurrence when jumping to the scalar loop.
  auto *Extract = Incoming;
  if (VF > 1) {
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    Extract = Builder.CreateExtractElement(Extract, Builder.getInt32(VF - 1),
                                           "vector.recur.extract");
  }

  // Fix the initial value of the original recurrence in the scalar loop.
  Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
  auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
  for (auto *BB : predecessors(LoopScalarPreHeader)) {
    auto *Incoming = BB == LoopMiddleBlock ? Extract : ScalarInit;
    Start->addIncoming(Incoming, BB);
  }

  Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
  Phi->setName("scalar.recur");

  // Finally, fix users of the recurrence outside the loop.
The users will need 3844 // either the last value of the scalar recurrence or the last value of the 3845 // vector recurrence we extracted in the middle block. Since the loop is in 3846 // LCSSA form, we just need to find the phi node for the original scalar 3847 // recurrence in the exit block, and then add an edge for the middle block. 3848 for (auto &I : *LoopExitBlock) { 3849 auto *LCSSAPhi = dyn_cast<PHINode>(&I); 3850 if (!LCSSAPhi) 3851 break; 3852 if (LCSSAPhi->getIncomingValue(0) == Phi) { 3853 LCSSAPhi->addIncoming(Extract, LoopMiddleBlock); 3854 break; 3855 } 3856 } 3857 } 3858 3859 void InnerLoopVectorizer::fixLCSSAPHIs() { 3860 for (BasicBlock::iterator LEI = LoopExitBlock->begin(), 3861 LEE = LoopExitBlock->end(); LEI != LEE; ++LEI) { 3862 PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI); 3863 if (!LCSSAPhi) break; 3864 if (LCSSAPhi->getNumIncomingValues() == 1) 3865 LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()), 3866 LoopMiddleBlock); 3867 } 3868 } 3869 3870 InnerLoopVectorizer::VectorParts 3871 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) { 3872 assert(std::find(pred_begin(Dst), pred_end(Dst), Src) != pred_end(Dst) && 3873 "Invalid edge"); 3874 3875 // Look for cached value. 3876 std::pair<BasicBlock*, BasicBlock*> Edge(Src, Dst); 3877 EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge); 3878 if (ECEntryIt != MaskCache.end()) 3879 return ECEntryIt->second; 3880 3881 VectorParts SrcMask = createBlockInMask(Src); 3882 3883 // The terminator has to be a branch inst! 3884 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 3885 assert(BI && "Unexpected terminator found"); 3886 3887 if (BI->isConditional()) { 3888 VectorParts EdgeMask = getVectorValue(BI->getCondition()); 3889 3890 if (BI->getSuccessor(0) != Dst) 3891 for (unsigned part = 0; part < UF; ++part) 3892 EdgeMask[part] = Builder.CreateNot(EdgeMask[part]); 3893 3894 for (unsigned part = 0; part < UF; ++part) 3895 EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]); 3896 3897 MaskCache[Edge] = EdgeMask; 3898 return EdgeMask; 3899 } 3900 3901 MaskCache[Edge] = SrcMask; 3902 return SrcMask; 3903 } 3904 3905 InnerLoopVectorizer::VectorParts 3906 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) { 3907 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 3908 3909 // Loop incoming mask is all-one. 3910 if (OrigLoop->getHeader() == BB) { 3911 Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1); 3912 return getVectorValue(C); 3913 } 3914 3915 // This is the block mask. We OR all incoming edges, and with zero. 3916 Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0); 3917 VectorParts BlockMask = getVectorValue(Zero); 3918 3919 // For each pred: 3920 for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) { 3921 VectorParts EM = createEdgeMask(*it, BB); 3922 for (unsigned part = 0; part < UF; ++part) 3923 BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]); 3924 } 3925 3926 return BlockMask; 3927 } 3928 3929 void InnerLoopVectorizer::widenPHIInstruction( 3930 Instruction *PN, InnerLoopVectorizer::VectorParts &Entry, unsigned UF, 3931 unsigned VF, PhiVector *PV) { 3932 PHINode* P = cast<PHINode>(PN); 3933 // Handle recurrences. 3934 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) { 3935 for (unsigned part = 0; part < UF; ++part) { 3936 // This is phase one of vectorizing PHIs. 3937 Type *VecTy = (VF == 1) ? 
PN->getType() : 3938 VectorType::get(PN->getType(), VF); 3939 Entry[part] = PHINode::Create( 3940 VecTy, 2, "vec.phi", &*LoopVectorBody.back()->getFirstInsertionPt()); 3941 } 3942 PV->push_back(P); 3943 return; 3944 } 3945 3946 setDebugLocFromInst(Builder, P); 3947 // Check for PHI nodes that are lowered to vector selects. 3948 if (P->getParent() != OrigLoop->getHeader()) { 3949 // We know that all PHIs in non-header blocks are converted into 3950 // selects, so we don't have to worry about the insertion order and we 3951 // can just use the builder. 3952 // At this point we generate the predication tree. There may be 3953 // duplications since this is a simple recursive scan, but future 3954 // optimizations will clean it up. 3955 3956 unsigned NumIncoming = P->getNumIncomingValues(); 3957 3958 // Generate a sequence of selects of the form: 3959 // SELECT(Mask3, In3, 3960 // SELECT(Mask2, In2, 3961 // ( ...))) 3962 for (unsigned In = 0; In < NumIncoming; In++) { 3963 VectorParts Cond = createEdgeMask(P->getIncomingBlock(In), 3964 P->getParent()); 3965 VectorParts &In0 = getVectorValue(P->getIncomingValue(In)); 3966 3967 for (unsigned part = 0; part < UF; ++part) { 3968 // We might have single edge PHIs (blocks) - use an identity 3969 // 'select' for the first PHI operand. 3970 if (In == 0) 3971 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], 3972 In0[part]); 3973 else 3974 // Select between the current value and the previous incoming edge 3975 // based on the incoming mask. 3976 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], 3977 Entry[part], "predphi"); 3978 } 3979 } 3980 return; 3981 } 3982 3983 // This PHINode must be an induction variable. 3984 // Make sure that we know about it. 3985 assert(Legal->getInductionVars()->count(P) && 3986 "Not an induction variable"); 3987 3988 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 3989 3990 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 3991 // which can be found from the original scalar operations. 3992 switch (II.getKind()) { 3993 case InductionDescriptor::IK_NoInduction: 3994 llvm_unreachable("Unknown induction"); 3995 case InductionDescriptor::IK_IntInduction: { 3996 assert(P->getType() == II.getStartValue()->getType() && 3997 "Types must match"); 3998 // Handle other induction variables that are now based on the 3999 // canonical one. 4000 Value *V = Induction; 4001 if (P != OldInduction) { 4002 V = Builder.CreateSExtOrTrunc(Induction, P->getType()); 4003 V = II.transform(Builder, V); 4004 V->setName("offset.idx"); 4005 } 4006 Value *Broadcasted = getBroadcastInstrs(V); 4007 // After broadcasting the induction variable we need to make the vector 4008 // consecutive by adding 0, 1, 2, etc. 4009 for (unsigned part = 0; part < UF; ++part) 4010 Entry[part] = getStepVector(Broadcasted, VF * part, II.getStepValue()); 4011 return; 4012 } 4013 case InductionDescriptor::IK_PtrInduction: 4014 // Handle the pointer induction variable case. 4015 assert(P->getType()->isPointerTy() && "Unexpected type."); 4016 // This is the normalized GEP that starts counting at zero. 4017 Value *PtrInd = Induction; 4018 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStepValue()->getType()); 4019 // This is the vector of results. Notice that we don't generate 4020 // vector geps because scalar geps result in better code. 
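    // A sketch of the scalar expansion below (names and types assumed,
    // VF = 4, unit step): each lane gets its own scalar GEP,
    //   %idx.1      = add i64 %ptr.ind, 1
    //   %next.gep   = getelementptr i32, i32* %base, i64 %ptr.ind
    //   %next.gep.1 = getelementptr i32, i32* %base, i64 %idx.1
    //   ...
    // and for VF > 1 the lanes are packed back together with insertelement.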
4021 for (unsigned part = 0; part < UF; ++part) { 4022 if (VF == 1) { 4023 int EltIndex = part; 4024 Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex); 4025 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4026 Value *SclrGep = II.transform(Builder, GlobalIdx); 4027 SclrGep->setName("next.gep"); 4028 Entry[part] = SclrGep; 4029 continue; 4030 } 4031 4032 Value *VecVal = UndefValue::get(VectorType::get(P->getType(), VF)); 4033 for (unsigned int i = 0; i < VF; ++i) { 4034 int EltIndex = i + part * VF; 4035 Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex); 4036 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4037 Value *SclrGep = II.transform(Builder, GlobalIdx); 4038 SclrGep->setName("next.gep"); 4039 VecVal = Builder.CreateInsertElement(VecVal, SclrGep, 4040 Builder.getInt32(i), 4041 "insert.gep"); 4042 } 4043 Entry[part] = VecVal; 4044 } 4045 return; 4046 } 4047 } 4048 4049 void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) { 4050 // For each instruction in the old loop. 4051 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 4052 VectorParts &Entry = WidenMap.get(&*it); 4053 4054 switch (it->getOpcode()) { 4055 case Instruction::Br: 4056 // Nothing to do for PHIs and BR, since we already took care of the 4057 // loop control flow instructions. 4058 continue; 4059 case Instruction::PHI: { 4060 // Vectorize PHINodes. 4061 widenPHIInstruction(&*it, Entry, UF, VF, PV); 4062 continue; 4063 }// End of PHI. 4064 4065 case Instruction::Add: 4066 case Instruction::FAdd: 4067 case Instruction::Sub: 4068 case Instruction::FSub: 4069 case Instruction::Mul: 4070 case Instruction::FMul: 4071 case Instruction::UDiv: 4072 case Instruction::SDiv: 4073 case Instruction::FDiv: 4074 case Instruction::URem: 4075 case Instruction::SRem: 4076 case Instruction::FRem: 4077 case Instruction::Shl: 4078 case Instruction::LShr: 4079 case Instruction::AShr: 4080 case Instruction::And: 4081 case Instruction::Or: 4082 case Instruction::Xor: { 4083 // Just widen binops. 4084 BinaryOperator *BinOp = dyn_cast<BinaryOperator>(it); 4085 setDebugLocFromInst(Builder, BinOp); 4086 VectorParts &A = getVectorValue(it->getOperand(0)); 4087 VectorParts &B = getVectorValue(it->getOperand(1)); 4088 4089 // Use this vector value for all users of the original instruction. 4090 for (unsigned Part = 0; Part < UF; ++Part) { 4091 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]); 4092 4093 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V)) 4094 VecOp->copyIRFlags(BinOp); 4095 4096 Entry[Part] = V; 4097 } 4098 4099 addMetadata(Entry, &*it); 4100 break; 4101 } 4102 case Instruction::Select: { 4103 // Widen selects. 4104 // If the selector is loop invariant we can create a select 4105 // instruction with a scalar condition. Otherwise, use vector-select. 4106 auto *SE = PSE.getSE(); 4107 bool InvariantCond = 4108 SE->isLoopInvariant(PSE.getSCEV(it->getOperand(0)), OrigLoop); 4109 setDebugLocFromInst(Builder, &*it); 4110 4111 // The condition can be loop invariant but still defined inside the 4112 // loop. This means that we can't just use the original 'cond' value. 4113 // We have to take the 'vectorized' value and pick the first lane. 4114 // Instcombine will make this a no-op. 4115 VectorParts &Cond = getVectorValue(it->getOperand(0)); 4116 VectorParts &Op0 = getVectorValue(it->getOperand(1)); 4117 VectorParts &Op1 = getVectorValue(it->getOperand(2)); 4118 4119 Value *ScalarCond = (VF == 1) ? 
Cond[0] : 4120 Builder.CreateExtractElement(Cond[0], Builder.getInt32(0)); 4121 4122 for (unsigned Part = 0; Part < UF; ++Part) { 4123 Entry[Part] = Builder.CreateSelect( 4124 InvariantCond ? ScalarCond : Cond[Part], 4125 Op0[Part], 4126 Op1[Part]); 4127 } 4128 4129 addMetadata(Entry, &*it); 4130 break; 4131 } 4132 4133 case Instruction::ICmp: 4134 case Instruction::FCmp: { 4135 // Widen compares. Generate vector compares. 4136 bool FCmp = (it->getOpcode() == Instruction::FCmp); 4137 CmpInst *Cmp = dyn_cast<CmpInst>(it); 4138 setDebugLocFromInst(Builder, &*it); 4139 VectorParts &A = getVectorValue(it->getOperand(0)); 4140 VectorParts &B = getVectorValue(it->getOperand(1)); 4141 for (unsigned Part = 0; Part < UF; ++Part) { 4142 Value *C = nullptr; 4143 if (FCmp) { 4144 C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]); 4145 cast<FCmpInst>(C)->copyFastMathFlags(&*it); 4146 } else { 4147 C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]); 4148 } 4149 Entry[Part] = C; 4150 } 4151 4152 addMetadata(Entry, &*it); 4153 break; 4154 } 4155 4156 case Instruction::Store: 4157 case Instruction::Load: 4158 vectorizeMemoryInstruction(&*it); 4159 break; 4160 case Instruction::ZExt: 4161 case Instruction::SExt: 4162 case Instruction::FPToUI: 4163 case Instruction::FPToSI: 4164 case Instruction::FPExt: 4165 case Instruction::PtrToInt: 4166 case Instruction::IntToPtr: 4167 case Instruction::SIToFP: 4168 case Instruction::UIToFP: 4169 case Instruction::Trunc: 4170 case Instruction::FPTrunc: 4171 case Instruction::BitCast: { 4172 CastInst *CI = dyn_cast<CastInst>(it); 4173 setDebugLocFromInst(Builder, &*it); 4174 /// Optimize the special case where the source is the induction 4175 /// variable. Notice that we can only optimize the 'trunc' case 4176 /// because: a. FP conversions lose precision, b. sext/zext may wrap, 4177 /// c. other casts depend on pointer size. 4178 if (CI->getOperand(0) == OldInduction && 4179 it->getOpcode() == Instruction::Trunc) { 4180 Value *ScalarCast = Builder.CreateCast(CI->getOpcode(), Induction, 4181 CI->getType()); 4182 Value *Broadcasted = getBroadcastInstrs(ScalarCast); 4183 InductionDescriptor II = 4184 Legal->getInductionVars()->lookup(OldInduction); 4185 Constant *Step = ConstantInt::getSigned( 4186 CI->getType(), II.getStepValue()->getSExtValue()); 4187 for (unsigned Part = 0; Part < UF; ++Part) 4188 Entry[Part] = getStepVector(Broadcasted, VF * Part, Step); 4189 addMetadata(Entry, &*it); 4190 break; 4191 } 4192 /// Vectorize casts. 4193 Type *DestTy = (VF == 1) ? CI->getType() : 4194 VectorType::get(CI->getType(), VF); 4195 4196 VectorParts &A = getVectorValue(it->getOperand(0)); 4197 for (unsigned Part = 0; Part < UF; ++Part) 4198 Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy); 4199 addMetadata(Entry, &*it); 4200 break; 4201 } 4202 4203 case Instruction::Call: { 4204 // Ignore dbg intrinsics. 
      if (isa<DbgInfoIntrinsic>(it))
        break;
      setDebugLocFromInst(Builder, &*it);

      Module *M = BB->getParent()->getParent();
      CallInst *CI = cast<CallInst>(it);

      StringRef FnName = CI->getCalledFunction()->getName();
      Function *F = CI->getCalledFunction();
      Type *RetTy = ToVectorTy(CI->getType(), VF);
      SmallVector<Type *, 4> Tys;
      for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i)
        Tys.push_back(ToVectorTy(CI->getArgOperand(i)->getType(), VF));

      Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
      if (ID &&
          (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
           ID == Intrinsic::lifetime_start)) {
        scalarizeInstruction(&*it);
        break;
      }
      // The flag shows whether we use an intrinsic or an ordinary call for
      // the vectorized version of the instruction.
      // Is it beneficial to perform the intrinsic call rather than the
      // library call?
      bool NeedToScalarize;
      unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
      bool UseVectorIntrinsic =
          ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
      if (!UseVectorIntrinsic && NeedToScalarize) {
        scalarizeInstruction(&*it);
        break;
      }

      for (unsigned Part = 0; Part < UF; ++Part) {
        SmallVector<Value *, 4> Args;
        for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
          Value *Arg = CI->getArgOperand(i);
          // Some intrinsics have a scalar argument - don't replace it with a
          // vector.
          if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) {
            VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i));
            Arg = VectorArg[Part];
          }
          Args.push_back(Arg);
        }

        Function *VectorF;
        if (UseVectorIntrinsic) {
          // Use the vector version of the intrinsic.
          Type *TysForDecl[] = {CI->getType()};
          if (VF > 1)
            TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
          VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
        } else {
          // Use the vector version of the library call.
          StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
          assert(!VFnName.empty() && "Vector function name is empty.");
          VectorF = M->getFunction(VFnName);
          if (!VectorF) {
            // Generate a declaration.
            FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
            VectorF =
                Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
            VectorF->copyAttributesFrom(F);
          }
        }
        assert(VectorF && "Can't create vector function.");
        Entry[Part] = Builder.CreateCall(VectorF, Args);
      }

      addMetadata(Entry, &*it);
      break;
    }

    default:
      // All other instructions are unsupported. Scalarize them.
      scalarizeInstruction(&*it);
      break;
    } // end of switch.
  } // end of for_each instr.
}

void InnerLoopVectorizer::updateAnalysis() {
  // Forget the original basic block.
  PSE.getSE()->forgetLoop(OrigLoop);

  // Update the dominator tree information.
  assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
         "Entry does not dominate exit.");

  // We don't predicate stores by this point, so the vector body should be a
  // single loop.
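  // For orientation, the updates below mirror the CFG built in
  // createEmptyLoop (sketch): bypass checks -> vector.ph -> vector.body ->
  // middle.block -> scalar.ph -> scalar loop -> exit, where the first bypass
  // block also dominates the scalar preheader and the exit.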
  assert(LoopVectorBody.size() == 1 && "Expected single block loop!");
  DT->addNewBlock(LoopVectorBody[0], LoopVectorPreHeader);

  DT->addNewBlock(LoopMiddleBlock, LoopVectorBody.back());
  DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
  DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
  DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);

  DEBUG(DT->verifyDomTree());
}

/// \brief Check whether it is safe to if-convert this phi node.
///
/// Phi nodes with constant expressions that can trap are not safe to
/// if-convert.
static bool canIfConvertPHINodes(BasicBlock *BB) {
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    PHINode *Phi = dyn_cast<PHINode>(I);
    if (!Phi)
      return true;
    for (unsigned p = 0, e = Phi->getNumIncomingValues(); p != e; ++p)
      if (Constant *C = dyn_cast<Constant>(Phi->getIncomingValue(p)))
        if (C->canTrap())
          return false;
  }
  return true;
}

bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
  if (!EnableIfConversion) {
    emitAnalysis(VectorizationReport() << "if-conversion is disabled");
    return false;
  }

  assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");

  // A list of pointers that we can safely read and write to.
  SmallPtrSet<Value *, 8> SafePointers;

  // Collect safe addresses.
  for (Loop::block_iterator BI = TheLoop->block_begin(),
       BE = TheLoop->block_end(); BI != BE; ++BI) {
    BasicBlock *BB = *BI;

    if (blockNeedsPredication(BB))
      continue;

    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
      if (LoadInst *LI = dyn_cast<LoadInst>(I))
        SafePointers.insert(LI->getPointerOperand());
      else if (StoreInst *SI = dyn_cast<StoreInst>(I))
        SafePointers.insert(SI->getPointerOperand());
    }
  }

  // Collect the blocks that need predication.
  BasicBlock *Header = TheLoop->getHeader();
  for (Loop::block_iterator BI = TheLoop->block_begin(),
       BE = TheLoop->block_end(); BI != BE; ++BI) {
    BasicBlock *BB = *BI;

    // We don't support switch statements inside loops.
    if (!isa<BranchInst>(BB->getTerminator())) {
      emitAnalysis(VectorizationReport(BB->getTerminator())
                   << "loop contains a switch statement");
      return false;
    }

    // We must be able to predicate all blocks that need to be predicated.
    if (blockNeedsPredication(BB)) {
      if (!blockCanBePredicated(BB, SafePointers)) {
        emitAnalysis(VectorizationReport(BB->getTerminator())
                     << "control flow cannot be substituted for a select");
        return false;
      }
    } else if (BB != Header && !canIfConvertPHINodes(BB)) {
      emitAnalysis(VectorizationReport(BB->getTerminator())
                   << "control flow cannot be substituted for a select");
      return false;
    }
  }

  // We can if-convert this loop.
  return true;
}

bool LoopVectorizationLegality::canVectorize() {
  // We must have a loop in canonical form. Loops with indirectbr in them cannot
  // be canonicalized.
  if (!TheLoop->getLoopPreheader()) {
    emitAnalysis(
        VectorizationReport() <<
        "loop control flow is not understood by vectorizer");
    return false;
  }

  // We can only vectorize innermost loops.
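  // (Note: Loop::empty() reports whether the loop has subloops, so
  // !TheLoop->empty() means there is at least one loop nested inside.)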
4394 if (!TheLoop->empty()) { 4395 emitAnalysis(VectorizationReport() << "loop is not the innermost loop"); 4396 return false; 4397 } 4398 4399 // We must have a single backedge. 4400 if (TheLoop->getNumBackEdges() != 1) { 4401 emitAnalysis( 4402 VectorizationReport() << 4403 "loop control flow is not understood by vectorizer"); 4404 return false; 4405 } 4406 4407 // We must have a single exiting block. 4408 if (!TheLoop->getExitingBlock()) { 4409 emitAnalysis( 4410 VectorizationReport() << 4411 "loop control flow is not understood by vectorizer"); 4412 return false; 4413 } 4414 4415 // We only handle bottom-tested loops, i.e. loop in which the condition is 4416 // checked at the end of each iteration. With that we can assume that all 4417 // instructions in the loop are executed the same number of times. 4418 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 4419 emitAnalysis( 4420 VectorizationReport() << 4421 "loop control flow is not understood by vectorizer"); 4422 return false; 4423 } 4424 4425 // We need to have a loop header. 4426 DEBUG(dbgs() << "LV: Found a loop: " << 4427 TheLoop->getHeader()->getName() << '\n'); 4428 4429 // Check if we can if-convert non-single-bb loops. 4430 unsigned NumBlocks = TheLoop->getNumBlocks(); 4431 if (NumBlocks != 1 && !canVectorizeWithIfConvert()) { 4432 DEBUG(dbgs() << "LV: Can't if-convert the loop.\n"); 4433 return false; 4434 } 4435 4436 // ScalarEvolution needs to be able to find the exit count. 4437 const SCEV *ExitCount = PSE.getBackedgeTakenCount(); 4438 if (ExitCount == PSE.getSE()->getCouldNotCompute()) { 4439 emitAnalysis(VectorizationReport() 4440 << "could not determine number of loop iterations"); 4441 DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n"); 4442 return false; 4443 } 4444 4445 // Check if we can vectorize the instructions and CFG in this loop. 4446 if (!canVectorizeInstrs()) { 4447 DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n"); 4448 return false; 4449 } 4450 4451 // Go over each instruction and look at memory deps. 4452 if (!canVectorizeMemory()) { 4453 DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n"); 4454 return false; 4455 } 4456 4457 // Collect all of the variables that remain uniform after vectorization. 4458 collectLoopUniforms(); 4459 4460 DEBUG(dbgs() << "LV: We can vectorize this loop" 4461 << (LAI->getRuntimePointerChecking()->Need 4462 ? " (with a runtime bound check)" 4463 : "") 4464 << "!\n"); 4465 4466 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 4467 4468 // If an override option has been passed in for interleaved accesses, use it. 4469 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 4470 UseInterleaved = EnableInterleavedMemAccesses; 4471 4472 // Analyze interleaved memory accesses. 4473 if (UseInterleaved) 4474 InterleaveInfo.analyzeInterleaving(Strides); 4475 4476 unsigned SCEVThreshold = VectorizeSCEVCheckThreshold; 4477 if (Hints->getForce() == LoopVectorizeHints::FK_Enabled) 4478 SCEVThreshold = PragmaVectorizeSCEVCheckThreshold; 4479 4480 if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) { 4481 emitAnalysis(VectorizationReport() 4482 << "Too many SCEV assumptions need to be made and checked " 4483 << "at runtime"); 4484 DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n"); 4485 return false; 4486 } 4487 4488 // Okay! We can vectorize. At this point we don't have any other mem analysis 4489 // which may limit our maximum vectorization factor, so just return true with 4490 // no restrictions. 
4491 return true;
4492 }
4493
4494 static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
4495 if (Ty->isPointerTy())
4496 return DL.getIntPtrType(Ty);
4497
4498 // It is possible that chars or shorts overflow when we ask for the loop's
4499 // trip count; work around this by changing the type size.
4500 if (Ty->getScalarSizeInBits() < 32)
4501 return Type::getInt32Ty(Ty->getContext());
4502
4503 return Ty;
4504 }
4505
4506 static Type* getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
4507 Ty0 = convertPointerToIntegerType(DL, Ty0);
4508 Ty1 = convertPointerToIntegerType(DL, Ty1);
4509 if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
4510 return Ty0;
4511 return Ty1;
4512 }
4513
4514 /// \brief Check that the instruction has outside loop users and is not an
4515 /// identified reduction variable.
4516 static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
4517 SmallPtrSetImpl<Value *> &Reductions) {
4518 // Reduction instructions are allowed to have exit users. All other
4519 // instructions must not have external users.
4520 if (!Reductions.count(Inst))
4521 // Check that all of the users of the instruction are inside the loop.
4522 for (User *U : Inst->users()) {
4523 Instruction *UI = cast<Instruction>(U);
4524 // This user may be a reduction exit value.
4525 if (!TheLoop->contains(UI)) {
4526 DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
4527 return true;
4528 }
4529 }
4530 return false;
4531 }
4532
4533 bool LoopVectorizationLegality::canVectorizeInstrs() {
4534 BasicBlock *Header = TheLoop->getHeader();
4535
4536 // Look for the attribute signaling the absence of NaNs.
4537 Function &F = *Header->getParent();
4538 const DataLayout &DL = F.getParent()->getDataLayout();
4539 HasFunNoNaNAttr =
4540 F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
4541
4542 // For each block in the loop.
4543 for (Loop::block_iterator bb = TheLoop->block_begin(),
4544 be = TheLoop->block_end(); bb != be; ++bb) {
4545
4546 // Scan the instructions in the block and look for hazards.
4547 for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
4548 ++it) {
4549
4550 if (PHINode *Phi = dyn_cast<PHINode>(it)) {
4551 Type *PhiTy = Phi->getType();
4552 // Check that this PHI type is allowed.
4553 if (!PhiTy->isIntegerTy() &&
4554 !PhiTy->isFloatingPointTy() &&
4555 !PhiTy->isPointerTy()) {
4556 emitAnalysis(VectorizationReport(&*it)
4557 << "loop control flow is not understood by vectorizer");
4558 DEBUG(dbgs() << "LV: Found a non-int, non-FP, non-pointer PHI.\n");
4559 return false;
4560 }
4561
4562 // If this PHINode is not in the header block, then we know that we
4563 // can convert it to a select during if-conversion. No need to check if
4564 // the PHIs in this block are induction or reduction variables.
4565 if (*bb != Header) {
4566 // Check that this instruction has no outside users or is an
4567 // identified reduction value with an outside user.
4568 if (!hasOutsideLoopUser(TheLoop, &*it, AllowedExit))
4569 continue;
4570 emitAnalysis(VectorizationReport(&*it) <<
4571 "value could not be identified as "
4572 "an induction or reduction variable");
4573 return false;
4574 }
4575
4576 // We only allow if-converted PHIs with exactly two incoming values.
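// E.g. a PHI that if-conversion of a simple diamond would produce,
//   %r = phi i32 [ %a, %then ], [ %b, %else ],
// can be rewritten as a single select on the edge condition. A PHI merging
// three or more incoming values has no such single-select form, so we
// reject it here.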
4577 if (Phi->getNumIncomingValues() != 2) { 4578 emitAnalysis(VectorizationReport(&*it) 4579 << "control flow not understood by vectorizer"); 4580 DEBUG(dbgs() << "LV: Found an invalid PHI.\n"); 4581 return false; 4582 } 4583 4584 InductionDescriptor ID; 4585 if (InductionDescriptor::isInductionPHI(Phi, PSE.getSE(), ID)) { 4586 Inductions[Phi] = ID; 4587 // Get the widest type. 4588 if (!WidestIndTy) 4589 WidestIndTy = convertPointerToIntegerType(DL, PhiTy); 4590 else 4591 WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy); 4592 4593 // Int inductions are special because we only allow one IV. 4594 if (ID.getKind() == InductionDescriptor::IK_IntInduction && 4595 ID.getStepValue()->isOne() && 4596 isa<Constant>(ID.getStartValue()) && 4597 cast<Constant>(ID.getStartValue())->isNullValue()) { 4598 // Use the phi node with the widest type as induction. Use the last 4599 // one if there are multiple (no good reason for doing this other 4600 // than it is expedient). We've checked that it begins at zero and 4601 // steps by one, so this is a canonical induction variable. 4602 if (!Induction || PhiTy == WidestIndTy) 4603 Induction = Phi; 4604 } 4605 4606 DEBUG(dbgs() << "LV: Found an induction variable.\n"); 4607 4608 // Until we explicitly handle the case of an induction variable with 4609 // an outside loop user we have to give up vectorizing this loop. 4610 if (hasOutsideLoopUser(TheLoop, &*it, AllowedExit)) { 4611 emitAnalysis(VectorizationReport(&*it) << 4612 "use of induction value outside of the " 4613 "loop is not handled by vectorizer"); 4614 return false; 4615 } 4616 4617 continue; 4618 } 4619 4620 RecurrenceDescriptor RedDes; 4621 if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) { 4622 if (RedDes.hasUnsafeAlgebra()) 4623 Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst()); 4624 AllowedExit.insert(RedDes.getLoopExitInstr()); 4625 Reductions[Phi] = RedDes; 4626 continue; 4627 } 4628 4629 if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) { 4630 FirstOrderRecurrences.insert(Phi); 4631 continue; 4632 } 4633 4634 emitAnalysis(VectorizationReport(&*it) << 4635 "value that could not be identified as " 4636 "reduction is used outside the loop"); 4637 DEBUG(dbgs() << "LV: Found an unidentified PHI."<< *Phi <<"\n"); 4638 return false; 4639 }// end of PHI handling 4640 4641 // We handle calls that: 4642 // * Are debug info intrinsics. 4643 // * Have a mapping to an IR intrinsic. 4644 // * Have a vector version available. 4645 CallInst *CI = dyn_cast<CallInst>(it); 4646 if (CI && !getIntrinsicIDForCall(CI, TLI) && !isa<DbgInfoIntrinsic>(CI) && 4647 !(CI->getCalledFunction() && TLI && 4648 TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) { 4649 emitAnalysis(VectorizationReport(&*it) 4650 << "call instruction cannot be vectorized"); 4651 DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n"); 4652 return false; 4653 } 4654 4655 // Intrinsics such as powi,cttz and ctlz are legal to vectorize if the 4656 // second argument is the same (i.e. loop invariant) 4657 if (CI && 4658 hasVectorInstrinsicScalarOpd(getIntrinsicIDForCall(CI, TLI), 1)) { 4659 auto *SE = PSE.getSE(); 4660 if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) { 4661 emitAnalysis(VectorizationReport(&*it) 4662 << "intrinsic instruction cannot be vectorized"); 4663 DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n"); 4664 return false; 4665 } 4666 } 4667 4668 // Check that the instruction return type is vectorizable. 
4669 // Also, we can't vectorize extractelement instructions. 4670 if ((!VectorType::isValidElementType(it->getType()) && 4671 !it->getType()->isVoidTy()) || isa<ExtractElementInst>(it)) { 4672 emitAnalysis(VectorizationReport(&*it) 4673 << "instruction return type cannot be vectorized"); 4674 DEBUG(dbgs() << "LV: Found unvectorizable type.\n"); 4675 return false; 4676 } 4677 4678 // Check that the stored type is vectorizable. 4679 if (StoreInst *ST = dyn_cast<StoreInst>(it)) { 4680 Type *T = ST->getValueOperand()->getType(); 4681 if (!VectorType::isValidElementType(T)) { 4682 emitAnalysis(VectorizationReport(ST) << 4683 "store instruction cannot be vectorized"); 4684 return false; 4685 } 4686 if (EnableMemAccessVersioning) 4687 collectStridedAccess(ST); 4688 } 4689 4690 if (EnableMemAccessVersioning) 4691 if (LoadInst *LI = dyn_cast<LoadInst>(it)) 4692 collectStridedAccess(LI); 4693 4694 // Reduction instructions are allowed to have exit users. 4695 // All other instructions must not have external users. 4696 if (hasOutsideLoopUser(TheLoop, &*it, AllowedExit)) { 4697 emitAnalysis(VectorizationReport(&*it) << 4698 "value cannot be used outside the loop"); 4699 return false; 4700 } 4701 4702 } // next instr. 4703 4704 } 4705 4706 if (!Induction) { 4707 DEBUG(dbgs() << "LV: Did not find one integer induction var.\n"); 4708 if (Inductions.empty()) { 4709 emitAnalysis(VectorizationReport() 4710 << "loop induction variable could not be identified"); 4711 return false; 4712 } 4713 } 4714 4715 // Now we know the widest induction type, check if our found induction 4716 // is the same size. If it's not, unset it here and InnerLoopVectorizer 4717 // will create another. 4718 if (Induction && WidestIndTy != Induction->getType()) 4719 Induction = nullptr; 4720 4721 return true; 4722 } 4723 4724 void LoopVectorizationLegality::collectStridedAccess(Value *MemAccess) { 4725 Value *Ptr = nullptr; 4726 if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess)) 4727 Ptr = LI->getPointerOperand(); 4728 else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess)) 4729 Ptr = SI->getPointerOperand(); 4730 else 4731 return; 4732 4733 Value *Stride = getStrideFromPointer(Ptr, PSE.getSE(), TheLoop); 4734 if (!Stride) 4735 return; 4736 4737 DEBUG(dbgs() << "LV: Found a strided access that we can version"); 4738 DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n"); 4739 Strides[Ptr] = Stride; 4740 StrideSet.insert(Stride); 4741 } 4742 4743 void LoopVectorizationLegality::collectLoopUniforms() { 4744 // We now know that the loop is vectorizable! 4745 // Collect variables that will remain uniform after vectorization. 4746 std::vector<Value*> Worklist; 4747 BasicBlock *Latch = TheLoop->getLoopLatch(); 4748 4749 // Start with the conditional branch and walk up the block. 4750 Worklist.push_back(Latch->getTerminator()->getOperand(0)); 4751 4752 // Also add all consecutive pointer values; these values will be uniform 4753 // after vectorization (and subsequent cleanup) and, until revectorization is 4754 // supported, all dependencies must also be uniform. 
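// E.g. for a consecutive access A[i], the widened loop needs only the lane-0
// address &A[i] (the other lanes are implied), so the values feeding that
// address computation can remain scalar.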
4755 for (Loop::block_iterator B = TheLoop->block_begin(), 4756 BE = TheLoop->block_end(); B != BE; ++B) 4757 for (BasicBlock::iterator I = (*B)->begin(), IE = (*B)->end(); 4758 I != IE; ++I) 4759 if (I->getType()->isPointerTy() && isConsecutivePtr(&*I)) 4760 Worklist.insert(Worklist.end(), I->op_begin(), I->op_end()); 4761 4762 while (!Worklist.empty()) { 4763 Instruction *I = dyn_cast<Instruction>(Worklist.back()); 4764 Worklist.pop_back(); 4765 4766 // Look at instructions inside this loop. 4767 // Stop when reaching PHI nodes. 4768 // TODO: we need to follow values all over the loop, not only in this block. 4769 if (!I || !TheLoop->contains(I) || isa<PHINode>(I)) 4770 continue; 4771 4772 // This is a known uniform. 4773 Uniforms.insert(I); 4774 4775 // Insert all operands. 4776 Worklist.insert(Worklist.end(), I->op_begin(), I->op_end()); 4777 } 4778 } 4779 4780 bool LoopVectorizationLegality::canVectorizeMemory() { 4781 LAI = &LAA->getInfo(TheLoop, Strides); 4782 auto &OptionalReport = LAI->getReport(); 4783 if (OptionalReport) 4784 emitAnalysis(VectorizationReport(*OptionalReport)); 4785 if (!LAI->canVectorizeMemory()) 4786 return false; 4787 4788 if (LAI->hasStoreToLoopInvariantAddress()) { 4789 emitAnalysis( 4790 VectorizationReport() 4791 << "write to a loop invariant address could not be vectorized"); 4792 DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n"); 4793 return false; 4794 } 4795 4796 Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks()); 4797 PSE.addPredicate(LAI->PSE.getUnionPredicate()); 4798 4799 return true; 4800 } 4801 4802 bool LoopVectorizationLegality::isInductionVariable(const Value *V) { 4803 Value *In0 = const_cast<Value*>(V); 4804 PHINode *PN = dyn_cast_or_null<PHINode>(In0); 4805 if (!PN) 4806 return false; 4807 4808 return Inductions.count(PN); 4809 } 4810 4811 bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) { 4812 return FirstOrderRecurrences.count(Phi); 4813 } 4814 4815 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) { 4816 return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT); 4817 } 4818 4819 bool LoopVectorizationLegality::blockCanBePredicated(BasicBlock *BB, 4820 SmallPtrSetImpl<Value *> &SafePtrs) { 4821 4822 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 4823 // Check that we don't have a constant expression that can trap as operand. 4824 for (Instruction::op_iterator OI = it->op_begin(), OE = it->op_end(); 4825 OI != OE; ++OI) { 4826 if (Constant *C = dyn_cast<Constant>(*OI)) 4827 if (C->canTrap()) 4828 return false; 4829 } 4830 // We might be able to hoist the load. 4831 if (it->mayReadFromMemory()) { 4832 LoadInst *LI = dyn_cast<LoadInst>(it); 4833 if (!LI) 4834 return false; 4835 if (!SafePtrs.count(LI->getPointerOperand())) { 4836 if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) || 4837 isLegalMaskedGather(LI->getType())) { 4838 MaskedOp.insert(LI); 4839 continue; 4840 } 4841 return false; 4842 } 4843 } 4844 4845 // We don't predicate stores at the moment. 4846 if (it->mayWriteToMemory()) { 4847 StoreInst *SI = dyn_cast<StoreInst>(it); 4848 // We only support predication of stores in basic blocks with one 4849 // predecessor. 
4850 if (!SI) 4851 return false; 4852 4853 bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0); 4854 bool isSinglePredecessor = SI->getParent()->getSinglePredecessor(); 4855 4856 if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr || 4857 !isSinglePredecessor) { 4858 // Build a masked store if it is legal for the target, otherwise 4859 // scalarize the block. 4860 bool isLegalMaskedOp = 4861 isLegalMaskedStore(SI->getValueOperand()->getType(), 4862 SI->getPointerOperand()) || 4863 isLegalMaskedScatter(SI->getValueOperand()->getType()); 4864 if (isLegalMaskedOp) { 4865 --NumPredStores; 4866 MaskedOp.insert(SI); 4867 continue; 4868 } 4869 return false; 4870 } 4871 } 4872 if (it->mayThrow()) 4873 return false; 4874 4875 // The instructions below can trap. 4876 switch (it->getOpcode()) { 4877 default: continue; 4878 case Instruction::UDiv: 4879 case Instruction::SDiv: 4880 case Instruction::URem: 4881 case Instruction::SRem: 4882 return false; 4883 } 4884 } 4885 4886 return true; 4887 } 4888 4889 void InterleavedAccessInfo::collectConstStridedAccesses( 4890 MapVector<Instruction *, StrideDescriptor> &StrideAccesses, 4891 const ValueToValueMap &Strides) { 4892 // Holds load/store instructions in program order. 4893 SmallVector<Instruction *, 16> AccessList; 4894 4895 for (auto *BB : TheLoop->getBlocks()) { 4896 bool IsPred = LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT); 4897 4898 for (auto &I : *BB) { 4899 if (!isa<LoadInst>(&I) && !isa<StoreInst>(&I)) 4900 continue; 4901 // FIXME: Currently we can't handle mixed accesses and predicated accesses 4902 if (IsPred) 4903 return; 4904 4905 AccessList.push_back(&I); 4906 } 4907 } 4908 4909 if (AccessList.empty()) 4910 return; 4911 4912 auto &DL = TheLoop->getHeader()->getModule()->getDataLayout(); 4913 for (auto I : AccessList) { 4914 LoadInst *LI = dyn_cast<LoadInst>(I); 4915 StoreInst *SI = dyn_cast<StoreInst>(I); 4916 4917 Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand(); 4918 int Stride = isStridedPtr(PSE, Ptr, TheLoop, Strides); 4919 4920 // The factor of the corresponding interleave group. 4921 unsigned Factor = std::abs(Stride); 4922 4923 // Ignore the access if the factor is too small or too large. 4924 if (Factor < 2 || Factor > MaxInterleaveGroupFactor) 4925 continue; 4926 4927 const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 4928 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); 4929 unsigned Size = DL.getTypeAllocSize(PtrTy->getElementType()); 4930 4931 // An alignment of 0 means target ABI alignment. 4932 unsigned Align = LI ? LI->getAlignment() : SI->getAlignment(); 4933 if (!Align) 4934 Align = DL.getABITypeAlignment(PtrTy->getElementType()); 4935 4936 StrideAccesses[I] = StrideDescriptor(Stride, Scev, Size, Align); 4937 } 4938 } 4939 4940 // Analyze interleaved accesses and collect them into interleave groups. 4941 // 4942 // Notice that the vectorization on interleaved groups will change instruction 4943 // orders and may break dependences. But the memory dependence check guarantees 4944 // that there is no overlap between two pointers of different strides, element 4945 // sizes or underlying bases. 4946 // 4947 // For pointers sharing the same stride, element size and underlying base, no 4948 // need to worry about Read-After-Write dependences and Write-After-Read 4949 // dependences. 4950 // 4951 // E.g. 
The RAW dependence: A[i] = a;
4952 // b = A[i];
4953 // This won't exist as it is a store-load forwarding conflict, which has
4954 // already been checked and forbidden in the dependence check.
4955 //
4956 // E.g. The WAR dependence: a = A[i]; // (1)
4957 // A[i] = b; // (2)
4958 // The store group of (2) is always inserted at or below (2), and the load group
4959 // of (1) is always inserted at or above (1). The dependence is safe.
4960 void InterleavedAccessInfo::analyzeInterleaving(
4961 const ValueToValueMap &Strides) {
4962 DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
4963
4964 // Holds all the stride accesses.
4965 MapVector<Instruction *, StrideDescriptor> StrideAccesses;
4966 collectConstStridedAccesses(StrideAccesses, Strides);
4967
4968 if (StrideAccesses.empty())
4969 return;
4970
4971 // Holds all interleaved store groups temporarily.
4972 SmallSetVector<InterleaveGroup *, 4> StoreGroups;
4973 // Holds all interleaved load groups temporarily.
4974 SmallSetVector<InterleaveGroup *, 4> LoadGroups;
4975
4976 // Search bottom-up for same-kind pairs B-A (load-load or store-store) and
4977 // try to insert B into the interleave group of A according to 3 rules:
4978 // 1. A and B have the same stride.
4979 // 2. A and B have the same memory object size.
4980 // 3. B belongs to the group according to the distance.
4981 //
4982 // The bottom-up order can avoid breaking the Write-After-Write dependences
4983 // between two pointers of the same base.
4984 // E.g. A[i] = a; (1)
4985 // A[i] = b; (2)
4986 // A[i+1] = c; (3)
4987 // We form the group (2)+(3) first, so (1) has to form groups with accesses
4988 // above (1), which guarantees that (1) is always above (2).
4989 for (auto I = StrideAccesses.rbegin(), E = StrideAccesses.rend(); I != E;
4990 ++I) {
4991 Instruction *A = I->first;
4992 StrideDescriptor DesA = I->second;
4993
4994 InterleaveGroup *Group = getInterleaveGroup(A);
4995 if (!Group) {
4996 DEBUG(dbgs() << "LV: Creating an interleave group with:" << *A << '\n');
4997 Group = createInterleaveGroup(A, DesA.Stride, DesA.Align);
4998 }
4999
5000 if (A->mayWriteToMemory())
5001 StoreGroups.insert(Group);
5002 else
5003 LoadGroups.insert(Group);
5004
5005 for (auto II = std::next(I); II != E; ++II) {
5006 Instruction *B = II->first;
5007 StrideDescriptor DesB = II->second;
5008
5009 // Ignore if B is already in a group or is a different kind of memory op.
5010 if (isInterleaved(B) || A->mayReadFromMemory() != B->mayReadFromMemory())
5011 continue;
5012
5013 // Check rules 1 and 2.
5014 if (DesB.Stride != DesA.Stride || DesB.Size != DesA.Size)
5015 continue;
5016
5017 // Calculate the distance and prepare for rule 3.
5018 const SCEVConstant *DistToA = dyn_cast<SCEVConstant>(
5019 PSE.getSE()->getMinusSCEV(DesB.Scev, DesA.Scev));
5020 if (!DistToA)
5021 continue;
5022
5023 int DistanceToA = DistToA->getAPInt().getSExtValue();
5024
5025 // Skip if the distance is not a multiple of the size, as B then cannot
5026 // be in the same group.
5027 if (DistanceToA % static_cast<int>(DesA.Size))
5028 continue;
5029
5030 // The index of B is the index of A plus B's distance to A in size units.
5031 int IndexB =
5032 Group->getIndex(A) + DistanceToA / static_cast<int>(DesA.Size);
5033
5034 // Try to insert B into the group.
5035 if (Group->insertMember(B, IndexB, DesB.Align)) {
5036 DEBUG(dbgs() << "LV: Inserted:" << *B << '\n'
5037 << " into the interleave group with" << *A << '\n');
5038 InterleaveGroupMap[B] = Group;
5039
5040 // Set the first load in program order as the insert position.
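// (Keeping the insert position at the first load in program order matches
// the WAR reasoning above: the widened load group is then emitted at or
// above every load it replaces.)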
5041 if (B->mayReadFromMemory()) 5042 Group->setInsertPos(B); 5043 } 5044 } // Iteration on instruction B 5045 } // Iteration on instruction A 5046 5047 // Remove interleaved store groups with gaps. 5048 for (InterleaveGroup *Group : StoreGroups) 5049 if (Group->getNumMembers() != Group->getFactor()) 5050 releaseGroup(Group); 5051 5052 // Remove interleaved load groups that don't have the first and last member. 5053 // This guarantees that we won't do speculative out of bounds loads. 5054 for (InterleaveGroup *Group : LoadGroups) 5055 if (!Group->getMember(0) || !Group->getMember(Group->getFactor() - 1)) 5056 releaseGroup(Group); 5057 } 5058 5059 LoopVectorizationCostModel::VectorizationFactor 5060 LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) { 5061 // Width 1 means no vectorize 5062 VectorizationFactor Factor = { 1U, 0U }; 5063 if (OptForSize && Legal->getRuntimePointerChecking()->Need) { 5064 emitAnalysis(VectorizationReport() << 5065 "runtime pointer checks needed. Enable vectorization of this " 5066 "loop with '#pragma clang loop vectorize(enable)' when " 5067 "compiling with -Os/-Oz"); 5068 DEBUG(dbgs() << 5069 "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); 5070 return Factor; 5071 } 5072 5073 if (!EnableCondStoresVectorization && Legal->getNumPredStores()) { 5074 emitAnalysis(VectorizationReport() << 5075 "store that is conditionally executed prevents vectorization"); 5076 DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n"); 5077 return Factor; 5078 } 5079 5080 // Find the trip count. 5081 unsigned TC = SE->getSmallConstantTripCount(TheLoop); 5082 DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 5083 5084 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5085 unsigned SmallestType, WidestType; 5086 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5087 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 5088 unsigned MaxSafeDepDist = -1U; 5089 if (Legal->getMaxSafeDepDistBytes() != -1U) 5090 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 5091 WidestRegister = ((WidestRegister < MaxSafeDepDist) ? 5092 WidestRegister : MaxSafeDepDist); 5093 unsigned MaxVectorSize = WidestRegister / WidestType; 5094 5095 DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / " 5096 << WidestType << " bits.\n"); 5097 DEBUG(dbgs() << "LV: The Widest register is: " 5098 << WidestRegister << " bits.\n"); 5099 5100 if (MaxVectorSize == 0) { 5101 DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 5102 MaxVectorSize = 1; 5103 } 5104 5105 assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements" 5106 " into one vector!"); 5107 5108 unsigned VF = MaxVectorSize; 5109 if (MaximizeBandwidth && !OptForSize) { 5110 // Collect all viable vectorization factors. 5111 SmallVector<unsigned, 8> VFs; 5112 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 5113 for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2) 5114 VFs.push_back(VS); 5115 5116 // For each VF calculate its register usage. 5117 auto RUs = calculateRegisterUsage(VFs); 5118 5119 // Select the largest VF which doesn't require more registers than existing 5120 // ones. 5121 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true); 5122 for (int i = RUs.size() - 1; i >= 0; --i) { 5123 if (RUs[i].MaxLocalUsers <= TargetNumRegisters) { 5124 VF = VFs[i]; 5125 break; 5126 } 5127 } 5128 } 5129 5130 // If we optimize the program for size, avoid creating the tail loop. 
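// E.g. (illustrative arithmetic) with TC == 16 and MaxVectorSize == 8 the
// remainder 16 % 8 == 0, so VF 8 needs no scalar tail; with TC == 17 the
// remainder is 1, a tail would be required, and we bail out below.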
5131 if (OptForSize) { 5132 // If we are unable to calculate the trip count then don't try to vectorize. 5133 if (TC < 2) { 5134 emitAnalysis 5135 (VectorizationReport() << 5136 "unable to calculate the loop count due to complex control flow"); 5137 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); 5138 return Factor; 5139 } 5140 5141 // Find the maximum SIMD width that can fit within the trip count. 5142 VF = TC % MaxVectorSize; 5143 5144 if (VF == 0) 5145 VF = MaxVectorSize; 5146 else { 5147 // If the trip count that we found modulo the vectorization factor is not 5148 // zero then we require a tail. 5149 emitAnalysis(VectorizationReport() << 5150 "cannot optimize for size and vectorize at the " 5151 "same time. Enable vectorization of this loop " 5152 "with '#pragma clang loop vectorize(enable)' " 5153 "when compiling with -Os/-Oz"); 5154 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); 5155 return Factor; 5156 } 5157 } 5158 5159 int UserVF = Hints->getWidth(); 5160 if (UserVF != 0) { 5161 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 5162 DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 5163 5164 Factor.Width = UserVF; 5165 return Factor; 5166 } 5167 5168 float Cost = expectedCost(1).first; 5169 #ifndef NDEBUG 5170 const float ScalarCost = Cost; 5171 #endif /* NDEBUG */ 5172 unsigned Width = 1; 5173 DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 5174 5175 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5176 // Ignore scalar width, because the user explicitly wants vectorization. 5177 if (ForceVectorization && VF > 1) { 5178 Width = 2; 5179 Cost = expectedCost(Width).first / (float)Width; 5180 } 5181 5182 for (unsigned i=2; i <= VF; i*=2) { 5183 // Notice that the vector loop needs to be executed less times, so 5184 // we need to divide the cost of the vector loops by the width of 5185 // the vector elements. 5186 VectorizationCostTy C = expectedCost(i); 5187 float VectorCost = C.first / (float)i; 5188 DEBUG(dbgs() << "LV: Vector loop of width " << i << " costs: " << 5189 (int)VectorCost << ".\n"); 5190 if (!C.second && !ForceVectorization) { 5191 DEBUG(dbgs() << "LV: Not considering vector loop of width " << i << 5192 " because it will not generate any vector instructions.\n"); 5193 continue; 5194 } 5195 if (VectorCost < Cost) { 5196 Cost = VectorCost; 5197 Width = i; 5198 } 5199 } 5200 5201 DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 5202 << "LV: Vectorization seems to be not beneficial, " 5203 << "but was forced by a user.\n"); 5204 DEBUG(dbgs() << "LV: Selecting VF: "<< Width << ".\n"); 5205 Factor.Width = Width; 5206 Factor.Cost = Width * Cost; 5207 return Factor; 5208 } 5209 5210 std::pair<unsigned, unsigned> 5211 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5212 unsigned MinWidth = -1U; 5213 unsigned MaxWidth = 8; 5214 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5215 5216 // For each block. 5217 for (Loop::block_iterator bb = TheLoop->block_begin(), 5218 be = TheLoop->block_end(); bb != be; ++bb) { 5219 BasicBlock *BB = *bb; 5220 5221 // For each instruction in the loop. 5222 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 5223 Type *T = it->getType(); 5224 5225 // Skip ignored values. 5226 if (ValuesToIgnore.count(&*it)) 5227 continue; 5228 5229 // Only examine Loads, Stores and PHINodes. 
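// Rationale (as we read this heuristic): the element types that reach
// memory, together with reduction recurrence types, bound the value sizes
// that matter when choosing a bandwidth-maximizing VF.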
5230 if (!isa<LoadInst>(it) && !isa<StoreInst>(it) && !isa<PHINode>(it)) 5231 continue; 5232 5233 // Examine PHI nodes that are reduction variables. Update the type to 5234 // account for the recurrence type. 5235 if (PHINode *PN = dyn_cast<PHINode>(it)) { 5236 if (!Legal->isReductionVariable(PN)) 5237 continue; 5238 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN]; 5239 T = RdxDesc.getRecurrenceType(); 5240 } 5241 5242 // Examine the stored values. 5243 if (StoreInst *ST = dyn_cast<StoreInst>(it)) 5244 T = ST->getValueOperand()->getType(); 5245 5246 // Ignore loaded pointer types and stored pointer types that are not 5247 // consecutive. However, we do want to take consecutive stores/loads of 5248 // pointer vectors into account. 5249 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&*it)) 5250 continue; 5251 5252 MinWidth = std::min(MinWidth, 5253 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5254 MaxWidth = std::max(MaxWidth, 5255 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5256 } 5257 } 5258 5259 return {MinWidth, MaxWidth}; 5260 } 5261 5262 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, 5263 unsigned VF, 5264 unsigned LoopCost) { 5265 5266 // -- The interleave heuristics -- 5267 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5268 // There are many micro-architectural considerations that we can't predict 5269 // at this level. For example, frontend pressure (on decode or fetch) due to 5270 // code size, or the number and capabilities of the execution ports. 5271 // 5272 // We use the following heuristics to select the interleave count: 5273 // 1. If the code has reductions, then we interleave to break the cross 5274 // iteration dependency. 5275 // 2. If the loop is really small, then we interleave to reduce the loop 5276 // overhead. 5277 // 3. We don't interleave if we think that we will spill registers to memory 5278 // due to the increased register pressure. 5279 5280 // When we optimize for size, we don't interleave. 5281 if (OptForSize) 5282 return 1; 5283 5284 // We used the distance for the interleave count. 5285 if (Legal->getMaxSafeDepDistBytes() != -1U) 5286 return 1; 5287 5288 // Do not interleave loops with a relatively small trip count. 5289 unsigned TC = SE->getSmallConstantTripCount(TheLoop); 5290 if (TC > 1 && TC < TinyTripCountInterleaveThreshold) 5291 return 1; 5292 5293 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1); 5294 DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters << 5295 " registers\n"); 5296 5297 if (VF == 1) { 5298 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 5299 TargetNumRegisters = ForceTargetNumScalarRegs; 5300 } else { 5301 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 5302 TargetNumRegisters = ForceTargetNumVectorRegs; 5303 } 5304 5305 RegisterUsage R = calculateRegisterUsage({VF})[0]; 5306 // We divide by these constants so assume that we have at least one 5307 // instruction that uses at least one register. 5308 R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U); 5309 R.NumInstructions = std::max(R.NumInstructions, 1U); 5310 5311 // We calculate the interleave count using the following formula. 5312 // Subtract the number of loop invariants from the number of available 5313 // registers. These registers are used by all of the interleaved instances. 
5314 // Next, divide the remaining registers by the number of registers that is 5315 // required by the loop, in order to estimate how many parallel instances 5316 // fit without causing spills. All of this is rounded down if necessary to be 5317 // a power of two. We want power of two interleave count to simplify any 5318 // addressing operations or alignment considerations. 5319 unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) / 5320 R.MaxLocalUsers); 5321 5322 // Don't count the induction variable as interleaved. 5323 if (EnableIndVarRegisterHeur) 5324 IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) / 5325 std::max(1U, (R.MaxLocalUsers - 1))); 5326 5327 // Clamp the interleave ranges to reasonable counts. 5328 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF); 5329 5330 // Check if the user has overridden the max. 5331 if (VF == 1) { 5332 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5333 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5334 } else { 5335 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5336 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5337 } 5338 5339 // If we did not calculate the cost for VF (because the user selected the VF) 5340 // then we calculate the cost of VF here. 5341 if (LoopCost == 0) 5342 LoopCost = expectedCost(VF).first; 5343 5344 // Clamp the calculated IC to be between the 1 and the max interleave count 5345 // that the target allows. 5346 if (IC > MaxInterleaveCount) 5347 IC = MaxInterleaveCount; 5348 else if (IC < 1) 5349 IC = 1; 5350 5351 // Interleave if we vectorized this loop and there is a reduction that could 5352 // benefit from interleaving. 5353 if (VF > 1 && Legal->getReductionVars()->size()) { 5354 DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5355 return IC; 5356 } 5357 5358 // Note that if we've already vectorized the loop we will have done the 5359 // runtime check and so interleaving won't require further checks. 5360 bool InterleavingRequiresRuntimePointerCheck = 5361 (VF == 1 && Legal->getRuntimePointerChecking()->Need); 5362 5363 // We want to interleave small loops in order to reduce the loop overhead and 5364 // potentially expose ILP opportunities. 5365 DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'); 5366 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 5367 // We assume that the cost overhead is 1 and we use the cost model 5368 // to estimate the cost of the loop and interleave until the cost of the 5369 // loop overhead is about 5% of the cost of the loop. 5370 unsigned SmallIC = 5371 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5372 5373 // Interleave until store/load ports (estimated by max interleave count) are 5374 // saturated. 5375 unsigned NumStores = Legal->getNumStores(); 5376 unsigned NumLoads = Legal->getNumLoads(); 5377 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5378 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5379 5380 // If we have a scalar reduction (vector reductions are already dealt with 5381 // by this point), we can increase the critical path length if the loop 5382 // we're interleaving is inside another loop. Limit, by default to 2, so the 5383 // critical path only gets increased by one reduction operation. 
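// E.g. with the default MaxNestedScalarReductionIC of 2 (per the comment
// above), an inner loop carrying a scalar reduction is interleaved at most
// 2-way here, whatever the small-loop and port heuristics computed.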
5384 if (Legal->getReductionVars()->size() &&
5385 TheLoop->getLoopDepth() > 1) {
5386 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5387 SmallIC = std::min(SmallIC, F);
5388 StoresIC = std::min(StoresIC, F);
5389 LoadsIC = std::min(LoadsIC, F);
5390 }
5391
5392 if (EnableLoadStoreRuntimeInterleave &&
5393 std::max(StoresIC, LoadsIC) > SmallIC) {
5394 DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5395 return std::max(StoresIC, LoadsIC);
5396 }
5397
5398 DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5399 return SmallIC;
5400 }
5401
5402 // Interleave if this is a large loop (small loops are already dealt with by
5403 // this point) that could benefit from interleaving.
5404 bool HasReductions = (Legal->getReductionVars()->size() > 0);
5405 if (TTI.enableAggressiveInterleaving(HasReductions)) {
5406 DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5407 return IC;
5408 }
5409
5410 DEBUG(dbgs() << "LV: Not Interleaving.\n");
5411 return 1;
5412 }
5413
5414 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5415 LoopVectorizationCostModel::calculateRegisterUsage(
5416 const SmallVector<unsigned, 8> &VFs) {
5417 // This function calculates the register usage by measuring the highest number
5418 // of values that are alive at a single location. Obviously, this is a very
5419 // rough estimation. We scan the loop in topological order and
5420 // assign a number to each instruction. We use RPO to ensure that defs are
5421 // met before their users. We assume that each instruction that has in-loop
5422 // users starts an interval. We record every time that an in-loop value is
5423 // used, so we have a list of the first and last occurrences of each
5424 // instruction. Next, we transpose this data structure into a multi map that
5425 // holds the list of intervals that *end* at a specific location. This multi
5426 // map allows us to perform a linear scan. We scan the instructions linearly
5427 // and record each time that a new interval starts, by placing it in a set.
5428 // If we find this value in the multi-map then we remove it from the set.
5429 // The max register usage is the maximum size of the set.
5430 // We also search for instructions that are defined outside the loop, but are
5431 // used inside the loop. We need this number separately from the max-interval
5432 // usage number because when we unroll, loop-invariant values do not take
5433 // more registers.
5434 LoopBlocksDFS DFS(TheLoop);
5435 DFS.perform(LI);
5436
5437 RegisterUsage RU;
5438 RU.NumInstructions = 0;
5439
5440 // Each 'key' in the map opens a new interval. The values
5441 // of the map are the index of the 'last seen' usage of the
5442 // instruction that is the key.
5443 typedef DenseMap<Instruction*, unsigned> IntervalMap;
5444 // Maps an instruction index to the instruction itself.
5445 DenseMap<unsigned, Instruction*> IdxToInstr;
5446 // Marks the end of each interval.
5447 IntervalMap EndPoint;
5448 // Saves the set of instructions that are used in the loop.
5449 SmallSet<Instruction*, 8> Ends;
5450 // Saves the list of values that are used in the loop but are
5451 // defined outside the loop, such as arguments and constants.
5452 SmallPtrSet<Value*, 8> LoopInvariants; 5453 5454 unsigned Index = 0; 5455 for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(), 5456 be = DFS.endRPO(); bb != be; ++bb) { 5457 RU.NumInstructions += (*bb)->size(); 5458 for (Instruction &I : **bb) { 5459 IdxToInstr[Index++] = &I; 5460 5461 // Save the end location of each USE. 5462 for (unsigned i = 0; i < I.getNumOperands(); ++i) { 5463 Value *U = I.getOperand(i); 5464 Instruction *Instr = dyn_cast<Instruction>(U); 5465 5466 // Ignore non-instruction values such as arguments, constants, etc. 5467 if (!Instr) continue; 5468 5469 // If this instruction is outside the loop then record it and continue. 5470 if (!TheLoop->contains(Instr)) { 5471 LoopInvariants.insert(Instr); 5472 continue; 5473 } 5474 5475 // Overwrite previous end points. 5476 EndPoint[Instr] = Index; 5477 Ends.insert(Instr); 5478 } 5479 } 5480 } 5481 5482 // Saves the list of intervals that end with the index in 'key'. 5483 typedef SmallVector<Instruction*, 2> InstrList; 5484 DenseMap<unsigned, InstrList> TransposeEnds; 5485 5486 // Transpose the EndPoints to a list of values that end at each index. 5487 for (IntervalMap::iterator it = EndPoint.begin(), e = EndPoint.end(); 5488 it != e; ++it) 5489 TransposeEnds[it->second].push_back(it->first); 5490 5491 SmallSet<Instruction*, 8> OpenIntervals; 5492 5493 // Get the size of the widest register. 5494 unsigned MaxSafeDepDist = -1U; 5495 if (Legal->getMaxSafeDepDistBytes() != -1U) 5496 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 5497 unsigned WidestRegister = 5498 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist); 5499 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5500 5501 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5502 SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0); 5503 5504 DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5505 5506 // A lambda that gets the register usage for the given type and VF. 5507 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 5508 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 5509 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 5510 }; 5511 5512 for (unsigned int i = 0; i < Index; ++i) { 5513 Instruction *I = IdxToInstr[i]; 5514 // Ignore instructions that are never used within the loop. 5515 if (!Ends.count(I)) continue; 5516 5517 // Skip ignored values. 5518 if (ValuesToIgnore.count(I)) 5519 continue; 5520 5521 // Remove all of the instructions that end at this location. 5522 InstrList &List = TransposeEnds[i]; 5523 for (unsigned int j = 0, e = List.size(); j < e; ++j) 5524 OpenIntervals.erase(List[j]); 5525 5526 // For each VF find the maximum usage of registers. 5527 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 5528 if (VFs[j] == 1) { 5529 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 5530 continue; 5531 } 5532 5533 // Count the number of live intervals. 5534 unsigned RegUsage = 0; 5535 for (auto Inst : OpenIntervals) 5536 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 5537 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 5538 } 5539 5540 DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 5541 << OpenIntervals.size() << '\n'); 5542 5543 // Add the current instruction to the list of open intervals. 
5544 OpenIntervals.insert(I); 5545 } 5546 5547 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 5548 unsigned Invariant = 0; 5549 if (VFs[i] == 1) 5550 Invariant = LoopInvariants.size(); 5551 else { 5552 for (auto Inst : LoopInvariants) 5553 Invariant += GetRegUsage(Inst->getType(), VFs[i]); 5554 } 5555 5556 DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); 5557 DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); 5558 DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n'); 5559 DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n'); 5560 5561 RU.LoopInvariantRegs = Invariant; 5562 RU.MaxLocalUsers = MaxUsages[i]; 5563 RUs[i] = RU; 5564 } 5565 5566 return RUs; 5567 } 5568 5569 LoopVectorizationCostModel::VectorizationCostTy 5570 LoopVectorizationCostModel::expectedCost(unsigned VF) { 5571 VectorizationCostTy Cost; 5572 5573 // For each block. 5574 for (Loop::block_iterator bb = TheLoop->block_begin(), 5575 be = TheLoop->block_end(); bb != be; ++bb) { 5576 VectorizationCostTy BlockCost; 5577 BasicBlock *BB = *bb; 5578 5579 // For each instruction in the old loop. 5580 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 5581 // Skip dbg intrinsics. 5582 if (isa<DbgInfoIntrinsic>(it)) 5583 continue; 5584 5585 // Skip ignored values. 5586 if (ValuesToIgnore.count(&*it)) 5587 continue; 5588 5589 VectorizationCostTy C = getInstructionCost(&*it, VF); 5590 5591 // Check if we should override the cost. 5592 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 5593 C.first = ForceTargetInstructionCost; 5594 5595 BlockCost.first += C.first; 5596 BlockCost.second |= C.second; 5597 DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << 5598 " for VF " << VF << " For instruction: " << *it << '\n'); 5599 } 5600 5601 // We assume that if-converted blocks have a 50% chance of being executed. 5602 // When the code is scalar then some of the blocks are avoided due to CF. 5603 // When the code is vectorized we execute all code paths. 5604 if (VF == 1 && Legal->blockNeedsPredication(*bb)) 5605 BlockCost.first /= 2; 5606 5607 Cost.first += BlockCost.first; 5608 Cost.second |= BlockCost.second; 5609 } 5610 5611 return Cost; 5612 } 5613 5614 /// \brief Check if the load/store instruction \p I may be translated into 5615 /// gather/scatter during vectorization. 5616 /// 5617 /// Pointer \p Ptr specifies address in memory for the given scalar memory 5618 /// instruction. We need it to retrieve data type. 5619 /// Using gather/scatter is possible when it is supported by target. 5620 static bool isGatherOrScatterLegal(Instruction *I, Value *Ptr, 5621 LoopVectorizationLegality *Legal) { 5622 Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType(); 5623 return (isa<LoadInst>(I) && Legal->isLegalMaskedGather(DataTy)) || 5624 (isa<StoreInst>(I) && Legal->isLegalMaskedScatter(DataTy)); 5625 } 5626 5627 /// \brief Check whether the address computation for a non-consecutive memory 5628 /// access looks like an unlikely candidate for being merged into the indexing 5629 /// mode. 5630 /// 5631 /// We look for a GEP which has one index that is an induction variable and all 5632 /// other indices are loop invariant. If the stride of this access is also 5633 /// within a small bound we decide that this address computation can likely be 5634 /// merged into the addressing mode. 5635 /// In all other cases, we identify the address computation as complex. 
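/// E.g. a GEP like A[inv][i] whose SCEV step is a small constant is likely
/// folded into the addressing mode, while a non-constant or very large step
/// (or several loop-variant indices) marks the computation as complex.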
5636 static bool isLikelyComplexAddressComputation(Value *Ptr, 5637 LoopVectorizationLegality *Legal, 5638 ScalarEvolution *SE, 5639 const Loop *TheLoop) { 5640 GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr); 5641 if (!Gep) 5642 return true; 5643 5644 // We are looking for a gep with all loop invariant indices except for one 5645 // which should be an induction variable. 5646 unsigned NumOperands = Gep->getNumOperands(); 5647 for (unsigned i = 1; i < NumOperands; ++i) { 5648 Value *Opd = Gep->getOperand(i); 5649 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 5650 !Legal->isInductionVariable(Opd)) 5651 return true; 5652 } 5653 5654 // Now we know we have a GEP ptr, %inv, %ind, %inv. Make sure that the step 5655 // can likely be merged into the address computation. 5656 unsigned MaxMergeDistance = 64; 5657 5658 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Ptr)); 5659 if (!AddRec) 5660 return true; 5661 5662 // Check the step is constant. 5663 const SCEV *Step = AddRec->getStepRecurrence(*SE); 5664 // Calculate the pointer stride and check if it is consecutive. 5665 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step); 5666 if (!C) 5667 return true; 5668 5669 const APInt &APStepVal = C->getAPInt(); 5670 5671 // Huge step value - give up. 5672 if (APStepVal.getBitWidth() > 64) 5673 return true; 5674 5675 int64_t StepVal = APStepVal.getSExtValue(); 5676 5677 return StepVal > MaxMergeDistance; 5678 } 5679 5680 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 5681 return Legal->hasStride(I->getOperand(0)) || 5682 Legal->hasStride(I->getOperand(1)); 5683 } 5684 5685 LoopVectorizationCostModel::VectorizationCostTy 5686 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 5687 // If we know that this instruction will remain uniform, check the cost of 5688 // the scalar version. 5689 if (Legal->isUniformAfterVectorization(I)) 5690 VF = 1; 5691 5692 Type *VectorTy; 5693 unsigned C = getInstructionCost(I, VF, VectorTy); 5694 5695 bool TypeNotScalarized = VF > 1 && !VectorTy->isVoidTy() && 5696 TTI.getNumberOfParts(VectorTy) < VF; 5697 return VectorizationCostTy(C, TypeNotScalarized); 5698 } 5699 5700 unsigned 5701 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF, 5702 Type *&VectorTy) { 5703 Type *RetTy = I->getType(); 5704 if (VF > 1 && MinBWs.count(I)) 5705 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 5706 VectorTy = ToVectorTy(RetTy, VF); 5707 5708 // TODO: We need to estimate the cost of intrinsic calls. 5709 switch (I->getOpcode()) { 5710 case Instruction::GetElementPtr: 5711 // We mark this instruction as zero-cost because the cost of GEPs in 5712 // vectorized code depends on whether the corresponding memory instruction 5713 // is scalarized or not. Therefore, we handle GEPs with the memory 5714 // instruction cost. 5715 return 0; 5716 case Instruction::Br: { 5717 return TTI.getCFInstrCost(I->getOpcode()); 5718 } 5719 case Instruction::PHI: { 5720 auto *Phi = cast<PHINode>(I); 5721 5722 // First-order recurrences are replaced by vector shuffles inside the loop. 5723 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 5724 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 5725 VectorTy, VF - 1, VectorTy); 5726 5727 // TODO: IF-converted IFs become selects. 
5728 return 0; 5729 } 5730 case Instruction::Add: 5731 case Instruction::FAdd: 5732 case Instruction::Sub: 5733 case Instruction::FSub: 5734 case Instruction::Mul: 5735 case Instruction::FMul: 5736 case Instruction::UDiv: 5737 case Instruction::SDiv: 5738 case Instruction::FDiv: 5739 case Instruction::URem: 5740 case Instruction::SRem: 5741 case Instruction::FRem: 5742 case Instruction::Shl: 5743 case Instruction::LShr: 5744 case Instruction::AShr: 5745 case Instruction::And: 5746 case Instruction::Or: 5747 case Instruction::Xor: { 5748 // Since we will replace the stride by 1 the multiplication should go away. 5749 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 5750 return 0; 5751 // Certain instructions can be cheaper to vectorize if they have a constant 5752 // second vector operand. One example of this are shifts on x86. 5753 TargetTransformInfo::OperandValueKind Op1VK = 5754 TargetTransformInfo::OK_AnyValue; 5755 TargetTransformInfo::OperandValueKind Op2VK = 5756 TargetTransformInfo::OK_AnyValue; 5757 TargetTransformInfo::OperandValueProperties Op1VP = 5758 TargetTransformInfo::OP_None; 5759 TargetTransformInfo::OperandValueProperties Op2VP = 5760 TargetTransformInfo::OP_None; 5761 Value *Op2 = I->getOperand(1); 5762 5763 // Check for a splat of a constant or for a non uniform vector of constants. 5764 if (isa<ConstantInt>(Op2)) { 5765 ConstantInt *CInt = cast<ConstantInt>(Op2); 5766 if (CInt && CInt->getValue().isPowerOf2()) 5767 Op2VP = TargetTransformInfo::OP_PowerOf2; 5768 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 5769 } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) { 5770 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 5771 Constant *SplatValue = cast<Constant>(Op2)->getSplatValue(); 5772 if (SplatValue) { 5773 ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue); 5774 if (CInt && CInt->getValue().isPowerOf2()) 5775 Op2VP = TargetTransformInfo::OP_PowerOf2; 5776 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 5777 } 5778 } 5779 5780 return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, Op2VK, 5781 Op1VP, Op2VP); 5782 } 5783 case Instruction::Select: { 5784 SelectInst *SI = cast<SelectInst>(I); 5785 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 5786 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 5787 Type *CondTy = SI->getCondition()->getType(); 5788 if (!ScalarCond) 5789 CondTy = VectorType::get(CondTy, VF); 5790 5791 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy); 5792 } 5793 case Instruction::ICmp: 5794 case Instruction::FCmp: { 5795 Type *ValTy = I->getOperand(0)->getType(); 5796 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 5797 auto It = MinBWs.find(Op0AsInstruction); 5798 if (VF > 1 && It != MinBWs.end()) 5799 ValTy = IntegerType::get(ValTy->getContext(), It->second); 5800 VectorTy = ToVectorTy(ValTy, VF); 5801 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy); 5802 } 5803 case Instruction::Store: 5804 case Instruction::Load: { 5805 StoreInst *SI = dyn_cast<StoreInst>(I); 5806 LoadInst *LI = dyn_cast<LoadInst>(I); 5807 Type *ValTy = (SI ? SI->getValueOperand()->getType() : 5808 LI->getType()); 5809 VectorTy = ToVectorTy(ValTy, VF); 5810 5811 unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment(); 5812 unsigned AS = SI ? SI->getPointerAddressSpace() : 5813 LI->getPointerAddressSpace(); 5814 Value *Ptr = SI ? 
SI->getPointerOperand() : LI->getPointerOperand();
5815 // We add the cost of address computation here instead of with the gep
5816 // instruction because only here we know whether the operation is
5817 // scalarized.
5818 if (VF == 1)
5819 return TTI.getAddressComputationCost(VectorTy) +
5820 TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
5821
5822 // For an interleaved access, calculate the total cost of the whole
5823 // interleave group.
5824 if (Legal->isAccessInterleaved(I)) {
5825 auto Group = Legal->getInterleavedAccessGroup(I);
5826 assert(Group && "Failed to get an interleaved access group.");
5827
5828 // Only calculate the cost once at the insert position.
5829 if (Group->getInsertPos() != I)
5830 return 0;
5831
5832 unsigned InterleaveFactor = Group->getFactor();
5833 Type *WideVecTy =
5834 VectorType::get(VectorTy->getVectorElementType(),
5835 VectorTy->getVectorNumElements() * InterleaveFactor);
5836
5837 // Holds the indices of existing members in an interleaved load group.
5838 // An interleaved store group doesn't need this as it doesn't allow gaps.
5839 SmallVector<unsigned, 4> Indices;
5840 if (LI) {
5841 for (unsigned i = 0; i < InterleaveFactor; i++)
5842 if (Group->getMember(i))
5843 Indices.push_back(i);
5844 }
5845
5846 // Calculate the cost of the whole interleaved group.
5847 unsigned Cost = TTI.getInterleavedMemoryOpCost(
5848 I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
5849 Group->getAlignment(), AS);
5850
5851 if (Group->isReverse())
5852 Cost +=
5853 Group->getNumMembers() *
5854 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5855
5856 // FIXME: An interleaved load group with a huge gap could be even more
5857 // expensive than scalar operations. We could then ignore such a group
5858 // and use scalar operations instead.
5859 return Cost;
5860 }
5861
5862 // Scalarized loads/stores.
5863 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
5864 bool UseGatherOrScatter = (ConsecutiveStride == 0) &&
5865 isGatherOrScatterLegal(I, Ptr, Legal);
5866
5867 bool Reverse = ConsecutiveStride < 0;
5868 const DataLayout &DL = I->getModule()->getDataLayout();
5869 unsigned ScalarAllocatedSize = DL.getTypeAllocSize(ValTy);
5870 unsigned VectorElementSize = DL.getTypeStoreSize(VectorTy) / VF;
5871 if ((!ConsecutiveStride && !UseGatherOrScatter) ||
5872 ScalarAllocatedSize != VectorElementSize) {
5873 bool IsComplexComputation =
5874 isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop);
5875 unsigned Cost = 0;
5876 // The cost of extracting from the value vector and pointer vector.
5877 Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
5878 for (unsigned i = 0; i < VF; ++i) {
5879 // The cost of extracting the pointer operand.
5880 Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, PtrTy, i);
5881 // In case of STORE, the cost of ExtractElement from the vector.
5882 // In case of LOAD, the cost of InsertElement into the returned
5883 // vector.
5884 Cost += TTI.getVectorInstrCost(SI ? Instruction::ExtractElement :
5885 Instruction::InsertElement,
5886 VectorTy, i);
5887 }
5888
5889 // The cost of the scalar loads/stores.
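// E.g. scalarizing a load at VF == 4 was charged four pointer extracts and
// four result inserts above; the two lines below add four (possibly complex)
// address computations and four scalar loads.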
      // The cost of the scalar loads/stores.
      Cost += VF * TTI.getAddressComputationCost(PtrTy, IsComplexComputation);
      Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
                                       Alignment, AS);
      return Cost;
    }

    unsigned Cost = TTI.getAddressComputationCost(VectorTy);
    if (UseGatherOrScatter) {
      assert(ConsecutiveStride == 0 &&
             "Gather/Scatter are not used for consecutive stride");
      return Cost +
             TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
                                        Legal->isMaskRequired(I), Alignment);
    }
    // Wide loads/stores.
    if (Legal->isMaskRequired(I))
      Cost +=
          TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
    else
      Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);

    if (Reverse)
      Cost +=
          TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
    return Cost;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    // We optimize the truncation of an induction variable; the cost of such a
    // truncation is the same as the scalar operation.
    if (I->getOpcode() == Instruction::Trunc &&
        Legal->isInductionVariable(I->getOperand(0)))
      return TTI.getCastInstrCost(I->getOpcode(), I->getType(),
                                  I->getOperand(0)->getType());

    Type *SrcScalarTy = I->getOperand(0)->getType();
    Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF);
    if (VF > 1 && MinBWs.count(I)) {
      // This cast is going to be shrunk. This may remove the cast or turn it
      // into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
      //
      // Calculate the modified src and dest types.
      Type *MinVecTy = VectorTy;
      if (I->getOpcode() == Instruction::Trunc) {
        SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      } else if (I->getOpcode() == Instruction::ZExt ||
                 I->getOpcode() == Instruction::SExt) {
        SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      }
    }

    return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy);
  }
  case Instruction::Call: {
    bool NeedToScalarize;
    CallInst *CI = cast<CallInst>(I);
    unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
    if (getIntrinsicIDForCall(CI, TLI))
      return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
    return CallCost;
  }
  default: {
    // We are scalarizing the instruction. Return the cost of the scalar
    // instruction, plus the cost of inserting into and extracting from the
    // vector elements, times the vector width.
    unsigned Cost = 0;
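    // For illustration, with VF == 4, two operands, and unit insert/extract
    // costs, the vector overhead below is 4 * (1 + 1 * 2) = 12, on top of 4
    // copies of the (assumed 'mul'-priced) scalar instruction.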
    if (!RetTy->isVoidTy() && VF != 1) {
      unsigned InsCost =
          TTI.getVectorInstrCost(Instruction::InsertElement, VectorTy);
      unsigned ExtCost =
          TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy);

      // The cost of inserting the results plus extracting each one of the
      // operands.
      Cost += VF * (InsCost + ExtCost * I->getNumOperands());
    }

    // The cost of executing VF copies of the scalar instruction. This opcode
    // is unknown. Assume that it is the same as 'mul'.
    Cost += VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy);
    return Cost;
  }
  } // end of switch.
}

char LoopVectorize::ID = 0;
static const char lv_name[] = "Loop Vectorization";
INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LoopAccessAnalysis)
INITIALIZE_PASS_DEPENDENCY(DemandedBits)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)

namespace llvm {
Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
  return new LoopVectorize(NoUnrolling, AlwaysVectorize);
}
}

bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
  // Check for a store.
  if (StoreInst *ST = dyn_cast<StoreInst>(Inst))
    return Legal->isConsecutivePtr(ST->getPointerOperand()) != 0;

  // Check for a load.
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
    return Legal->isConsecutivePtr(LI->getPointerOperand()) != 0;

  return false;
}

void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
                                             bool IfPredicateStore) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
  // Holds vector parameters or scalars, in case of uniform values.
  SmallVector<VectorParts, 4> Params;

  setDebugLocFromInst(Builder, Instr);

  // Find all of the vectorized parameters.
  for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
    Value *SrcOp = Instr->getOperand(op);

    // If we are accessing the old induction variable, use the new one.
    if (SrcOp == OldInduction) {
      Params.push_back(getVectorValue(SrcOp));
      continue;
    }

    // Try using previously calculated values.
    Instruction *SrcInst = dyn_cast<Instruction>(SrcOp);

    // If the src is an instruction that appeared earlier in the basic block,
    // then it should already be vectorized.
    if (SrcInst && OrigLoop->contains(SrcInst)) {
      assert(WidenMap.has(SrcInst) && "Source operand is unavailable");
      // The parameter is a vector value from earlier.
      Params.push_back(WidenMap.get(SrcInst));
    } else {
      // The parameter is a scalar from outside the loop. Maybe even a
      // constant.
      VectorParts Scalars;
      Scalars.append(UF, SrcOp);
      Params.push_back(Scalars);
    }
  }

  assert(Params.size() == Instr->getNumOperands() &&
         "Invalid number of operands");

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();
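  // With VF == 1 there is nothing to widen: each of the UF unroll parts
  // simply gets its own clone of the scalar instruction, with its operands
  // rewritten from Params in the loop below.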
  Value *UndefVec =
      IsVoidRetTy ? nullptr : UndefValue::get(Instr->getType());
  // Create a new entry in the WidenMap and initialize it to Undef or Null.
  VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);

  VectorParts Cond;
  if (IfPredicateStore) {
    assert(Instr->getParent()->getSinglePredecessor() &&
           "Only support single predecessor blocks");
    Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(),
                          Instr->getParent());
  }

  // For each vector unroll 'part':
  for (unsigned Part = 0; Part < UF; ++Part) {
    // For each scalar that we create:

    // Start an "if (pred) a[i] = ..." block.
    Value *Cmp = nullptr;
    if (IfPredicateStore) {
      if (Cond[Part]->getType()->isVectorTy())
        Cond[Part] =
            Builder.CreateExtractElement(Cond[Part], Builder.getInt32(0));
      Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cond[Part],
                               ConstantInt::get(Cond[Part]->getType(), 1));
    }

    Instruction *Cloned = Instr->clone();
    if (!IsVoidRetTy)
      Cloned->setName(Instr->getName() + ".cloned");
    // Replace the operands of the cloned instruction with extracted scalars.
    for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
      Value *Op = Params[op][Part];
      Cloned->setOperand(op, Op);
    }

    // Place the cloned scalar in the new loop.
    Builder.Insert(Cloned);

    // If we just cloned a new assumption, add it to the assumption cache.
    if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
      if (II->getIntrinsicID() == Intrinsic::assume)
        AC->registerAssumption(II);

    // If the original scalar returns a value, we need to place it in a vector
    // so that future users will be able to use it.
    if (!IsVoidRetTy)
      VecResults[Part] = Cloned;

    // End if-block.
    if (IfPredicateStore)
      PredicatedStores.push_back(std::make_pair(cast<StoreInst>(Cloned), Cmp));
  }
}

void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) {
  StoreInst *SI = dyn_cast<StoreInst>(Instr);
  bool IfPredicateStore =
      (SI && Legal->blockNeedsPredication(SI->getParent()));

  return scalarizeInstruction(Instr, IfPredicateStore);
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
  Type *ITy = Val->getType();
  assert(!ITy->isVectorTy() && "Val must be a scalar");
  Constant *C = ConstantInt::get(ITy, StartIdx);
  return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
}
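// Note: in the unroller the "step vector" degenerates to the scalar value
// Val + StartIdx * Step; e.g. an induction value %i with a unit step and
// StartIdx == 2 becomes the scalar %i + 2 for that unroll part.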