//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one. (See the sketch at the end of
// this comment block.)
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD.
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
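//
// For illustration, with VF = 4 the scalar loop
//
//   for (i = 0; i < n; ++i)
//     a[i] = b[i] + 1;
//
// is conceptually rewritten (a sketch, not actual IR) as
//
//   for (i = 0; i + 3 < n; i += 4)
//     a[i:i+3] = b[i:i+3] + <1, 1, 1, 1>;
//
// with a scalar epilogue loop executing any remaining iterations.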
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <functional>
#include <map>
#include <tuple>

using namespace llvm;
using namespace llvm::PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));
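
// These thresholds and switches are ordinary 'cl::opt' flags, so they can be
// toggled when running the pass standalone, e.g. (illustrative):
//   opt -loop-vectorize -enable-if-conversion=false -S input.ll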
/// We don't vectorize loops with a known constant trip count below this number.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Don't vectorize loops with a constant "
             "trip count that is smaller than this "
             "value."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting the vectorization factor, "
             "which will be determined by the smallest type in the loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));
/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(false), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

namespace {

// Forward declarations.
class LoopVectorizeHints;
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;

/// \brief This modifies LoopAccessReport to initialize the message with a
/// loop-vectorizer-specific part.
class VectorizationReport : public LoopAccessReport {
public:
  VectorizationReport(Instruction *I = nullptr)
      : LoopAccessReport("loop not vectorized: ", I) {}

  /// \brief This allows promotion of the loop-access analysis report into the
  /// loop-vectorizer report. It modifies the message to add the
  /// loop-vectorizer-specific part of the message.
  explicit VectorizationReport(const LoopAccessReport &R)
      : LoopAccessReport(Twine("loop not vectorized: ") + R.str(),
                         R.getInstr()) {}
};

/// A helper function for converting scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}
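
// For example (illustrative):
//   ToVectorTy(i32,  4)  -->  <4 x i32>
//   ToVectorTy(i32,  1)  -->  i32
//   ToVectorTy(void, 4)  -->  void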
/// A helper function that returns the GEP instruction behind a pointer and
/// knows how to skip a 'bitcast'. The 'bitcast' may be skipped if the source
/// and the destination pointee types of the 'bitcast' have the same size.
/// For example:
///   bitcast double** %var to i64* - can be skipped
///   bitcast double** %var to i8*  - cannot
static GetElementPtrInst *getGEPInstruction(Value *Ptr) {
  if (isa<GetElementPtrInst>(Ptr))
    return cast<GetElementPtrInst>(Ptr);

  if (isa<BitCastInst>(Ptr) &&
      isa<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0))) {
    Type *BitcastTy = Ptr->getType();
    Type *GEPTy = cast<BitCastInst>(Ptr)->getSrcTy();
    if (!isa<PointerType>(BitcastTy) || !isa<PointerType>(GEPTy))
      return nullptr;
    Type *Pointee1Ty = cast<PointerType>(BitcastTy)->getPointerElementType();
    Type *Pointee2Ty = cast<PointerType>(GEPTy)->getPointerElementType();
    const DataLayout &DL = cast<BitCastInst>(Ptr)->getModule()->getDataLayout();
    if (DL.getTypeSizeInBits(Pointee1Ty) == DL.getTypeSizeInBits(Pointee2Ty))
      return cast<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0));
  }
  return nullptr;
}

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      unsigned VecWidth, unsigned UnrollFactor)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Induction(nullptr),
        OldInduction(nullptr), WidenMap(UnrollFactor), TripCount(nullptr),
        VectorTripCount(nullptr), Legal(nullptr), AddedSafetyChecks(false) {}

  // Perform the actual loop widening (vectorization).
  // MinimumBitWidths maps scalar integer values to the smallest bitwidth they
  // can be validly truncated to. The cost model has assumed this truncation
  // will happen when vectorizing.
  void vectorize(LoopVectorizationLegality *L,
                 const MapVector<Instruction *, uint64_t> &MinimumBitWidths) {
    MinBWs = &MinimumBitWidths;
    Legal = L;
    // Create a new empty loop. Unlink the old loop and connect the new one.
    createEmptyLoop();
    // Widen each instruction in the old loop to a new one in the new loop.
    // Use the Legality module to find the induction and reduction variables.
    vectorizeLoop();
  }

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  virtual ~InnerLoopVectorizer() {}

protected:
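  // Schematically, createEmptyLoop (declared below) produces a CFG of the
  // following shape (a sketch; the blocks correspond to the BasicBlock
  // members declared later in this class):
  //
  //   [bypass checks] -> [vector pre-header] -> [vector loop body]
  //       -> [middle block] -> [scalar pre-header]
  //       -> [scalar loop body (epilogue)] -> [exit block]
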
  /// A small list of PHINodes.
  typedef SmallVector<PHINode *, 4> PhiVector;
  /// When we unroll loops we have multiple vector values for each scalar.
  /// This data structure holds the unrolled and vectorized values that
  /// originated from one scalar instruction.
  typedef SmallVector<Value *, 2> VectorParts;

  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
      EdgeMaskCache;

  /// Create an empty loop, based on the loop ranges of the old loop.
  void createEmptyLoop();

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);
  /// Copy and widen the instructions from the old loop.
  virtual void vectorizeLoop();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// \brief The Loop exit block may have single value PHI nodes where the
  /// incoming value is 'Undef'. While vectorizing we only handled real values
  /// that were defined inside the loop. Here we fix the 'undef case'.
  /// See PR14725.
  void fixLCSSAPHIs();

  /// Shrinks vector element sizes based on information in "MinBWs".
  void truncateToMinimalBitwidths();

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);
  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// A helper function to vectorize a single BB within the innermost loop.
  void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VectorParts &Entry, unsigned UF,
                           unsigned VF, PhiVector *PV);

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars. If \p IfPredicateStore is true we need to 'hide' each
  /// scalarized instruction behind an if block predicated on the control
  /// dependence of the instruction.
  virtual void scalarizeInstruction(Instruction *Instr,
                                    bool IfPredicateStore = false);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);
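
  // For example (an illustrative sketch, not actual generated IR): with
  // VF = 4, a scalar 'load i32, i32* %p' from consecutive addresses is
  // widened into a single 'load <4 x i32>, <4 x i32>* %vp'.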
  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// Step is a SCEV. In order to get StepValue it takes the existing value
  /// from the SCEV or creates a new one using SCEVExpander.
  virtual Value *getStepVector(Value *Val, int StartIdx, const SCEV *Step);

  /// Create a vector induction variable based on an existing scalar one.
  /// Currently only works for integer induction variables with a constant
  /// step.
  /// If TruncType is provided, instead of widening the original IV, we
  /// widen a version of the IV truncated to TruncType.
  void widenInductionVariable(const InductionDescriptor &II, VectorParts &Entry,
                              IntegerType *TruncType = nullptr);

  /// When we go over instructions in the basic block we rely on previous
  /// values within the current basic block or on loop invariant values.
  /// When we widen (vectorize) values we place them in the map. If the values
  /// are not within the map, they have to be loop invariant, so we simply
  /// broadcast them into a vector.
  VectorParts &getVectorValue(Value *V);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Emit a bypass check to see if the trip count would overflow, or we
  /// wouldn't have enough iterations to execute one vector loop.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if the vector trip count is nonzero.
  void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, const Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(SmallVectorImpl<Value *> &To, const Instruction *From);
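
  // Referring back to getStepVector above (illustrative): with VF = 4,
  // starting from a broadcast of %x, StartIdx = 2 and Step = 1 yield the
  // vector <%x + 2, %x + 3, %x + 4, %x + 5>.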
  /// This is a helper class that holds the vectorizer state. It maps scalar
  /// instructions to vector instructions. When the code is 'unrolled' then
  /// a single scalar value is mapped to multiple vector parts. The parts
  /// are stored in the VectorParts type.
  struct ValueMap {
    /// C'tor. UnrollFactor controls the number of vectors ('parts') that
    /// are mapped.
    ValueMap(unsigned UnrollFactor) : UF(UnrollFactor) {}

    /// \return True if 'Key' is saved in the Value Map.
    bool has(Value *Key) const { return MapStorage.count(Key); }

    /// Initializes a new entry in the map. Sets all of the vector parts to the
    /// same value in 'Val'.
    /// \return A reference to a vector with splat values.
    VectorParts &splat(Value *Key, Value *Val) {
      VectorParts &Entry = MapStorage[Key];
      Entry.assign(UF, Val);
      return Entry;
    }

    ///\return A reference to the value that is stored at 'Key'.
    VectorParts &get(Value *Key) {
      VectorParts &Entry = MapStorage[Key];
      if (Entry.empty())
        Entry.resize(UF);
      assert(Entry.size() == UF);
      return Entry;
    }

  private:
    /// The unroll factor. Each entry in the map stores this number of vector
    /// elements.
    unsigned UF;

    /// Map storage. We use std::map and not DenseMap because insertions to a
    /// dense map invalidate its iterators.
    std::map<Value *, VectorParts> MapStorage;
  };

  /// The original loop.
  Loop *OrigLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;
  /// Loop Info.
  LoopInfo *LI;
  /// Dominator Tree.
  DominatorTree *DT;
  /// Alias Analysis.
  AliasAnalysis *AA;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Assumption Cache.
  AssumptionCache *AC;

  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

protected:
  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;
  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;
  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;
  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;
  /// The vector loop body.
  BasicBlock *LoopVectorBody;
  /// The scalar loop body.
  BasicBlock *LoopScalarBody;
  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction;
  /// The induction variable of the old basic block.
  PHINode *OldInduction;
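
  // For example (illustrative): with UF = 2, a widened scalar %a is mapped
  // by WidenMap (below) to two vector parts, one per unrolled copy of the
  // loop body, while WidenMap.splat(%inv, %bcast) maps a loop-invariant
  // value to the same broadcast in both parts.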
  /// Maps scalars to widened vectors.
  ValueMap WidenMap;
  /// Store instructions that should be predicated, as a pair
  /// <StoreInst, Predicate>.
  SmallVector<std::pair<StoreInst *, Value *>, 4> PredicatedStores;
  EdgeMaskCache MaskCache;
  /// Trip count of the original loop.
  Value *TripCount;
  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount;

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be
  /// truncated to this type.
  const MapVector<Instruction *, uint64_t> *MinBWs;
  LoopVectorizationLegality *Legal;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    unsigned UnrollFactor)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, 1,
                            UnrollFactor) {}

private:
  void scalarizeInstruction(Instruction *Instr,
                            bool IfPredicateStore = false) override;
  void vectorizeMemoryInstruction(Instruction *Instr) override;
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step) override;
  Value *getStepVector(Value *Val, int StartIdx, const SCEV *StepSCEV) override;
  Value *reverseVector(Value *Vec) override;
};

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

/// \brief Set the debug location in the builder using the debug location in
/// the instruction.
static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr))
    B.SetCurrentDebugLocation(Inst->getDebugLoc());
  else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif
/// \brief Propagate known metadata from one instruction to another.
static void propagateMetadata(Instruction *To, const Instruction *From) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  From->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto M : Metadata) {
    unsigned Kind = M.first;

    // These are safe to transfer (this is safe for TBAA, even when we
    // if-convert, because should that metadata have had a control dependency
    // on the condition, and thus actually aliased with some other
    // non-speculated memory access when the condition was false, this would be
    // caught by the runtime overlap checks).
    if (Kind != LLVMContext::MD_tbaa && Kind != LLVMContext::MD_alias_scope &&
        Kind != LLVMContext::MD_noalias && Kind != LLVMContext::MD_fpmath &&
        Kind != LLVMContext::MD_nontemporal)
      continue;

    To->setMetadata(Kind, M.second);
  }
}

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      const Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(SmallVectorImpl<Value *> &To,
                                      const Instruction *From) {
  for (Value *V : To)
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
}

/// \brief A group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: an interleaved load group may have gaps (missing members), but
/// an interleaved store group doesn't allow gaps.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }
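
  // For example (illustrative): in a group with factor 4 whose current
  // leader has index 0, insertMember (below) accepts indices -3 to 3;
  // inserting at a negative index makes that member the new leader, and
  // getIndex() of the existing members shifts up accordingly.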
  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is relative to the leader and it could
  /// be negative if the new member becomes the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The largest index is always less than the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if the group contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey;
  int LargestKey;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32         // Insert Position
  //      %add = add i32 %even     // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32           // Def of %odd
  //      store i32 %odd           // Insert Position
  Instruction *InsertPos;
};

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise it's meaningless to do the analysis, as vectorization
/// of interleaved accesses is unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the members and the group in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT)
      : PSE(PSE), TheLoop(L), DT(DT), RequiresScalarEpilogue(false) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Return the maximum interleave factor of all interleaved groups.
  unsigned getMaxInterleaveFactor() const {
    unsigned MaxFactor = 1;
    for (auto &Entry : InterleaveGroupMap)
      MaxFactor = std::max(MaxFactor, Entry.second->getFactor());
    return MaxFactor;
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr doesn't belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;
  Loop *TheLoop;
  DominatorTree *DT;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor(int Stride, const SCEV *Scev, unsigned Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    StrideDescriptor() : Stride(0), Scev(nullptr), Size(0), Align(0) {}

    int Stride;       // The access's stride. It is negative for a reverse access.
    const SCEV *Scev; // The scalar expression of this access.
    unsigned Size;    // The size of the memory object.
    unsigned Align;   // The alignment of this access.
  };

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStridedAccesses(
      MapVector<Instruction *, StrideDescriptor> &StrideAccesses,
      const ValueToValueMap &Strides);
};
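
// As background for the hints class below, the hints are encoded as loop
// metadata, e.g. (illustrative):
//   br i1 %cond, label %header, label %exit, !llvm.loop !0
//   ...
//   !0 = distinct !{!0, !1, !2}
//   !1 = !{!"llvm.loop.vectorize.width", i32 4}
//   !2 = !{!"llvm.loop.interleave.count", i32 2}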
/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for them.
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;
  /// Vectorization interleave factor.
  Hint Interleave;
  /// Vectorization forced.
  Hint Force;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  LoopVectorizeHints(const Loop *L, bool DisableInterleaving)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        PotentiallyUnsafe(false), TheLoop(L) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized() {
    Width.Value = Interleave.Value = 1;
    Hint Hints[] = {Width, Interleave};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      emitOptimizationRemarkAnalysis(F->getContext(),
                                     vectorizeAnalysisPassName(), *F,
                                     L->getStartLoc(), emitRemark());
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      emitOptimizationRemarkAnalysis(F->getContext(),
                                     vectorizeAnalysisPassName(), *F,
                                     L->getStartLoc(), emitRemark());
      return false;
    }

    if (getWidth() == 1 && getInterleave() == 1) {
      // FIXME: Add a separate metadata to indicate when the loop has already
      // been vectorized instead of setting width and count to 1.
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      emitOptimizationRemarkAnalysis(
          F->getContext(), vectorizeAnalysisPassName(), *F, L->getStartLoc(),
          "loop not vectorized: vectorization and interleaving are explicitly "
          "disabled, or vectorize width and interleave count are both set to "
          "1");
      return false;
    }

    return true;
  }

  /// Dumps all the hint information.
  std::string emitRemark() const {
    VectorizationReport R;
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      R << "vectorization is explicitly disabled";
    else {
      R << "use -Rpass-analysis=loop-vectorize for more info";
      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
        R << " (Force=true";
        if (Width.Value != 0)
          R << ", Vector Width=" << Width.Value;
        if (Interleave.Value != 0)
          R << ", Interleave Count=" << Interleave.Value;
        R << ")";
      }
    }

    return R.str();
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }
  const char *vectorizeAnalysisPassName() const {
    // If hints are provided that don't disable vectorization use the
    // AlwaysPrint pass name to force the frontend to print the diagnostic.
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return DiagnosticInfo::AlwaysPrint;
  }

  bool allowReordering() const {
    // When loop hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations that is given by the
    // scalar loop. This is not enabled by default because it can be unsafe
    // or inefficient. For example, reordering floating-point operations will
    // change the way round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either a MDString or a MDNode with the first
      // operand a MDString.
      if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        if (!MD || MD->getNumOperands() == 0)
          continue;
        S = dyn_cast<MDString>(MD->getOperand(0));
        for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
          Args.push_back(MD->getOperand(i));
      } else {
        S = dyn_cast<MDString>(LoopID->getOperand(i));
        assert(Args.size() == 0 && "too many arguments for MDString");
      }

      if (!S)
        continue;

      // Check if the hint starts with the loop metadata prefix.
      StringRef Name = S->getString();
      if (Args.size() == 1)
        setHint(Name, Args[0]);
    }
  }

  /// Checks a string hint with one operand and sets the value if it is valid.
  void setHint(StringRef Name, Metadata *Arg) {
    if (!Name.startswith(Prefix()))
      return;
    Name = Name.substr(Prefix().size(), StringRef::npos);

    const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
    if (!C)
      return;
    unsigned Val = C->getZExtValue();

    Hint *Hints[] = {&Width, &Interleave, &Force};
    for (auto H : Hints) {
      if (Name == H->Name) {
        if (H->validate(Val))
          H->Value = Val;
        else
          DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
        break;
      }
    }
  }

  /// Create a new hint from a name / value pair.
  MDNode *createHintMetadata(StringRef Name, unsigned V) const {
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    Metadata *MDs[] = {MDString::get(Context, Name),
                       ConstantAsMetadata::get(
                           ConstantInt::get(Type::getInt32Ty(Context), V))};
    return MDNode::get(Context, MDs);
  }

  /// Matches metadata with the hint name.
  bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
    MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
    if (!Name)
      return false;

    for (auto H : HintTypes)
      if (Name->getString().endswith(H.Name))
        return true;
    return false;
  }
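
  // For example (illustrative): createHintMetadata("llvm.loop.vectorize.width", 4)
  // above produces the node !{!"llvm.loop.vectorize.width", i32 4}.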
  /// Sets current hints into loop metadata, keeping other values intact.
  void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
    if (HintTypes.size() == 0)
      return;

    // Reserve the first element for the LoopID (see below).
    SmallVector<Metadata *, 4> MDs(1);
    // If the loop already has metadata, then ignore the existing operands.
    MDNode *LoopID = TheLoop->getLoopID();
    if (LoopID) {
      for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
        MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
        // If the node is in the update list, ignore the old value.
        if (!matchesHintMetadataName(Node, HintTypes))
          MDs.push_back(Node);
      }
    }

    // Now, add the missing hints.
    for (auto H : HintTypes)
      MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));

    // Replace the current metadata node with the new one.
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);

    TheLoop->setLoopID(NewLoopID);
  }

  /// The loop these hints belong to.
  const Loop *TheLoop;
};

static void emitAnalysisDiag(const Function *TheFunction, const Loop *TheLoop,
                             const LoopVectorizeHints &Hints,
                             const LoopAccessReport &Message) {
  const char *Name = Hints.vectorizeAnalysisPassName();
  LoopAccessReport::emitAnalysis(Message, TheFunction, TheLoop, Name);
}

static void emitMissedWarning(Function *F, Loop *L,
                              const LoopVectorizeHints &LH) {
  emitOptimizationRemarkMissed(F->getContext(), LV_NAME, *F, L->getStartLoc(),
                               LH.emitRemark());

  if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
    if (LH.getWidth() != 1)
      emitLoopVectorizeWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop vectorization");
    else if (LH.getInterleave() != 1)
      emitLoopInterleaveWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop interleaving");
  }
}

/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
/// to what vectorization factor.
/// This class does not look at the profitability of vectorization, only the
/// legality. This class has two main kinds of checks:
/// * Memory checks - The code in canVectorizeMemory checks if vectorization
///   will change the order of memory accesses in a way that will change the
///   correctness of the program.
/// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory
///   checks for a number of different conditions, such as the availability of
///   a single induction variable, and that all types are supported and
///   vectorizable, etc. This code reflects the capabilities of
///   InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying the
/// induction variable and the different reduction variables.
class LoopVectorizationLegality {
public:
  LoopVectorizationLegality(Loop *L, PredicatedScalarEvolution &PSE,
                            DominatorTree *DT, TargetLibraryInfo *TLI,
                            AliasAnalysis *AA, Function *F,
                            const TargetTransformInfo *TTI,
                            LoopAccessAnalysis *LAA,
                            LoopVectorizationRequirements *R,
                            LoopVectorizeHints *H)
      : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TheFunction(F),
        TTI(TTI), DT(DT), LAA(LAA), LAI(nullptr), InterleaveInfo(PSE, L, DT),
        Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
        Requirements(R), Hints(H) {}
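
  // For example (illustrative): in
  //   for (int i = 0; i < n; ++i)
  //     sum += a[i];
  // 'i' is collected into the induction list below and 'sum' into the
  // reduction list.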
  /// ReductionList contains the reduction descriptors for all
  /// of the reductions that were found in the loop.
  typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;

  /// InductionList saves induction variables and maps them to the
  /// induction descriptor.
  typedef MapVector<PHINode *, InductionDescriptor> InductionList;

  /// RecurrenceSet contains the phi nodes that are recurrences other than
  /// inductions and reductions.
  typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet;

  /// Returns true if it is legal to vectorize this loop.
  /// This does not mean that it is profitable to vectorize this
  /// loop, only that it is legal to do so.
  bool canVectorize();

  /// Returns the Induction variable.
  PHINode *getInduction() { return Induction; }

  /// Returns the reduction variables found in the loop.
  ReductionList *getReductionVars() { return &Reductions; }

  /// Returns the induction variables found in the loop.
  InductionList *getInductionVars() { return &Inductions; }

  /// Return the first-order recurrences found in the loop.
  RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }

  /// Returns the widest induction type.
  Type *getWidestInductionType() { return WidestIndTy; }

  /// Returns True if V is an induction variable in this loop.
  bool isInductionVariable(const Value *V);

  /// Returns True if PN is a reduction variable in this loop.
  bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }

  /// Returns True if Phi is a first-order recurrence in this loop.
  bool isFirstOrderRecurrence(const PHINode *Phi);

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  bool blockNeedsPredication(BasicBlock *BB);

  /// Check if this pointer is consecutive when vectorizing. This happens
  /// when the last index of the GEP is the induction variable, or when the
  /// pointer itself is an induction variable.
  /// This check allows us to vectorize A[idx] into a wide load/store.
  /// Returns:
  /// 0 - Stride is unknown or non-consecutive.
  /// 1 - Address is consecutive.
  /// -1 - Address is consecutive, and decreasing.
  int isConsecutivePtr(Value *Ptr);

  /// Returns true if the value V is uniform within the loop.
  bool isUniform(Value *V);

  /// Returns true if this instruction will remain scalar after vectorization.
  bool isUniformAfterVectorization(Instruction *I) { return Uniforms.count(I); }

  /// Returns the information that we collected about runtime memory check.
  const RuntimePointerChecking *getRuntimePointerChecking() const {
    return LAI->getRuntimePointerChecking();
  }

  const LoopAccessInfo *getLAI() const { return LAI; }

  /// \brief Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// \brief Return the maximum interleave factor of all interleaved groups.
  unsigned getMaxInterleaveFactor() const {
    return InterleaveInfo.getMaxInterleaveFactor();
  }

  /// \brief Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }
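
  // Referring back to isConsecutivePtr above (illustrative): for 'A[i]' with
  // i incremented by 1 it returns 1, for 'A[n - i]' it returns -1, and for a
  // strided access like 'A[2 * i]' it returns 0.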
  /// \brief Returns true if an interleaved group requires a scalar iteration
  /// to handle accesses with gaps.
  bool requiresScalarEpilogue() const {
    return InterleaveInfo.requiresScalarEpilogue();
  }

  unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }

  bool hasStride(Value *V) { return LAI->hasStride(V); }

  /// Returns true if the target machine supports the masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
  }
  /// Returns true if the target machine supports the masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
  }
  /// Returns true if the target machine supports the masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType) {
    return TTI->isLegalMaskedScatter(DataType);
  }
  /// Returns true if the target machine supports the masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType) {
    return TTI->isLegalMaskedGather(DataType);
  }

  /// Returns true if the vector representation of the instruction \p I
  /// requires a mask.
  bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
  unsigned getNumStores() const { return LAI->getNumStores(); }
  unsigned getNumLoads() const { return LAI->getNumLoads(); }
  unsigned getNumPredStores() const { return NumPredStores; }

private:
  /// Check if a single basic block loop is vectorizable.
  /// At this point we know that this is a loop with a constant trip count
  /// and we only need to check individual instructions.
  bool canVectorizeInstrs();

  /// When we vectorize loops we may change the order in which
  /// we read and write from memory. This method checks if it is
  /// legal to vectorize the code, considering only memory constraints.
  /// Returns true if the loop is vectorizable.
  bool canVectorizeMemory();

  /// Return true if we can vectorize this loop using the IF-conversion
  /// transformation.
  bool canVectorizeWithIfConvert();

  /// Collect the variables that need to stay uniform after vectorization.
  void collectLoopUniforms();

  /// Return true if all of the instructions in the block can be speculatively
  /// executed. \p SafePtrs is a list of addresses that are known to be legal
  /// and we know that we can read from them without segfault.
  bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);

  /// Updates the vectorization state by adding \p Phi to the inductions list.
  /// This can set \p Phi as the main induction of the loop if \p Phi is a
  /// better choice for the main induction than the existing one.
  void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
                       SmallPtrSetImpl<Value *> &AllowedExit);

  /// Report an analysis message to assist the user in diagnosing loops that
  /// are not vectorized. These are handled as LoopAccessReport rather than
  /// VectorizationReport because the << operator of VectorizationReport
  /// returns LoopAccessReport.
  void emitAnalysis(const LoopAccessReport &Message) const {
    emitAnalysisDiag(TheFunction, TheLoop, *Hints, Message);
  }

  /// \brief If an access has a symbolic stride, this maps the pointer value to
  /// the stride symbol.
  const ValueToValueMap *getSymbolicStrides() {
    // FIXME: Currently, the set of symbolic strides is sometimes queried before
    // it's collected. This happens from canVectorizeWithIfConvert, when the
    // pointer is checked to reference consecutive elements suitable for a
    // masked access.
    return LAI ? &LAI->getSymbolicStrides() : nullptr;
  }

  unsigned NumPredStores;

  /// The loop that we evaluate.
  Loop *TheLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
  /// Applies dynamic knowledge to simplify SCEV expressions in the context
  /// of existing SCEV assumptions. The analysis will also add a minimal set
  /// of new predicates if this is required to enable vectorization and
  /// unrolling.
  PredicatedScalarEvolution &PSE;
  /// Target Library Info.
  TargetLibraryInfo *TLI;
  /// Parent function.
  Function *TheFunction;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Dominator Tree.
  DominatorTree *DT;
  // LoopAccess analysis.
  LoopAccessAnalysis *LAA;
  // And the loop-accesses info corresponding to this loop. This pointer is
  // null until canVectorizeMemory sets it up.
  const LoopAccessInfo *LAI;

  /// The interleaved access information contains groups of interleaved
  /// accesses with the same stride and close to each other.
  InterleavedAccessInfo InterleaveInfo;

  // --- vectorization state --- //

  /// Holds the integer induction variable. This is the counter of the
  /// loop.
  PHINode *Induction;
  /// Holds the reduction variables.
  ReductionList Reductions;
  /// Holds all of the induction variables that we found in the loop.
  /// Notice that inductions don't need to start at zero and that induction
  /// variables can be pointers.
  InductionList Inductions;
  /// Holds the phi nodes that are first-order recurrences.
  RecurrenceSet FirstOrderRecurrences;
  /// Holds the widest induction type encountered.
  Type *WidestIndTy;

  /// Allowed outside users. This holds the induction and reduction
  /// vars which can be accessed from outside the loop.
  SmallPtrSet<Value *, 4> AllowedExit;
  /// This set holds the variables which are known to be uniform after
  /// vectorization.
  SmallPtrSet<Instruction *, 4> Uniforms;

  /// Can we assume the absence of NaNs.
  bool HasFunNoNaNAttr;

  /// Vectorization requirements that will go through late-evaluation.
  LoopVectorizationRequirements *Requirements;

  /// Used to emit an analysis of any legality issues.
  LoopVectorizeHints *Hints;

  /// While vectorizing these instructions we have to generate a
  /// call to the appropriate masked intrinsic.
  SmallPtrSet<const Instruction *, 8> MaskedOp;
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set.
/// We use the TargetTransformInfo to query the different backends for the
/// cost of different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
                             LoopInfo *LI, LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC, const Function *F,
                             const LoopVectorizeHints *Hints)
      : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
        AC(AC), TheFunction(F), Hints(Hints) {}

  /// Information about vectorization costs.
  struct VectorizationFactor {
    unsigned Width; // Vector width with best cost
    unsigned Cost;  // Cost of the loop with that width
  };
  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to VF. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(bool OptForSize);

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If the interleave count has been specified by metadata it will be
  /// returned. Otherwise, the interleave count is computed and returned. VF
  /// and LoopCost are the selected vectorization factor and the cost of the
  /// selected VF.
  unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
                                 unsigned LoopCost);

  /// \return The most profitable unroll factor.
  /// This method finds the best unroll-factor based on register pressure and
  /// other parameters. VF and LoopCost are the selected vectorization factor
  /// and the cost of the selected VF.
  unsigned computeInterleaveCount(bool OptForSize, unsigned VF,
                                  unsigned LoopCost);

  /// \brief A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    unsigned LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    unsigned MaxLocalUsers;
    /// Holds the number of instructions in the loop.
    unsigned NumInstructions;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

private:
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  typedef std::pair<unsigned, bool> VectorizationCostTy;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths.
  /// The cost that is returned is *not* normalized by the vectorization
  /// factor (VF).
  VectorizationCostTy expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Report an analysis message to assist the user in diagnosing loops that are
  /// not vectorized. These are handled as LoopAccessReport rather than
  /// VectorizationReport because the << operator of VectorizationReport returns
  /// LoopAccessReport.
  void emitAnalysis(const LoopAccessReport &Message) const {
    emitAnalysisDiag(TheFunction, TheLoop, *Hints, Message);
  }

public:
  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// The loop that we evaluate.
  Loop *TheLoop;
  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;
  /// Loop Info analysis.
  LoopInfo *LI;
  /// Vectorization legality.
  LoopVectorizationLegality *Legal;
  /// Vector target information.
  const TargetTransformInfo &TTI;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Demanded bits analysis.
  DemandedBits *DB;
  /// Assumption cache.
  AssumptionCache *AC;
  const Function *TheFunction;
  /// Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;
  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;
  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;
};

/// \brief This holds vectorization requirements that must be verified late in
/// the process. The requirements are set by the legality analysis and the
/// cost model. Once vectorization has been determined to be possible and
/// profitable the requirements can be verified by looking for metadata or
/// compiler options.
/// For example, some loops require FP commutativity which is only allowed if
/// vectorization is explicitly specified or if the fast-math compiler option
/// has been provided.
/// Late evaluation of these requirements allows helpful diagnostics to be
/// composed that tell the user what needs to be done to vectorize the loop.
/// For example, by specifying #pragma clang loop vectorize or -ffast-math.
/// Late evaluation should be used only when diagnostics can be generated that
/// can be followed by a non-expert user.
class LoopVectorizationRequirements {
public:
  LoopVectorizationRequirements()
      : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr) {}

  void addUnsafeAlgebraInst(Instruction *I) {
    // First unsafe algebra instruction.
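    // Only the first such instruction is recorded; its debug location is what
    // doesNotMeet later uses for the FP-commutativity diagnostic.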
    if (!UnsafeAlgebraInst)
      UnsafeAlgebraInst = I;
  }

  void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }

  bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
    const char *Name = Hints.vectorizeAnalysisPassName();
    bool Failed = false;
    if (UnsafeAlgebraInst && !Hints.allowReordering()) {
      emitOptimizationRemarkAnalysisFPCommute(
          F->getContext(), Name, *F, UnsafeAlgebraInst->getDebugLoc(),
          VectorizationReport() << "cannot prove it is safe to reorder "
                                   "floating-point operations");
      Failed = true;
    }

    // Test if runtime memcheck thresholds are exceeded.
    bool PragmaThresholdReached =
        NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
    bool ThresholdReached =
        NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
    if ((ThresholdReached && !Hints.allowReordering()) ||
        PragmaThresholdReached) {
      emitOptimizationRemarkAnalysisAliasing(
          F->getContext(), Name, *F, L->getStartLoc(),
          VectorizationReport()
              << "cannot prove it is safe to reorder memory operations");
      DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
      Failed = true;
    }

    return Failed;
  }

private:
  unsigned NumRuntimePointerChecks;
  Instruction *UnsafeAlgebraInst;
};

static void addInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) {
  if (L.empty())
    return V.push_back(&L);

  for (Loop *InnerL : L)
    addInnerLoop(*InnerL, V);
}

/// The LoopVectorize Pass.
struct LoopVectorize : public FunctionPass {
  /// Pass identification, replacement for typeid.
  static char ID;

  explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true)
      : FunctionPass(ID), DisableUnrolling(NoUnrolling),
        AlwaysVectorize(AlwaysVectorize) {
    initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
  }

  ScalarEvolution *SE;
  LoopInfo *LI;
  TargetTransformInfo *TTI;
  DominatorTree *DT;
  BlockFrequencyInfo *BFI;
  TargetLibraryInfo *TLI;
  DemandedBits *DB;
  AliasAnalysis *AA;
  AssumptionCache *AC;
  LoopAccessAnalysis *LAA;
  bool DisableUnrolling;
  bool AlwaysVectorize;

  BlockFrequency ColdEntryFreq;

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    TLI = TLIP ? &TLIP->getTLI() : nullptr;
    AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    LAA = &getAnalysis<LoopAccessAnalysis>();
    DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();

    // Compute some weights outside of the loop over the loops. Compute this
    // using a BranchProbability to re-use its scaling math.
    const BranchProbability ColdProb(1, 5); // 20%
    ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb;

    // Don't attempt if
    // 1. the target claims to have no vector registers, and
    // 2. interleaving won't help ILP.
    //
    // The second condition is necessary because, even if the target has no
    // vector registers, loop vectorization may still enable scalar
    // interleaving.
    if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
      return false;

    // Build up a worklist of inner-loops to vectorize. This is necessary as
    // the act of vectorizing or partially unrolling a loop creates new loops
    // and can invalidate iterators across the loops.
    SmallVector<Loop *, 8> Worklist;

    for (Loop *L : *LI)
      addInnerLoop(*L, Worklist);

    LoopsAnalyzed += Worklist.size();

    // Now walk the identified inner loops.
    bool Changed = false;
    while (!Worklist.empty())
      Changed |= processLoop(Worklist.pop_back_val());

    // Process each loop nest in the function.
    return Changed;
  }

  static void AddRuntimeUnrollDisableMetaData(Loop *L) {
    SmallVector<Metadata *, 4> MDs;
    // Reserve first location for self reference to the LoopID metadata node.
    MDs.push_back(nullptr);
    bool IsUnrollMetadata = false;
    MDNode *LoopID = L->getLoopID();
    if (LoopID) {
      // First find existing loop unrolling disable metadata.
      for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
        MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
        if (MD) {
          const MDString *S = dyn_cast<MDString>(MD->getOperand(0));
          IsUnrollMetadata =
              S && S->getString().startswith("llvm.loop.unroll.disable");
        }
        MDs.push_back(LoopID->getOperand(i));
      }
    }

    if (!IsUnrollMetadata) {
      // Add runtime unroll disable metadata.
      LLVMContext &Context = L->getHeader()->getContext();
      SmallVector<Metadata *, 1> DisableOperands;
      DisableOperands.push_back(
          MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
      MDNode *DisableNode = MDNode::get(Context, DisableOperands);
      MDs.push_back(DisableNode);
      MDNode *NewLoopID = MDNode::get(Context, MDs);
      // Set operand 0 to refer to the loop id itself.
      NewLoopID->replaceOperandWith(0, NewLoopID);
      L->setLoopID(NewLoopID);
    }
  }

  bool processLoop(Loop *L) {
    assert(L->empty() && "Only process inner loops.");

#ifndef NDEBUG
    const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

    DEBUG(dbgs() << "\nLV: Checking a loop in \""
                 << L->getHeader()->getParent()->getName() << "\" from "
                 << DebugLocStr << "\n");

    LoopVectorizeHints Hints(L, DisableUnrolling);

    DEBUG(dbgs() << "LV: Loop hints:"
                 << " force="
                 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                         ? "disabled"
                         : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                                ? "enabled"
                                : "?"))
                 << " width=" << Hints.getWidth()
                 << " unroll=" << Hints.getInterleave() << "\n");

    // Function containing loop.
    Function *F = L->getHeader()->getParent();

    // Looking at the diagnostic output is the only way to determine if a loop
    // was vectorized (other than looking at the IR or machine code), so it
    // is important to generate an optimization remark for each loop. Most of
    // these messages are generated by emitOptimizationRemarkAnalysis.
    // Remarks generated by emitOptimizationRemark and
    // emitOptimizationRemarkMissed are less verbose, reporting vectorized
    // loops and unvectorized loops that may benefit from vectorization,
    // respectively.

    if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
      DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
      return false;
    }

    // Check the loop for a trip count threshold:
    // do not vectorize loops with a tiny trip count.
    const unsigned TC = SE->getSmallConstantTripCount(L);
    if (TC > 0u && TC < TinyTripCountVectorThreshold) {
      DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                   << "This loop is not worth vectorizing.");
      if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
        DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
      else {
        DEBUG(dbgs() << "\n");
        emitAnalysisDiag(F, L, Hints, VectorizationReport()
                                          << "vectorization is not beneficial "
                                             "and is not explicitly forced");
        return false;
      }
    }

    PredicatedScalarEvolution PSE(*SE, *L);

    // Check if it is legal to vectorize the loop.
    LoopVectorizationRequirements Requirements;
    LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, LAA,
                                  &Requirements, &Hints);
    if (!LVL.canVectorize()) {
      DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
      emitMissedWarning(F, L, Hints);
      return false;
    }

    // Use the cost model.
    LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, F,
                                  &Hints);
    CM.collectValuesToIgnore();

    // Check the function attributes to find out if this function should be
    // optimized for size.
    bool OptForSize =
        Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();

    // Compute the weighted frequency of this loop being executed and see if it
    // is less than 20% of the function entry baseline frequency. Note that we
    // always have a canonical loop here because we think we *can* vectorize.
    // FIXME: This is hidden behind a flag due to pervasive problems with
    // exactly what block frequency models.
    if (LoopVectorizeWithBlockFrequency) {
      BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader());
      if (Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
          LoopEntryFreq < ColdEntryFreq)
        OptForSize = true;
    }

    // Check the function attributes to see if implicit floats are allowed.
    // FIXME: This check doesn't seem possibly correct -- what if the loop is
    // an integer loop and the vector instructions selected are purely integer
    // vector instructions?
    if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
      DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat "
                      "attribute is used.\n");
      emitAnalysisDiag(
          F, L, Hints,
          VectorizationReport()
              << "loop not vectorized due to NoImplicitFloat attribute");
      emitMissedWarning(F, L, Hints);
      return false;
    }

    // Check if the target supports potentially unsafe FP vectorization.
    // FIXME: Add a check for the type of safety issue (denormal, signaling)
    // for the target we're vectorizing for, to make sure none of the
    // additional fp-math flags can help.
    if (Hints.isPotentiallyUnsafe() &&
        TTI->isFPVectorizationPotentiallyUnsafe()) {
      DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
      emitAnalysisDiag(F, L, Hints,
                       VectorizationReport()
                           << "loop not vectorized due to unsafe FP support.");
      emitMissedWarning(F, L, Hints);
      return false;
    }

    // Select the optimal vectorization factor.
    const LoopVectorizationCostModel::VectorizationFactor VF =
        CM.selectVectorizationFactor(OptForSize);

    // Select the interleave count.
    unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);

    // Get the user's interleave count.
    unsigned UserIC = Hints.getInterleave();

    // Identify the diagnostic messages that should be produced.
    std::string VecDiagMsg, IntDiagMsg;
    bool VectorizeLoop = true, InterleaveLoop = true;

    if (Requirements.doesNotMeet(F, L, Hints)) {
      DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                      "requirements.\n");
      emitMissedWarning(F, L, Hints);
      return false;
    }

    if (VF.Width == 1) {
      DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
      VecDiagMsg =
          "the cost-model indicates that vectorization is not beneficial";
      VectorizeLoop = false;
    }

    if (IC == 1 && UserIC <= 1) {
      // Tell the user interleaving is not beneficial.
      DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
      IntDiagMsg =
          "the cost-model indicates that interleaving is not beneficial";
      InterleaveLoop = false;
      if (UserIC == 1)
        IntDiagMsg +=
            " and is explicitly disabled or interleave count is set to 1";
    } else if (IC > 1 && UserIC == 1) {
      // Tell the user interleaving is beneficial, but it is explicitly
      // disabled.
      DEBUG(dbgs()
            << "LV: Interleaving is beneficial but is explicitly disabled.\n");
      IntDiagMsg = "the cost-model indicates that interleaving is beneficial "
                   "but is explicitly disabled or interleave count is set to 1";
      InterleaveLoop = false;
    }

    // Override IC if the user provided an interleave count.
    IC = UserIC > 0 ? UserIC : IC;

    // Emit diagnostic messages, if any.
    const char *VAPassName = Hints.vectorizeAnalysisPassName();
    if (!VectorizeLoop && !InterleaveLoop) {
      // Do not vectorize or interleave the loop.
      emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
                                     L->getStartLoc(), VecDiagMsg);
      emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
                                     L->getStartLoc(), IntDiagMsg);
      return false;
    } else if (!VectorizeLoop && InterleaveLoop) {
      DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
      emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
                                     L->getStartLoc(), VecDiagMsg);
    } else if (VectorizeLoop && !InterleaveLoop) {
      DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                   << DebugLocStr << '\n');
      emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
                                     L->getStartLoc(), IntDiagMsg);
    } else if (VectorizeLoop && InterleaveLoop) {
      DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                   << DebugLocStr << '\n');
      DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    }

    if (!VectorizeLoop) {
      assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided that it is not beneficial to vectorize the loop, then
      // interleave it.
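      // Note that InnerLoopUnroller is an InnerLoopVectorizer with VF = 1;
      // the "interleaved" loop executes IC scalar iterations per iteration.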
      InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, IC);
      Unroller.vectorize(&LVL, CM.MinBWs);

      emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
                             Twine("interleaved loop (interleave count: ") +
                                 Twine(IC) + ")");
    } else {
      // If we decided that it is *legal* to vectorize the loop, then do it.
      InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, VF.Width, IC);
      LB.vectorize(&LVL, CM.MinBWs);
      ++LoopsVectorized;

      // Add metadata to disable runtime unrolling of the scalar loop when
      // there are no runtime checks about strides and memory. A scalar loop
      // that is rarely used is not worth unrolling.
      if (!LB.areSafetyChecksAdded())
        AddRuntimeUnrollDisableMetaData(L);

      // Report the vectorization decision.
      emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
                             Twine("vectorized loop (vectorization width: ") +
                                 Twine(VF.Width) + ", interleave count: " +
                                 Twine(IC) + ")");
    }

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();

    DEBUG(verifyFunction(*L->getHeader()->getParent()));
    return true;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequiredID(LoopSimplifyID);
    AU.addRequiredID(LCSSAID);
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<LoopAccessAnalysis>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
  }
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
// LoopVectorizationCostModel.
//===----------------------------------------------------------------------===//

Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop.
  Instruction *Instr = dyn_cast<Instruction>(V);
  bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody);
  bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;

  // Place the code for broadcasting invariant variables in the new preheader.
  IRBuilder<>::InsertPointGuard Guard(Builder);
  if (Invariant)
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // Broadcast the scalar into all locations in the vector.
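  // For example, with VF = 4 a loop-invariant scalar %x becomes the vector
  // <%x, %x, %x, %x>; CreateVectorSplat below emits this as an insertelement
  // followed by a zero-mask shufflevector.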
  Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

  return Shuf;
}

Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx,
                                          const SCEV *StepSCEV) {
  const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
  SCEVExpander Exp(*PSE.getSE(), DL, "induction");
  Value *StepValue = Exp.expandCodeFor(StepSCEV, StepSCEV->getType(),
                                       &*Builder.GetInsertPoint());
  return getStepVector(Val, StartIdx, StepValue);
}

void InnerLoopVectorizer::widenInductionVariable(const InductionDescriptor &II,
                                                 VectorParts &Entry,
                                                 IntegerType *TruncType) {
  Value *Start = II.getStartValue();
  ConstantInt *Step = II.getConstIntStepValue();
  assert(Step && "Cannot widen an IV with a non-constant step");

  // Construct the initial value of the vector IV in the vector loop preheader.
  auto CurrIP = Builder.saveIP();
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
  if (TruncType) {
    Step = ConstantInt::getSigned(TruncType, Step->getSExtValue());
    Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
  }
  Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
  Value *SteppedStart = getStepVector(SplatStart, 0, Step);
  Builder.restoreIP(CurrIP);

  Value *SplatVF = ConstantVector::getSplat(
      VF, ConstantInt::getSigned(Start->getType(), VF * Step->getSExtValue()));
  // We may need to add the step a number of times, depending on the unroll
  // factor. The last of those goes into the PHI.
  PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
                                    &*LoopVectorBody->getFirstInsertionPt());
  Value *LastInduction = VecInd;
  for (unsigned Part = 0; Part < UF; ++Part) {
    Entry[Part] = LastInduction;
    LastInduction = Builder.CreateAdd(LastInduction, SplatVF, "step.add");
  }

  VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
  VecInd->addIncoming(LastInduction, LoopVectorBody);
}

Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx,
                                          Value *Step) {
  assert(Val->getType()->isVectorTy() && "Must be a vector");
  assert(Val->getType()->getScalarType()->isIntegerTy() &&
         "Elem must be an integer");
  assert(Step->getType() == Val->getType()->getScalarType() &&
         "Step has wrong type");
  // Create the types.
  Type *ITy = Val->getType()->getScalarType();
  VectorType *Ty = cast<VectorType>(Val->getType());
  int VLen = Ty->getNumElements();
  SmallVector<Constant *, 8> Indices;

  // Create a vector of consecutive numbers from zero to VF.
  for (int i = 0; i < VLen; ++i)
    Indices.push_back(ConstantInt::get(ITy, StartIdx + i));

  // Add the consecutive indices to the vector value.
  Constant *Cv = ConstantVector::get(Indices);
  assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
  Step = Builder.CreateVectorSplat(VLen, Step);
  assert(Step->getType() == Val->getType() && "Invalid step vec");
  // FIXME: The newly created binary instructions should contain nsw/nuw flags,
  // which can be found from the original scalar operations.
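  // For example, with VLen = 4, StartIdx = 0, and Step = 2, Cv is
  // <0, 1, 2, 3>, the splatted step is <2, 2, 2, 2>, and the result below is
  // Val + <0, 2, 4, 6>.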
  Step = Builder.CreateMul(Cv, Step);
  return Builder.CreateAdd(Val, Step, "induction");
}

int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr");
  auto *SE = PSE.getSE();
  // Make sure that the pointer does not point to structs.
  if (Ptr->getType()->getPointerElementType()->isAggregateType())
    return 0;

  // If this value is a pointer induction variable, we know it is consecutive.
  PHINode *Phi = dyn_cast_or_null<PHINode>(Ptr);
  if (Phi && Inductions.count(Phi)) {
    InductionDescriptor II = Inductions[Phi];
    return II.getConsecutiveDirection();
  }

  GetElementPtrInst *Gep = getGEPInstruction(Ptr);
  if (!Gep)
    return 0;

  unsigned NumOperands = Gep->getNumOperands();
  Value *GpPtr = Gep->getPointerOperand();
  // If this GEP value is a consecutive pointer induction variable and all of
  // the indices are loop invariant, then we know it is consecutive.
  Phi = dyn_cast<PHINode>(GpPtr);
  if (Phi && Inductions.count(Phi)) {

    // Make sure that the pointer does not point to structs.
    PointerType *GepPtrType = cast<PointerType>(GpPtr->getType());
    if (GepPtrType->getElementType()->isAggregateType())
      return 0;

    // Make sure that all of the index operands are loop invariant.
    for (unsigned i = 1; i < NumOperands; ++i)
      if (!SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop))
        return 0;

    InductionDescriptor II = Inductions[Phi];
    return II.getConsecutiveDirection();
  }

  unsigned InductionOperand = getGEPInductionOperand(Gep);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned i = 0; i != NumOperands; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop))
      return 0;

  // We can emit wide load/stores only if the last non-zero index is the
  // induction variable.
  const SCEV *Last = nullptr;
  if (!getSymbolicStrides() || !getSymbolicStrides()->count(Gep))
    Last = PSE.getSCEV(Gep->getOperand(InductionOperand));
  else {
    // Because of the multiplication by a stride we can have a s/zext cast.
    // We are going to replace this stride by 1 so the cast is safe to ignore.
    //
    //  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
    //  %0 = trunc i64 %indvars.iv to i32
    //  %mul = mul i32 %0, %Stride1
    //  %idxprom = zext i32 %mul to i64  << Safe cast.
    //  %arrayidx = getelementptr inbounds i32* %B, i64 %idxprom
    //
    Last = replaceSymbolicStrideSCEV(PSE, *getSymbolicStrides(),
                                     Gep->getOperand(InductionOperand), Gep);
    if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(Last))
      Last =
          (C->getSCEVType() == scSignExtend || C->getSCEVType() == scZeroExtend)
              ? C->getOperand()
              : Last;
  }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Last)) {
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // The memory is consecutive because the last index is consecutive
    // and all other indices are loop invariant.
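    // For example, A[i] has step 1 and the reversed access A[N - i] has step
    // -1; any other stride falls through and is reported as non-consecutive.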
    if (Step->isOne())
      return 1;
    if (Step->isAllOnesValue())
      return -1;
  }

  return 0;
}

bool LoopVectorizationLegality::isUniform(Value *V) {
  return LAI->isUniform(V);
}

InnerLoopVectorizer::VectorParts &
InnerLoopVectorizer::getVectorValue(Value *V) {
  assert(V != Induction && "The new induction variable should not be used.");
  assert(!V->getType()->isVectorTy() && "Can't widen a vector");

  // If we have a stride that is replaced by one, do it here.
  if (Legal->hasStride(V))
    V = ConstantInt::get(V->getType(), 1);

  // If we have this scalar in the map, return it.
  if (WidenMap.has(V))
    return WidenMap.get(V);

  // If this scalar is unknown, assume that it is a constant or that it is
  // loop invariant. Broadcast V and save the value for future uses.
  Value *B = getBroadcastInstrs(V);
  return WidenMap.splat(V, B);
}

Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
  assert(Vec->getType()->isVectorTy() && "Invalid type");
  SmallVector<Constant *, 8> ShuffleMask;
  for (unsigned i = 0; i < VF; ++i)
    ShuffleMask.push_back(Builder.getInt32(VF - i - 1));

  return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
                                     ConstantVector::get(ShuffleMask),
                                     "reverse");
}

// Get a mask to interleave \p NumVec vectors into a wide vector.
// I.e. <0, VF, VF*2, ..., VF*(NumVec-1), 1, VF+1, VF*2+1, ...>
// E.g. For 2 interleaved vectors, if VF is 4, the mask is:
//      <0, 4, 1, 5, 2, 6, 3, 7>
static Constant *getInterleavedMask(IRBuilder<> &Builder, unsigned VF,
                                    unsigned NumVec) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVec; j++)
      Mask.push_back(Builder.getInt32(j * VF + i));

  return ConstantVector::get(Mask);
}

// Get the strided mask starting from index \p Start.
// I.e. <Start, Start + Stride, ..., Start + Stride*(VF-1)>
static Constant *getStridedMask(IRBuilder<> &Builder, unsigned Start,
                                unsigned Stride, unsigned VF) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Builder.getInt32(Start + i * Stride));

  return ConstantVector::get(Mask);
}

// Get a mask of two parts: the first part consists of sequential integers
// starting from 0; the second part consists of UNDEFs.
// I.e. <0, 1, 2, ..., NumInt - 1, undef, ..., undef>
static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned NumInt,
                                   unsigned NumUndef) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < NumInt; i++)
    Mask.push_back(Builder.getInt32(i));

  Constant *Undef = UndefValue::get(Builder.getInt32Ty());
  for (unsigned i = 0; i < NumUndef; i++)
    Mask.push_back(Undef);

  return ConstantVector::get(Mask);
}

// Concatenate two vectors with the same element type. The 2nd vector should
// not have more elements than the 1st vector. If the 2nd vector has fewer
// elements, extend it with UNDEFs.
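// For example, concatenating <a, b, c, d> with <e, f> first widens the second
// vector to <e, f, undef, undef> and then shuffles both into the 6-element
// result <a, b, c, d, e, f>.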
static Value *ConcatenateTwoVectors(IRBuilder<> &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = VecTy1->getNumElements();
  unsigned NumElts2 = VecTy2->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Unexpected: the first vector has fewer elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    Constant *ExtMask =
        getSequentialMask(Builder, NumElts2, NumElts1 - NumElts2);
    V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask);
  }

  Constant *Mask = getSequentialMask(Builder, NumElts1 + NumElts2, 0);
  return Builder.CreateShuffleVector(V1, V2, Mask);
}

// Concatenate vectors in the given list. All vectors have the same type.
static Value *ConcatenateVectors(IRBuilder<> &Builder,
                                 ArrayRef<Value *> InputList) {
  unsigned NumVec = InputList.size();
  assert(NumVec > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(InputList.begin(), InputList.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVec - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVec - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(ConcatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVec % 2 != 0)
      TmpList.push_back(ResList[NumVec - 1]);

    ResList = TmpList;
    NumVec = ResList.size();
  } while (NumVec > 1);

  return ResList[0];
}

// Try to vectorize the interleave group that \p Instr belongs to.
//
// E.g. Translate the following interleaved load group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     R = Pic[i];   // Member of index 0
//     G = Pic[i+1]; // Member of index 1
//     B = Pic[i+2]; // Member of index 2
//     ...           // do something to R, G, B
//   }
// To:
//   %wide.vec = load <12 x i32>                      ; Read 4 tuples of R,G,B
//   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>  ; R elements
//   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements
//   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements
//
// Or translate the following interleaved store group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     ... do something to R, G, B
//     Pic[i]   = R; // Member of index 0
//     Pic[i+1] = G; // Member of index 1
//     Pic[i+2] = B; // Member of index 2
//   }
// To:
//   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
//   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
//   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
//       <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>   ; Interleave R,G,B elements
//   store <12 x i32> %interleaved.vec             ; Write 4 tuples of R,G,B
void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
  const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr);
  assert(Group && "Failed to get an interleaved access group.");

  // Skip if the current instruction is not the insert position.
  if (Instr != Group->getInsertPos())
    return;

  LoadInst *LI = dyn_cast<LoadInst>(Instr);
  StoreInst *SI = dyn_cast<StoreInst>(Instr);
  Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();

  // Prepare for the vector type of the interleaved load/store.
  Type *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  unsigned InterleaveFactor = Group->getFactor();
  Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
  Type *PtrTy = VecTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());

  // Prepare for the new pointers.
  setDebugLocFromInst(Builder, Ptr);
  VectorParts &PtrParts = getVectorValue(Ptr);
  SmallVector<Value *, 2> NewPtrs;
  unsigned Index = Group->getIndex(Instr);
  for (unsigned Part = 0; Part < UF; Part++) {
    // Extract the pointer for the current instruction from the pointer
    // vector. A reverse access uses the pointer in the last lane.
    Value *NewPtr = Builder.CreateExtractElement(
        PtrParts[Part],
        Group->isReverse() ? Builder.getInt32(VF - 1) : Builder.getInt32(0));

    // Note that the current instruction could be at any member index. We need
    // to adjust the address to that of the member at index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
    NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));

    // Cast to the vector pointer type.
    NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
  }

  setDebugLocFromInst(Builder, Instr);
  Value *UndefVec = UndefValue::get(VecTy);

  // Vectorize the interleaved load group.
  if (LI) {
    for (unsigned Part = 0; Part < UF; Part++) {
      Instruction *NewLoadInstr = Builder.CreateAlignedLoad(
          NewPtrs[Part], Group->getAlignment(), "wide.vec");

      for (unsigned i = 0; i < InterleaveFactor; i++) {
        Instruction *Member = Group->getMember(i);

        // Skip the gaps in the group.
        if (!Member)
          continue;

        Constant *StrideMask = getStridedMask(Builder, i, InterleaveFactor, VF);
        Value *StridedVec = Builder.CreateShuffleVector(
            NewLoadInstr, UndefVec, StrideMask, "strided.vec");

        // If this member has a different type, cast the result type.
        if (Member->getType() != ScalarTy) {
          VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
          StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy);
        }

        VectorParts &Entry = WidenMap.get(Member);
        Entry[Part] =
            Group->isReverse() ? reverseVector(StridedVec) : StridedVec;
      }

      addMetadata(NewLoadInstr, Instr);
    }
    return;
  }

  // The sub vector type for the current instruction.
  VectorType *SubVT = VectorType::get(ScalarTy, VF);

  // Vectorize the interleaved store group.
  for (unsigned Part = 0; Part < UF; Part++) {
    // Collect the stored vector from each member.
    SmallVector<Value *, 4> StoredVecs;
    for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow a gap, so each index has a
      // member.
      Instruction *Member = Group->getMember(i);
      assert(Member && "Failed to get a member from an interleaved store group");

      Value *StoredVec =
          getVectorValue(dyn_cast<StoreInst>(Member)->getValueOperand())[Part];
      if (Group->isReverse())
        StoredVec = reverseVector(StoredVec);

      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
        StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT);

      StoredVecs.push_back(StoredVec);
    }

    // Concatenate all vectors into a wide vector.
    Value *WideVec = ConcatenateVectors(Builder, StoredVecs);

    // Interleave the elements in the wide vector.
    Constant *IMask = getInterleavedMask(Builder, VF, InterleaveFactor);
    Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
                                              "interleaved.vec");

    Instruction *NewStoreInstr =
        Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
    addMetadata(NewStoreInstr, Instr);
  }
}

void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
  // Attempt to issue a wide load.
  LoadInst *LI = dyn_cast<LoadInst>(Instr);
  StoreInst *SI = dyn_cast<StoreInst>(Instr);

  assert((LI || SI) && "Invalid Load/Store instruction");

  // Try to vectorize the interleave group if this access is interleaved.
  if (Legal->isAccessInterleaved(Instr))
    return vectorizeInterleaveGroup(Instr);

  Type *ScalarDataTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  Type *DataTy = VectorType::get(ScalarDataTy, VF);
  Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();
  unsigned Alignment = LI ? LI->getAlignment() : SI->getAlignment();
  // An alignment of 0 means target ABI alignment. We need to use the scalar's
  // target ABI alignment in such a case.
  const DataLayout &DL = Instr->getModule()->getDataLayout();
  if (!Alignment)
    Alignment = DL.getABITypeAlignment(ScalarDataTy);
  unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
  unsigned ScalarAllocatedSize = DL.getTypeAllocSize(ScalarDataTy);
  unsigned VectorElementSize = DL.getTypeStoreSize(DataTy) / VF;

  if (SI && Legal->blockNeedsPredication(SI->getParent()) &&
      !Legal->isMaskRequired(SI))
    return scalarizeInstruction(Instr, true);

  if (ScalarAllocatedSize != VectorElementSize)
    return scalarizeInstruction(Instr);

  // If the pointer is loop invariant, scalarize the load.
  if (LI && Legal->isUniform(Ptr))
    return scalarizeInstruction(Instr);

  // If the pointer is non-consecutive and gather/scatter is not supported,
  // scalarize the instruction.
  int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
  bool Reverse = ConsecutiveStride < 0;
  bool CreateGatherScatter =
      !ConsecutiveStride && ((LI && Legal->isLegalMaskedGather(ScalarDataTy)) ||
                             (SI && Legal->isLegalMaskedScatter(ScalarDataTy)));

  if (!ConsecutiveStride && !CreateGatherScatter)
    return scalarizeInstruction(Instr);

  Constant *Zero = Builder.getInt32(0);
  VectorParts &Entry = WidenMap.get(Instr);
  VectorParts VectorGep;

  // Handle consecutive loads/stores.
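  // For example, for the consecutive access A[i] the GEP below is cloned with
  // the lane-0 value of the widened induction, and each unroll part then
  // addresses its slice at a constant offset (Part * VF) from that base.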
  GetElementPtrInst *Gep = getGEPInstruction(Ptr);
  if (ConsecutiveStride) {
    if (Gep && Legal->isInductionVariable(Gep->getPointerOperand())) {
      setDebugLocFromInst(Builder, Gep);
      Value *PtrOperand = Gep->getPointerOperand();
      Value *FirstBasePtr = getVectorValue(PtrOperand)[0];
      FirstBasePtr = Builder.CreateExtractElement(FirstBasePtr, Zero);

      // Create the new GEP with the new induction variable.
      GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
      Gep2->setOperand(0, FirstBasePtr);
      Gep2->setName("gep.indvar.base");
      Ptr = Builder.Insert(Gep2);
    } else if (Gep) {
      setDebugLocFromInst(Builder, Gep);
      assert(PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getPointerOperand()),
                                          OrigLoop) &&
             "Base ptr must be invariant");
      // The last index does not have to be the induction. It can be
      // consecutive and be a function of the index. For example, A[i+1].
      unsigned NumOperands = Gep->getNumOperands();
      unsigned InductionOperand = getGEPInductionOperand(Gep);
      // Create the new GEP with the new induction variable.
      GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());

      for (unsigned i = 0; i < NumOperands; ++i) {
        Value *GepOperand = Gep->getOperand(i);
        Instruction *GepOperandInst = dyn_cast<Instruction>(GepOperand);

        // Update the last index or a loop-invariant instruction anchored in
        // the loop.
        if (i == InductionOperand ||
            (GepOperandInst && OrigLoop->contains(GepOperandInst))) {
          assert((i == InductionOperand ||
                  PSE.getSE()->isLoopInvariant(PSE.getSCEV(GepOperandInst),
                                               OrigLoop)) &&
                 "Must be last index or loop invariant");

          VectorParts &GEPParts = getVectorValue(GepOperand);
          Value *Index = GEPParts[0];
          Index = Builder.CreateExtractElement(Index, Zero);
          Gep2->setOperand(i, Index);
          Gep2->setName("gep.indvar.idx");
        }
      }
      Ptr = Builder.Insert(Gep2);
    } else { // No GEP
      // Use the induction element ptr.
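      // For example, an access through a pointer induction such as *P++ ends
      // up here: Ptr is the phi itself, so we extract lane 0 of its widened
      // value as the base address.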
      assert(isa<PHINode>(Ptr) && "Invalid induction ptr");
      setDebugLocFromInst(Builder, Ptr);
      VectorParts &PtrVal = getVectorValue(Ptr);
      Ptr = Builder.CreateExtractElement(PtrVal[0], Zero);
    }
  } else {
    // At this point we should have a vector version of the GEP for a gather
    // or scatter.
    assert(CreateGatherScatter && "The instruction should be scalarized");
    if (Gep) {
      SmallVector<VectorParts, 4> OpsV;
      // When vectorizing the GEP across UF parts, we want to keep each
      // loop-invariant base or index of the GEP scalar.
      for (Value *Op : Gep->operands()) {
        if (PSE.getSE()->isLoopInvariant(PSE.getSCEV(Op), OrigLoop))
          OpsV.push_back(VectorParts(UF, Op));
        else
          OpsV.push_back(getVectorValue(Op));
      }

      for (unsigned Part = 0; Part < UF; ++Part) {
        SmallVector<Value *, 4> Ops;
        Value *GEPBasePtr = OpsV[0][Part];
        for (unsigned i = 1; i < Gep->getNumOperands(); i++)
          Ops.push_back(OpsV[i][Part]);
        Value *NewGep =
            Builder.CreateGEP(nullptr, GEPBasePtr, Ops, "VectorGep");
        assert(NewGep->getType()->isVectorTy() && "Expected vector GEP");
        NewGep =
            Builder.CreateBitCast(NewGep, VectorType::get(Ptr->getType(), VF));
        VectorGep.push_back(NewGep);
      }
    } else
      VectorGep = getVectorValue(Ptr);
  }

  VectorParts Mask = createBlockInMask(Instr->getParent());
  // Handle Stores:
  if (SI) {
    assert(!Legal->isUniform(SI->getPointerOperand()) &&
           "We do not allow storing to uniform addresses");
    setDebugLocFromInst(Builder, SI);
    // We don't want to update the value in the map as it might be used in
    // another expression. So don't use a reference type for "StoredVal".
    VectorParts StoredVal = getVectorValue(SI->getValueOperand());

    for (unsigned Part = 0; Part < UF; ++Part) {
      Instruction *NewSI = nullptr;
      if (CreateGatherScatter) {
        Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr;
        NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part],
                                            Alignment, MaskPart);
      } else {
        // Calculate the pointer for the specific unroll-part.
        Value *PartPtr =
            Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));

        if (Reverse) {
          // If we store to reverse consecutive memory locations, then we need
          // to reverse the order of elements in the stored value.
          StoredVal[Part] = reverseVector(StoredVal[Part]);
          // If the address is consecutive but reversed, then the
          // wide store needs to start at the last vector element.
          PartPtr =
              Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
          PartPtr =
              Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
          Mask[Part] = reverseVector(Mask[Part]);
        }

        Value *VecPtr =
            Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));

        if (Legal->isMaskRequired(SI))
          NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment,
                                            Mask[Part]);
        else
          NewSI =
              Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment);
      }
      addMetadata(NewSI, SI);
    }
    return;
  }

  // Handle loads.
  assert(LI && "Must have a load instruction");
  setDebugLocFromInst(Builder, LI);
  for (unsigned Part = 0; Part < UF; ++Part) {
    Instruction *NewLI;
    if (CreateGatherScatter) {
      Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr;
      NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, MaskPart,
                                         0, "wide.masked.gather");
      Entry[Part] = NewLI;
    } else {
      // Calculate the pointer for the specific unroll-part.
      Value *PartPtr =
          Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));

      if (Reverse) {
        // If the address is consecutive but reversed, then the
        // wide load needs to start at the last vector element.
        PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
        PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
        Mask[Part] = reverseVector(Mask[Part]);
      }

      Value *VecPtr =
          Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
      if (Legal->isMaskRequired(LI))
        NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
                                         UndefValue::get(DataTy),
                                         "wide.masked.load");
      else
        NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
      Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI;
    }
    addMetadata(NewLI, LI);
  }
}

void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
                                               bool IfPredicateStore) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
  // Holds vector parameters or scalars, in case of uniform values.
  SmallVector<VectorParts, 4> Params;

  setDebugLocFromInst(Builder, Instr);

  // Find all of the vectorized parameters.
  for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
    Value *SrcOp = Instr->getOperand(op);

    // If we are accessing the old induction variable, use the new one.
    if (SrcOp == OldInduction) {
      Params.push_back(getVectorValue(SrcOp));
      continue;
    }

    // Try using previously calculated values.
    Instruction *SrcInst = dyn_cast<Instruction>(SrcOp);

    // If the src is an instruction that appeared earlier in the basic block,
    // then it should already be vectorized.
    if (SrcInst && OrigLoop->contains(SrcInst)) {
      assert(WidenMap.has(SrcInst) && "Source operand is unavailable");
      // The parameter is a vector value from earlier.
      Params.push_back(WidenMap.get(SrcInst));
    } else {
      // The parameter is a scalar from outside the loop. Maybe even a constant.
      VectorParts Scalars;
      Scalars.append(UF, SrcOp);
      Params.push_back(Scalars);
    }
  }

  assert(Params.size() == Instr->getNumOperands() &&
         "Invalid number of operands");

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Value *UndefVec =
      IsVoidRetTy ? nullptr
                  : UndefValue::get(VectorType::get(Instr->getType(), VF));
  // Create a new entry in the WidenMap and initialize it to Undef or Null.
  VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);

  VectorParts Cond;
  if (IfPredicateStore) {
    assert(Instr->getParent()->getSinglePredecessor() &&
           "Only support single predecessor blocks");
    Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(),
                          Instr->getParent());
  }

  // For each vector unroll 'part':
  for (unsigned Part = 0; Part < UF; ++Part) {
    // For each scalar that we create:
    for (unsigned Width = 0; Width < VF; ++Width) {

      // Start if-block.
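      // For example, with VF = 4 each lane gets its own clone of the scalar
      // instruction; a predicated store's clone is guarded by its lane of the
      // edge mask, extracted and compared against 1 below.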
      Value *Cmp = nullptr;
      if (IfPredicateStore) {
        Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Width));
        Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp,
                                 ConstantInt::get(Cmp->getType(), 1));
      }

      Instruction *Cloned = Instr->clone();
      if (!IsVoidRetTy)
        Cloned->setName(Instr->getName() + ".cloned");
      // Replace the operands of the cloned instructions with extracted scalars.
      for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
        Value *Op = Params[op][Part];
        // Param is a vector. Need to extract the right lane.
        if (Op->getType()->isVectorTy())
          Op = Builder.CreateExtractElement(Op, Builder.getInt32(Width));
        Cloned->setOperand(op, Op);
      }
      addNewMetadata(Cloned, Instr);

      // Place the cloned scalar in the new loop.
      Builder.Insert(Cloned);

      // If we just cloned a new assumption, add it to the assumption cache.
      if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
        if (II->getIntrinsicID() == Intrinsic::assume)
          AC->registerAssumption(II);

      // If the original scalar returns a value we need to place it in a vector
      // so that future users will be able to use it.
      if (!IsVoidRetTy)
        VecResults[Part] = Builder.CreateInsertElement(VecResults[Part], Cloned,
                                                       Builder.getInt32(Width));
      // End if-block.
      if (IfPredicateStore)
        PredicatedStores.push_back(
            std::make_pair(cast<StoreInst>(Cloned), Cmp));
    }
  }
}

PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
                                                      Value *End, Value *Step,
                                                      Instruction *DL) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible no latch exists
  // yet. If so, use the header as this will be a single block loop.
  if (!Latch)
    Latch = Header;

  IRBuilder<> Builder(&*Header->getFirstInsertionPt());
  setDebugLocFromInst(Builder, getDebugLocFromInstOrOperands(OldInduction));
  auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");

  Builder.SetInsertPoint(Latch->getTerminator());

  // Create i+1 and fill the PHINode.
  Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
  Induction->addIncoming(Start, L->getLoopPreheader());
  Induction->addIncoming(Next, Latch);
  // Create the compare.
  Value *ICmp = Builder.CreateICmpEQ(Next, End);
  Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);

  // Now we have two terminators. Remove the old one from the block.
  Latch->getTerminator()->eraseFromParent();

  return Induction;
}

Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
  if (TripCount)
    return TripCount;

  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  // Find the loop boundaries.
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
         "Invalid loop count");

  Type *IdxTy = Legal->getWidestInductionType();

  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way that we get a backedge taken count is that the
  // induction variable was signed and as such will not overflow. In such a
  // case truncation is legal.
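  // For example, an i32 induction that is sign extended to i64 for the exit
  // compare produces an i64 backedge-taken count; since the i32 induction
  // cannot overflow, truncating that count back to i32 loses nothing.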
2891 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() > 2892 IdxTy->getPrimitiveSizeInBits()) 2893 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2894 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2895 2896 // Get the total trip count from the count by adding 1. 2897 const SCEV *ExitCount = SE->getAddExpr( 2898 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2899 2900 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2901 2902 // Expand the trip count and place the new instructions in the preheader. 2903 // Notice that the pre-header does not change, only the loop body. 2904 SCEVExpander Exp(*SE, DL, "induction"); 2905 2906 // Count holds the overall loop count (N). 2907 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2908 L->getLoopPreheader()->getTerminator()); 2909 2910 if (TripCount->getType()->isPointerTy()) 2911 TripCount = 2912 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2913 L->getLoopPreheader()->getTerminator()); 2914 2915 return TripCount; 2916 } 2917 2918 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 2919 if (VectorTripCount) 2920 return VectorTripCount; 2921 2922 Value *TC = getOrCreateTripCount(L); 2923 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2924 2925 // Now we need to generate the expression for the part of the loop that the 2926 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2927 // iterations are not required for correctness, or N - Step, otherwise. Step 2928 // is equal to the vectorization factor (number of SIMD elements) times the 2929 // unroll factor (number of SIMD instructions). 2930 Constant *Step = ConstantInt::get(TC->getType(), VF * UF); 2931 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2932 2933 // If there is a non-reversed interleaved group that may speculatively access 2934 // memory out-of-bounds, we need to ensure that there will be at least one 2935 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 2936 // the trip count, we set the remainder to be equal to the step. If the step 2937 // does not evenly divide the trip count, no adjustment is necessary since 2938 // there will already be scalar iterations. Note that the minimum iterations 2939 // check ensures that N >= Step. 2940 if (VF > 1 && Legal->requiresScalarEpilogue()) { 2941 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2942 R = Builder.CreateSelect(IsZero, Step, R); 2943 } 2944 2945 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2946 2947 return VectorTripCount; 2948 } 2949 2950 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 2951 BasicBlock *Bypass) { 2952 Value *Count = getOrCreateTripCount(L); 2953 BasicBlock *BB = L->getLoopPreheader(); 2954 IRBuilder<> Builder(BB->getTerminator()); 2955 2956 // Generate code to check that the loop's trip count that we computed by 2957 // adding one to the backedge-taken count will not overflow. 2958 Value *CheckMinIters = Builder.CreateICmpULT( 2959 Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check"); 2960 2961 BasicBlock *NewBB = 2962 BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked"); 2963 // Update dominator tree immediately if the generated block is a 2964 // LoopBypassBlock because SCEV expansions to generate loop bypass 2965 // checks may query it before the current function is finished. 
2966 DT->addNewBlock(NewBB, BB);
2967 if (L->getParentLoop())
2968 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2969 ReplaceInstWithInst(BB->getTerminator(),
2970 BranchInst::Create(Bypass, NewBB, CheckMinIters));
2971 LoopBypassBlocks.push_back(BB);
2972 }
2973
2974 void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L,
2975 BasicBlock *Bypass) {
2976 Value *TC = getOrCreateVectorTripCount(L);
2977 BasicBlock *BB = L->getLoopPreheader();
2978 IRBuilder<> Builder(BB->getTerminator());
2979
2980 // Now, compare the new count to zero. If it is zero skip the vector loop and
2981 // jump to the scalar loop.
2982 Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
2983 "cmp.zero");
2984
2985 // Split the preheader and branch to the scalar loop (the bypass) when the
2986 // vector trip count is zero.
2987 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2988 // Update dominator tree immediately if the generated block is a
2989 // LoopBypassBlock because SCEV expansions to generate loop bypass
2990 // checks may query it before the current function is finished.
2991 DT->addNewBlock(NewBB, BB);
2992 if (L->getParentLoop())
2993 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2994 ReplaceInstWithInst(BB->getTerminator(),
2995 BranchInst::Create(Bypass, NewBB, Cmp));
2996 LoopBypassBlocks.push_back(BB);
2997 }
2998
2999 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3000 BasicBlock *BB = L->getLoopPreheader();
3001
3002 // Generate the code to check the SCEV assumptions that we made.
3003 // We want the new basic block to start at the first instruction in a
3004 // sequence of instructions that form a check.
3005 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
3006 "scev.check");
3007 Value *SCEVCheck =
3008 Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
3009
3010 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
3011 if (C->isZero())
3012 return;
3013
3014 // Create a new block containing the SCEV check.
3015 BB->setName("vector.scevcheck");
3016 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3017 // Update dominator tree immediately if the generated block is a
3018 // LoopBypassBlock because SCEV expansions to generate loop bypass
3019 // checks may query it before the current function is finished.
3020 DT->addNewBlock(NewBB, BB);
3021 if (L->getParentLoop())
3022 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3023 ReplaceInstWithInst(BB->getTerminator(),
3024 BranchInst::Create(Bypass, NewBB, SCEVCheck));
3025 LoopBypassBlocks.push_back(BB);
3026 AddedSafetyChecks = true;
3027 }
3028
3029 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
3030 BasicBlock *BB = L->getLoopPreheader();
3031
3032 // Generate the code that checks at runtime whether arrays overlap. We put
3033 // the checks into a separate block to make the more common case of few
3034 // elements faster.
3035 Instruction *FirstCheckInst;
3036 Instruction *MemRuntimeCheck;
3037 std::tie(FirstCheckInst, MemRuntimeCheck) =
3038 Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
3039 if (!MemRuntimeCheck)
3040 return;
3041
3042 // Create a new block containing the memory check.
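// The resulting layout is roughly (illustrative):
//   vector.memcheck:                        ; renamed original preheader
//     ... runtime overlap checks ...
//     br i1 %memcheck, label %bypass, label %vector.ph
//   vector.ph:                              ; new preheader from the split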
3043 BB->setName("vector.memcheck");
3044 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3045 // Update dominator tree immediately if the generated block is a
3046 // LoopBypassBlock because SCEV expansions to generate loop bypass
3047 // checks may query it before the current function is finished.
3048 DT->addNewBlock(NewBB, BB);
3049 if (L->getParentLoop())
3050 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3051 ReplaceInstWithInst(BB->getTerminator(),
3052 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
3053 LoopBypassBlocks.push_back(BB);
3054 AddedSafetyChecks = true;
3055
3056 // We currently don't use LoopVersioning for the actual loop cloning but we
3057 // still use it to add the noalias metadata.
3058 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
3059 PSE.getSE());
3060 LVer->prepareNoAliasMetadata();
3061 }
3062
3063 void InnerLoopVectorizer::createEmptyLoop() {
3064 /*
3065 In this function we generate a new loop. The new loop will contain
3066 the vectorized instructions while the old loop will continue to run the
3067 scalar remainder.
3068
3069 [ ] <-- loop iteration number check.
3070 / |
3071 / v
3072 | [ ] <-- vector loop bypass (may consist of multiple blocks).
3073 | / |
3074 | / v
3075 || [ ] <-- vector pre header.
3076 |/ |
3077 | v
3078 | [ ] \
3079 | [ ]_| <-- vector loop.
3080 | |
3081 | v
3082 | -[ ] <--- middle-block.
3083 | / |
3084 | / v
3085 -|- >[ ] <--- new preheader.
3086 | |
3087 | v
3088 | [ ] \
3089 | [ ]_| <-- old scalar loop to handle remainder.
3090 \ |
3091 \ v
3092 >[ ] <-- exit block.
3093 ...
3094 */
3095
3096 BasicBlock *OldBasicBlock = OrigLoop->getHeader();
3097 BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
3098 BasicBlock *ExitBlock = OrigLoop->getExitBlock();
3099 assert(VectorPH && "Invalid loop structure");
3100 assert(ExitBlock && "Must have an exit block");
3101
3102 // Some loops have a single integer induction variable, while other loops
3103 // don't. One example is C++ iterators that often have multiple pointer
3104 // induction variables. In the code below we also support a case where we
3105 // don't have a single induction variable.
3106 //
3107 // We try as hard as possible to obtain an induction variable from the
3108 // original loop. However, if we don't find one that:
3109 // - is an integer
3110 // - counts from zero, stepping by one
3111 // - is the size of the widest induction variable type
3112 // then we create a new one.
3113 OldInduction = Legal->getInduction();
3114 Type *IdxTy = Legal->getWidestInductionType();
3115
3116 // Split the single block loop into the two loop structure described above.
3117 BasicBlock *VecBody =
3118 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
3119 BasicBlock *MiddleBlock =
3120 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
3121 BasicBlock *ScalarPH =
3122 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
3123
3124 // Create and register the new vector loop.
3125 Loop *Lp = new Loop();
3126 Loop *ParentLoop = OrigLoop->getParentLoop();
3127
3128 // Insert the new loop into the loop nest and register the new basic blocks
3129 // before calling any utilities such as SCEV that require valid LoopInfo.
3130 if (ParentLoop) {
3131 ParentLoop->addChildLoop(Lp);
3132 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
3133 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
3134 } else {
3135 LI->addTopLevelLoop(Lp);
3136 }
3137 Lp->addBasicBlockToLoop(VecBody, *LI);
3138
3139 // Find the loop boundaries.
3140 Value *Count = getOrCreateTripCount(Lp);
3141
3142 Value *StartIdx = ConstantInt::get(IdxTy, 0);
3143
3144 // We need to test whether the backedge-taken count is uint##_max. Adding one
3145 // to it will cause overflow and an incorrect loop trip count in the vector
3146 // body. In case of overflow we want to directly jump to the scalar remainder
3147 // loop.
3148 emitMinimumIterationCountCheck(Lp, ScalarPH);
3149 // Now, compare the new count to zero. If it is zero skip the vector loop and
3150 // jump to the scalar loop.
3151 emitVectorLoopEnteredCheck(Lp, ScalarPH);
3152 // Generate the code to check any assumptions that we've made for SCEV
3153 // expressions.
3154 emitSCEVChecks(Lp, ScalarPH);
3155
3156 // Generate the code that checks at runtime whether arrays overlap. We put
3157 // the checks into a separate block to make the more common case of few
3158 // elements faster.
3159 emitMemRuntimeChecks(Lp, ScalarPH);
3160
3161 // Generate the induction variable.
3162 // The loop step is equal to the vectorization factor (num of SIMD elements)
3163 // times the unroll factor (num of SIMD instructions).
3164 Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3165 Constant *Step = ConstantInt::get(IdxTy, VF * UF);
3166 Induction =
3167 createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3168 getDebugLocFromInstOrOperands(OldInduction));
3169
3170 // We are going to resume the execution of the scalar loop.
3171 // Go over all of the induction variables that we found and fix the
3172 // PHIs that are left in the scalar version of the loop.
3173 // The starting values of PHI nodes depend on the counter of the last
3174 // iteration in the vectorized loop.
3175 // If we come from a bypass edge then we need to start from the original
3176 // start value.
3177
3178 // The resume values created below save the new starting indices for the
3179 // scalar loop. They are used to test whether there are any tail iterations
3180 // left once the vector loop has completed.
3181 LoopVectorizationLegality::InductionList::iterator I, E;
3182 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
3183 for (I = List->begin(), E = List->end(); I != E; ++I) {
3184 PHINode *OrigPhi = I->first;
3185 InductionDescriptor II = I->second;
3186
3187 // Create phi nodes to merge from the backedge-taken check block.
3188 PHINode *BCResumeVal = PHINode::Create(
3189 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
3190 Value *EndValue;
3191 if (OrigPhi == OldInduction) {
3192 // We know what the end value is.
3193 EndValue = CountRoundDown;
3194 } else {
3195 IRBuilder<> B(LoopBypassBlocks.back()->getTerminator());
3196 Value *CRD = B.CreateSExtOrTrunc(CountRoundDown,
3197 II.getStep()->getType(), "cast.crd");
3198 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
3199 EndValue = II.transform(B, CRD, PSE.getSE(), DL);
3200 EndValue->setName("ind.end");
3201 }
3202
3203 // The new PHI merges the original incoming value, in case of a bypass,
3204 // or the value at the end of the vectorized loop.
3205 BCResumeVal->addIncoming(EndValue, MiddleBlock);
3206
3207 // Fix up external users of the induction variable.
3208 fixupIVUsers(OrigPhi, II, CountRoundDown, EndValue, MiddleBlock);
3209
3210 // Fix the scalar body counter (PHI node).
3211 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
3212
3213 // The old induction's phi node in the scalar body needs the truncated
3214 // value.
3215 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
3216 BCResumeVal->addIncoming(II.getStartValue(), LoopBypassBlocks[I]);
3217 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
3218 }
3219
3220 // Add a check in the middle block to see if we have completed
3221 // all of the iterations in the first vector loop.
3222 // If (N - N%VF) == N, then we *don't* need to run the remainder.
3223 Value *CmpN =
3224 CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
3225 CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
3226 ReplaceInstWithInst(MiddleBlock->getTerminator(),
3227 BranchInst::Create(ExitBlock, ScalarPH, CmpN));
3228
3229 // Get ready to start creating new instructions into the vectorized body.
3230 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());
3231
3232 // Save the state.
3233 LoopVectorPreHeader = Lp->getLoopPreheader();
3234 LoopScalarPreHeader = ScalarPH;
3235 LoopMiddleBlock = MiddleBlock;
3236 LoopExitBlock = ExitBlock;
3237 LoopVectorBody = VecBody;
3238 LoopScalarBody = OldBasicBlock;
3239
3240 // Keep all loop hints from the original loop on the vector loop (we'll
3241 // replace the vectorizer-specific hints below).
3242 if (MDNode *LID = OrigLoop->getLoopID())
3243 Lp->setLoopID(LID);
3244
3245 LoopVectorizeHints Hints(Lp, true);
3246 Hints.setAlreadyVectorized();
3247 }
3248
3249 // Fix up external users of the induction variable. At this point, we are
3250 // in LCSSA form, with all external PHIs that use the IV having one input value,
3251 // coming from the remainder loop. We need those PHIs to also have a correct
3252 // value for the IV when arriving directly from the middle block.
3253 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3254 const InductionDescriptor &II,
3255 Value *CountRoundDown, Value *EndValue,
3256 BasicBlock *MiddleBlock) {
3257 // There are two kinds of external IV usages - those that use the value
3258 // computed in the last iteration (the PHI) and those that use the penultimate
3259 // value (the value that feeds into the phi from the loop latch).
3260 // We allow both, but they, obviously, have different values.
3261
3262 // We only expect at most one of each kind of user. This is because LCSSA will
3263 // canonicalize the users to a single PHI node per exit block, and we
3264 // currently only vectorize loops with a single exit.
3265 assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3266
3267 // An external user of the last iteration's value should see the value that
3268 // the remainder loop uses to initialize its own IV.
3269 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3270 for (User *U : PostInc->users()) {
3271 Instruction *UI = cast<Instruction>(U);
3272 if (!OrigLoop->contains(UI)) {
3273 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3274 // One corner case we have to handle is two IVs "chasing" each other,
3275 // that is %IV2 = phi [...], [ %IV1, %latch ]
3276 // In this case, if IV1 has an external use, we need to avoid adding both
3277 // "last value of IV1" and "penultimate value of IV2". Since we don't know
3278 // which IV will be handled first, check that we haven't handled this user yet.
3279 PHINode *User = cast<PHINode>(UI);
3280 if (User->getBasicBlockIndex(MiddleBlock) == -1)
3281 User->addIncoming(EndValue, MiddleBlock);
3282 break;
3283 }
3284 }
3285
3286 // An external user of the penultimate value needs to see EndValue - Step.
3287 // The simplest way to get this is to recompute it from the constituent SCEVs,
3288 // that is Start + (Step * (CRD - 1)).
3289 for (User *U : OrigPhi->users()) {
3290 Instruction *UI = cast<Instruction>(U);
3291 if (!OrigLoop->contains(UI)) {
3292 const DataLayout &DL =
3293 OrigLoop->getHeader()->getModule()->getDataLayout();
3294
3295 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3296 PHINode *User = cast<PHINode>(UI);
3297 // As above, check we haven't already handled this user.
3298 if (User->getBasicBlockIndex(MiddleBlock) != -1)
3299 break;
3300
3301 IRBuilder<> B(MiddleBlock->getTerminator());
3302 Value *CountMinusOne = B.CreateSub(
3303 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3304 Value *CMO = B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType(),
3305 "cast.cmo");
3306 Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
3307 Escape->setName("ind.escape");
3308 User->addIncoming(Escape, MiddleBlock);
3309 break;
3310 }
3311 }
3312 }
3313
3314 namespace {
3315 struct CSEDenseMapInfo {
3316 static bool canHandle(Instruction *I) {
3317 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3318 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3319 }
3320 static inline Instruction *getEmptyKey() {
3321 return DenseMapInfo<Instruction *>::getEmptyKey();
3322 }
3323 static inline Instruction *getTombstoneKey() {
3324 return DenseMapInfo<Instruction *>::getTombstoneKey();
3325 }
3326 static unsigned getHashValue(Instruction *I) {
3327 assert(canHandle(I) && "Unknown instruction!");
3328 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3329 I->value_op_end()));
3330 }
3331 static bool isEqual(Instruction *LHS, Instruction *RHS) {
3332 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3333 LHS == getTombstoneKey() || RHS == getTombstoneKey())
3334 return LHS == RHS;
3335 return LHS->isIdenticalTo(RHS);
3336 }
3337 };
3338 }
3339
3340 /// \brief Perform CSE of induction variable instructions.
3341 static void cse(BasicBlock *BB) {
3342 // Perform simple CSE.
3343 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3344 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3345 Instruction *In = &*I++;
3346
3347 if (!CSEDenseMapInfo::canHandle(In))
3348 continue;
3349
3350 // Check if we can replace this instruction with any of the
3351 // visited instructions.
3352 if (Instruction *V = CSEMap.lookup(In)) {
3353 In->replaceAllUsesWith(V);
3354 In->eraseFromParent();
3355 continue;
3356 }
3357
3358 CSEMap[In] = In;
3359 }
3360 }
3361
3362 /// \brief Adds a 'fast' flag to floating point operations.
3363 static Value *addFastMathFlag(Value *V) {
3364 if (isa<FPMathOperator>(V)) {
3365 FastMathFlags Flags;
3366 Flags.setUnsafeAlgebra();
3367 cast<Instruction>(V)->setFastMathFlags(Flags);
3368 }
3369 return V;
3370 }
3371
3372 /// Estimate the overhead of scalarizing a value. Insert and Extract are set if
3373 /// the result needs to be inserted and/or extracted from vectors.
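/// For example (illustrative): for a <4 x i32> value with both Insert and
/// Extract set, this sums the target's insertelement and extractelement
/// costs over all four lanes.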
3374 static unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract,
3375 const TargetTransformInfo &TTI) {
3376 if (Ty->isVoidTy())
3377 return 0;
3378
3379 assert(Ty->isVectorTy() && "Can only scalarize vectors");
3380 unsigned Cost = 0;
3381
3382 for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
3383 if (Insert)
3384 Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, i);
3385 if (Extract)
3386 Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, i);
3387 }
3388
3389 return Cost;
3390 }
3391
3392 // Estimate cost of a call instruction CI if it were vectorized with factor VF.
3393 // Return the cost of the instruction, including scalarization overhead if it's
3394 // needed. The flag NeedToScalarize shows whether the call needs to be
3395 // scalarized, i.e., either a vector version isn't available or it is too expensive.
3396 static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
3397 const TargetTransformInfo &TTI,
3398 const TargetLibraryInfo *TLI,
3399 bool &NeedToScalarize) {
3400 Function *F = CI->getCalledFunction();
3401 StringRef FnName = CI->getCalledFunction()->getName();
3402 Type *ScalarRetTy = CI->getType();
3403 SmallVector<Type *, 4> Tys, ScalarTys;
3404 for (auto &ArgOp : CI->arg_operands())
3405 ScalarTys.push_back(ArgOp->getType());
3406
3407 // Estimate cost of scalarized vector call. The source operands are assumed
3408 // to be vectors, so we need to extract individual elements from there,
3409 // execute VF scalar calls, and then gather the result into the vector return
3410 // value.
3411 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
3412 if (VF == 1)
3413 return ScalarCallCost;
3414
3415 // Compute corresponding vector type for return value and arguments.
3416 Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3417 for (unsigned i = 0, ie = ScalarTys.size(); i != ie; ++i)
3418 Tys.push_back(ToVectorTy(ScalarTys[i], VF));
3419
3420 // Compute costs of unpacking argument values for the scalar calls and
3421 // packing the return values to a vector.
3422 unsigned ScalarizationCost =
3423 getScalarizationOverhead(RetTy, true, false, TTI);
3424 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i)
3425 ScalarizationCost += getScalarizationOverhead(Tys[i], false, true, TTI);
3426
3427 unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
3428
3429 // If we can't emit a vector call for this function, then the currently found
3430 // cost is the cost we need to return.
3431 NeedToScalarize = true;
3432 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin())
3433 return Cost;
3434
3435 // If the corresponding vector cost is cheaper, return its cost.
3436 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
3437 if (VectorCallCost < Cost) {
3438 NeedToScalarize = false;
3439 return VectorCallCost;
3440 }
3441 return Cost;
3442 }
3443
3444 // Estimate cost of an intrinsic call instruction CI if it were vectorized with
3445 // factor VF. Return the cost of the instruction, including scalarization
3446 // overhead if it's needed.
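// For example (illustrative): a scalar call to llvm.sqrt.f32 with VF = 4 is
// costed as a call to llvm.sqrt.v4f32, carrying over any fast-math flags
// attached to the original call.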
3447 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3448 const TargetTransformInfo &TTI, 3449 const TargetLibraryInfo *TLI) { 3450 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3451 assert(ID && "Expected intrinsic call!"); 3452 3453 Type *RetTy = ToVectorTy(CI->getType(), VF); 3454 SmallVector<Type *, 4> Tys; 3455 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) 3456 Tys.push_back(ToVectorTy(CI->getArgOperand(i)->getType(), VF)); 3457 3458 FastMathFlags FMF; 3459 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3460 FMF = FPMO->getFastMathFlags(); 3461 3462 return TTI.getIntrinsicInstrCost(ID, RetTy, Tys, FMF); 3463 } 3464 3465 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3466 IntegerType *I1 = cast<IntegerType>(T1->getVectorElementType()); 3467 IntegerType *I2 = cast<IntegerType>(T2->getVectorElementType()); 3468 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3469 } 3470 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3471 IntegerType *I1 = cast<IntegerType>(T1->getVectorElementType()); 3472 IntegerType *I2 = cast<IntegerType>(T2->getVectorElementType()); 3473 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3474 } 3475 3476 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3477 // For every instruction `I` in MinBWs, truncate the operands, create a 3478 // truncated version of `I` and reextend its result. InstCombine runs 3479 // later and will remove any ext/trunc pairs. 3480 // 3481 SmallPtrSet<Value *, 4> Erased; 3482 for (const auto &KV : *MinBWs) { 3483 VectorParts &Parts = WidenMap.get(KV.first); 3484 for (Value *&I : Parts) { 3485 if (Erased.count(I) || I->use_empty()) 3486 continue; 3487 Type *OriginalTy = I->getType(); 3488 Type *ScalarTruncatedTy = 3489 IntegerType::get(OriginalTy->getContext(), KV.second); 3490 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3491 OriginalTy->getVectorNumElements()); 3492 if (TruncatedTy == OriginalTy) 3493 continue; 3494 3495 if (!isa<Instruction>(I)) 3496 continue; 3497 3498 IRBuilder<> B(cast<Instruction>(I)); 3499 auto ShrinkOperand = [&](Value *V) -> Value * { 3500 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3501 if (ZI->getSrcTy() == TruncatedTy) 3502 return ZI->getOperand(0); 3503 return B.CreateZExtOrTrunc(V, TruncatedTy); 3504 }; 3505 3506 // The actual instruction modification depends on the instruction type, 3507 // unfortunately. 
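// Schematically, for a binary operation whose result is known to need only
// 8 bits (illustrative shorthand IR, VF = 4, original type <4 x i32>):
//   %a.tr = trunc <4 x i32> %a to <4 x i8>
//   %b.tr = trunc <4 x i32> %b to <4 x i8>
//   %r.tr = add <4 x i8> %a.tr, %b.tr
//   %r    = zext <4 x i8> %r.tr to <4 x i32>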
3508 Value *NewI = nullptr;
3509 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
3510 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3511 ShrinkOperand(BO->getOperand(1)));
3512 cast<BinaryOperator>(NewI)->copyIRFlags(I);
3513 } else if (ICmpInst *CI = dyn_cast<ICmpInst>(I)) {
3514 NewI =
3515 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3516 ShrinkOperand(CI->getOperand(1)));
3517 } else if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
3518 NewI = B.CreateSelect(SI->getCondition(),
3519 ShrinkOperand(SI->getTrueValue()),
3520 ShrinkOperand(SI->getFalseValue()));
3521 } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
3522 switch (CI->getOpcode()) {
3523 default:
3524 llvm_unreachable("Unhandled cast!");
3525 case Instruction::Trunc:
3526 NewI = ShrinkOperand(CI->getOperand(0));
3527 break;
3528 case Instruction::SExt:
3529 NewI = B.CreateSExtOrTrunc(
3530 CI->getOperand(0),
3531 smallestIntegerVectorType(OriginalTy, TruncatedTy));
3532 break;
3533 case Instruction::ZExt:
3534 NewI = B.CreateZExtOrTrunc(
3535 CI->getOperand(0),
3536 smallestIntegerVectorType(OriginalTy, TruncatedTy));
3537 break;
3538 }
3539 } else if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(I)) {
3540 auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
3541 auto *O0 = B.CreateZExtOrTrunc(
3542 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3543 auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
3544 auto *O1 = B.CreateZExtOrTrunc(
3545 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3546
3547 NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
3548 } else if (isa<LoadInst>(I)) {
3549 // Don't do anything with the operands, just extend the result.
3550 continue;
3551 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3552 auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
3553 auto *O0 = B.CreateZExtOrTrunc(
3554 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3555 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3556 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3557 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3558 auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
3559 auto *O0 = B.CreateZExtOrTrunc(
3560 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3561 NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3562 } else {
3563 llvm_unreachable("Unhandled instruction type!");
3564 }
3565
3566 // Lastly, extend the result.
3567 NewI->takeName(cast<Instruction>(I));
3568 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3569 I->replaceAllUsesWith(Res);
3570 cast<Instruction>(I)->eraseFromParent();
3571 Erased.insert(I);
3572 I = Res;
3573 }
3574 }
3575
3576 // We'll have created a bunch of ZExts that are now unused. Clean them up.
3577 for (const auto &KV : *MinBWs) {
3578 VectorParts &Parts = WidenMap.get(KV.first);
3579 for (Value *&I : Parts) {
3580 ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3581 if (Inst && Inst->use_empty()) {
3582 Value *NewI = Inst->getOperand(0);
3583 Inst->eraseFromParent();
3584 I = NewI;
3585 }
3586 }
3587 }
3588 }
3589
3590 void InnerLoopVectorizer::vectorizeLoop() {
3591 //===------------------------------------------------===//
3592 //
3593 // Notice: any optimization or new instruction that goes
3594 // into the code below should also be implemented in
3595 // the cost model.
3596 //
3597 //===------------------------------------------------===//
3598 Constant *Zero = Builder.getInt32(0);
3599
3600 // In order to support recurrences we need to be able to vectorize Phi nodes.
3601 // Phi nodes have cycles, so we need to vectorize them in two stages. First,
3602 // we create a new vector PHI node with no incoming edges. We use this value
3603 // when we vectorize all of the instructions that use the PHI. Next, after
3604 // all of the instructions in the block are complete we add the new incoming
3605 // edges to the PHI. At this point all of the instructions in the basic block
3606 // are vectorized, so we can use them to construct the PHI.
3607 PhiVector PHIsToFix;
3608
3609 // Scan the loop in a topological order to ensure that defs are vectorized
3610 // before users.
3611 LoopBlocksDFS DFS(OrigLoop);
3612 DFS.perform(LI);
3613
3614 // Vectorize all of the blocks in the original loop.
3615 for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(), be = DFS.endRPO();
3616 bb != be; ++bb)
3617 vectorizeBlockInLoop(*bb, &PHIsToFix);
3618
3619 // Insert truncates and extends for any truncated instructions as hints to
3620 // InstCombine.
3621 if (VF > 1)
3622 truncateToMinimalBitwidths();
3623
3624 // At this point every instruction in the original loop is widened to a
3625 // vector form. Now we need to fix the recurrences in PHIsToFix. These PHI
3626 // nodes are currently empty because we did not want to introduce cycles.
3627 // This is the second stage of vectorizing recurrences.
3628 for (PHINode *Phi : PHIsToFix) {
3629 assert(Phi && "Unable to recover vectorized PHI");
3630
3631 // Handle first-order recurrences that need to be fixed.
3632 if (Legal->isFirstOrderRecurrence(Phi)) {
3633 fixFirstOrderRecurrence(Phi);
3634 continue;
3635 }
3636
3637 // If the phi node is not a first-order recurrence, it must be a reduction.
3638 // Get its reduction variable descriptor.
3639 assert(Legal->isReductionVariable(Phi) &&
3640 "Unable to find the reduction variable");
3641 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
3642
3643 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3644 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3645 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3646 RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3647 RdxDesc.getMinMaxRecurrenceKind();
3648 setDebugLocFromInst(Builder, ReductionStartValue);
3649
3650 // We need to generate a reduction vector from the incoming scalar.
3651 // To do so, we need to generate the 'identity' vector and override
3652 // one of the elements with the incoming scalar reduction. We need
3653 // to do it in the vector-loop preheader.
3654 Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());
3655
3656 // This is the vector-clone of the value that leaves the loop.
3657 VectorParts &VectorExit = getVectorValue(LoopExitInst);
3658 Type *VecTy = VectorExit[0]->getType();
3659
3660 // Find the reduction identity value: zero for addition, or, and xor;
3661 // one for multiplication; -1 for and.
3662 Value *Identity;
3663 Value *VectorStart;
3664 if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3665 RK == RecurrenceDescriptor::RK_FloatMinMax) {
3666 // MinMax reductions have the start value as their identity.
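// For example (illustrative): an integer add reduction with VF = 4 uses
// <0, 0, 0, 0> as the identity and inserts the scalar start value into
// lane 0, whereas a min/max reduction splats the start value into all lanes.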
3667 if (VF == 1) {
3668 VectorStart = Identity = ReductionStartValue;
3669 } else {
3670 VectorStart = Identity =
3671 Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3672 }
3673 } else {
3674 // Handle other reduction kinds:
3675 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3676 RK, VecTy->getScalarType());
3677 if (VF == 1) {
3678 Identity = Iden;
3679 // This vector is the Identity vector where the first element is the
3680 // incoming scalar reduction.
3681 VectorStart = ReductionStartValue;
3682 } else {
3683 Identity = ConstantVector::getSplat(VF, Iden);
3684
3685 // This vector is the Identity vector where the first element is the
3686 // incoming scalar reduction.
3687 VectorStart =
3688 Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3689 }
3690 }
3691
3692 // Fix the vector-loop phi.
3693
3694 // Reductions do not have to start at zero. They can start with
3695 // any loop-invariant value.
3696 VectorParts &VecRdxPhi = WidenMap.get(Phi);
3697 BasicBlock *Latch = OrigLoop->getLoopLatch();
3698 Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3699 VectorParts &Val = getVectorValue(LoopVal);
3700 for (unsigned part = 0; part < UF; ++part) {
3701 // Make sure to add the reduction start value only to the
3702 // first unroll part.
3703 Value *StartVal = (part == 0) ? VectorStart : Identity;
3704 cast<PHINode>(VecRdxPhi[part])
3705 ->addIncoming(StartVal, LoopVectorPreHeader);
3706 cast<PHINode>(VecRdxPhi[part])
3707 ->addIncoming(Val[part], LoopVectorBody);
3708 }
3709
3710 // Before each round, move the insertion point right between
3711 // the PHIs and the values we are going to write.
3712 // This allows us to write both PHINodes and the extractelement
3713 // instructions.
3714 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3715
3716 VectorParts RdxParts = getVectorValue(LoopExitInst);
3717 setDebugLocFromInst(Builder, LoopExitInst);
3718
3719 // If the vector reduction can be performed in a smaller type, we truncate
3720 // then extend the loop exit value to enable InstCombine to evaluate the
3721 // entire expression in the smaller type.
3722 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
3723 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3724 Builder.SetInsertPoint(LoopVectorBody->getTerminator());
3725 for (unsigned part = 0; part < UF; ++part) {
3726 Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
3727 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3728 : Builder.CreateZExt(Trunc, VecTy);
3729 for (Value::user_iterator UI = RdxParts[part]->user_begin();
3730 UI != RdxParts[part]->user_end();)
3731 if (*UI != Trunc) {
3732 (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd);
3733 RdxParts[part] = Extnd;
3734 } else {
3735 ++UI;
3736 }
3737 }
3738 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3739 for (unsigned part = 0; part < UF; ++part)
3740 RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
3741 }
3742
3743 // Reduce all of the unrolled parts into a single vector.
3744 Value *ReducedPartRdx = RdxParts[0];
3745 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
3746 setDebugLocFromInst(Builder, ReducedPartRdx);
3747 for (unsigned part = 1; part < UF; ++part) {
3748 if (Op != Instruction::ICmp && Op != Instruction::FCmp)
3749 // Floating point operations had to be 'fast' to enable the reduction.
3750 ReducedPartRdx = addFastMathFlag( 3751 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part], 3752 ReducedPartRdx, "bin.rdx")); 3753 else 3754 ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp( 3755 Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]); 3756 } 3757 3758 if (VF > 1) { 3759 // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles 3760 // and vector ops, reducing the set of values being computed by half each 3761 // round. 3762 assert(isPowerOf2_32(VF) && 3763 "Reduction emission only supported for pow2 vectors!"); 3764 Value *TmpVec = ReducedPartRdx; 3765 SmallVector<Constant *, 32> ShuffleMask(VF, nullptr); 3766 for (unsigned i = VF; i != 1; i >>= 1) { 3767 // Move the upper half of the vector to the lower half. 3768 for (unsigned j = 0; j != i / 2; ++j) 3769 ShuffleMask[j] = Builder.getInt32(i / 2 + j); 3770 3771 // Fill the rest of the mask with undef. 3772 std::fill(&ShuffleMask[i / 2], ShuffleMask.end(), 3773 UndefValue::get(Builder.getInt32Ty())); 3774 3775 Value *Shuf = Builder.CreateShuffleVector( 3776 TmpVec, UndefValue::get(TmpVec->getType()), 3777 ConstantVector::get(ShuffleMask), "rdx.shuf"); 3778 3779 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3780 // Floating point operations had to be 'fast' to enable the reduction. 3781 TmpVec = addFastMathFlag(Builder.CreateBinOp( 3782 (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx")); 3783 else 3784 TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind, 3785 TmpVec, Shuf); 3786 } 3787 3788 // The result is in the first element of the vector. 3789 ReducedPartRdx = 3790 Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 3791 3792 // If the reduction can be performed in a smaller type, we need to extend 3793 // the reduction to the wider type before we branch to the original loop. 3794 if (Phi->getType() != RdxDesc.getRecurrenceType()) 3795 ReducedPartRdx = 3796 RdxDesc.isSigned() 3797 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 3798 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 3799 } 3800 3801 // Create a phi node that merges control-flow from the backedge-taken check 3802 // block and the middle block. 3803 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 3804 LoopScalarPreHeader->getTerminator()); 3805 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 3806 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 3807 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3808 3809 // Now, we need to fix the users of the reduction variable 3810 // inside and outside of the scalar remainder loop. 3811 // We know that the loop is in LCSSA form. We need to update the 3812 // PHI nodes in the exit blocks. 3813 for (BasicBlock::iterator LEI = LoopExitBlock->begin(), 3814 LEE = LoopExitBlock->end(); 3815 LEI != LEE; ++LEI) { 3816 PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI); 3817 if (!LCSSAPhi) 3818 break; 3819 3820 // All PHINodes need to have a single entry edge, or two if 3821 // we already fixed them. 3822 assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 3823 3824 // We found our reduction value exit-PHI. Update it with the 3825 // incoming bypass edge. 3826 if (LCSSAPhi->getIncomingValue(0) == LoopExitInst) { 3827 // Add an edge coming from the bypass. 3828 LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3829 break; 3830 } 3831 } // end of the LCSSA phi scan. 
3832
3833 // Fix the scalar loop reduction variable with the incoming reduction sum
3834 // from the vector body and from the backedge value.
3835 int IncomingEdgeBlockIdx =
3836 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
3837 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
3838 // Pick the other block.
3839 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
3840 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
3841 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
3842 } // end of for each Phi in PHIsToFix.
3843
3844 fixLCSSAPHIs();
3845
3846 // Make sure DomTree is updated.
3847 updateAnalysis();
3848
3849 // Predicate any stores.
3850 for (auto KV : PredicatedStores) {
3851 BasicBlock::iterator I(KV.first);
3852 auto *BB = SplitBlock(I->getParent(), &*std::next(I), DT, LI);
3853 auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false,
3854 /*BranchWeights=*/nullptr, DT, LI);
3855 I->moveBefore(T);
3856 I->getParent()->setName("pred.store.if");
3857 BB->setName("pred.store.continue");
3858 }
3859 DEBUG(DT->verifyDomTree());
3860 // Remove redundant induction instructions.
3861 cse(LoopVectorBody);
3862 }
3863
3864 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3865
3866 // This is the second phase of vectorizing first-order recurrences. An
3867 // overview of the transformation is described below. Suppose we have the
3868 // following loop.
3869 //
3870 // for (int i = 0; i < n; ++i)
3871 // b[i] = a[i] - a[i - 1];
3872 //
3873 // There is a first-order recurrence on "a". For this loop, the shorthand
3874 // scalar IR looks like:
3875 //
3876 // scalar.ph:
3877 // s_init = a[-1]
3878 // br scalar.body
3879 //
3880 // scalar.body:
3881 // i = phi [0, scalar.ph], [i+1, scalar.body]
3882 // s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3883 // s2 = a[i]
3884 // b[i] = s2 - s1
3885 // br cond, scalar.body, ...
3886 //
3887 // In this example, s1 is a recurrence because its value depends on the
3888 // previous iteration. In the first phase of vectorization, we created a
3889 // temporary value for s1. We now complete the vectorization and produce the
3890 // shorthand vector IR shown below (for VF = 4, UF = 1).
3891 //
3892 // vector.ph:
3893 // v_init = vector(..., ..., ..., a[-1])
3894 // br vector.body
3895 //
3896 // vector.body
3897 // i = phi [0, vector.ph], [i+4, vector.body]
3898 // v1 = phi [v_init, vector.ph], [v2, vector.body]
3899 // v2 = a[i, i+1, i+2, i+3];
3900 // v3 = vector(v1(3), v2(0, 1, 2))
3901 // b[i, i+1, i+2, i+3] = v2 - v3
3902 // br cond, vector.body, middle.block
3903 //
3904 // middle.block:
3905 // x = v2(3)
3906 // br scalar.ph
3907 //
3908 // scalar.ph:
3909 // s_init = phi [x, middle.block], [a[-1], otherwise]
3910 // br scalar.body
3911 //
3912 // After the vector loop completes, we extract the next value of the
3913 // recurrence (x) to use as the initial value in the scalar loop.
3914
3915 // Get the original loop preheader and single loop latch.
3916 auto *Preheader = OrigLoop->getLoopPreheader();
3917 auto *Latch = OrigLoop->getLoopLatch();
3918
3919 // Get the initial and previous values of the scalar recurrence.
3920 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3921 auto *Previous = Phi->getIncomingValueForBlock(Latch);
3922
3923 // Create a vector from the initial value.
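// For VF = 4, this builds (illustrative shorthand IR, element type assumed):
//   %vector.recur.init = insertelement <4 x i32> undef, i32 %s_init, i32 3
// so the scalar initial value occupies the last lane, matching v_init in
// the sketch above.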
3924 auto *VectorInit = ScalarInit;
3925 if (VF > 1) {
3926 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3927 VectorInit = Builder.CreateInsertElement(
3928 UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
3929 Builder.getInt32(VF - 1), "vector.recur.init");
3930 }
3931
3932 // We constructed a temporary phi node in the first phase of vectorization.
3933 // This phi node will eventually be deleted.
3934 auto &PhiParts = getVectorValue(Phi);
3935 Builder.SetInsertPoint(cast<Instruction>(PhiParts[0]));
3936
3937 // Create a phi node for the new recurrence. The current value will either be
3938 // the initial value inserted into a vector or loop-varying vector value.
3939 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
3940 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
3941
3942 // Get the vectorized previous value. We ensured the previous value was an
3943 // instruction when detecting the recurrence.
3944 auto &PreviousParts = getVectorValue(Previous);
3945
3946 // Set the insertion point to be after this instruction. We ensured the
3947 // previous value dominated all uses of the phi when detecting the
3948 // recurrence.
3949 Builder.SetInsertPoint(
3950 &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1])));
3951
3952 // We will construct a vector for the recurrence by combining the values for
3953 // the current and previous iterations. This is the required shuffle mask.
3954 SmallVector<Constant *, 8> ShuffleMask(VF);
3955 ShuffleMask[0] = Builder.getInt32(VF - 1);
3956 for (unsigned I = 1; I < VF; ++I)
3957 ShuffleMask[I] = Builder.getInt32(I + VF - 1);
3958
3959 // The vector from which to take the initial value for the current iteration
3960 // (actual or unrolled). Initially, this is the vector phi node.
3961 Value *Incoming = VecPhi;
3962
3963 // Shuffle the current and previous vector and update the vector parts.
3964 for (unsigned Part = 0; Part < UF; ++Part) {
3965 auto *Shuffle =
3966 VF > 1
3967 ? Builder.CreateShuffleVector(Incoming, PreviousParts[Part],
3968 ConstantVector::get(ShuffleMask))
3969 : Incoming;
3970 PhiParts[Part]->replaceAllUsesWith(Shuffle);
3971 cast<Instruction>(PhiParts[Part])->eraseFromParent();
3972 PhiParts[Part] = Shuffle;
3973 Incoming = PreviousParts[Part];
3974 }
3975
3976 // Fix the latch value of the new recurrence in the vector loop.
3977 VecPhi->addIncoming(Incoming,
3978 LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3979
3980 // Extract the last vector element in the middle block. This will be the
3981 // initial value for the recurrence when jumping to the scalar loop.
3982 auto *Extract = Incoming;
3983 if (VF > 1) {
3984 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3985 Extract = Builder.CreateExtractElement(Extract, Builder.getInt32(VF - 1),
3986 "vector.recur.extract");
3987 }
3988
3989 // Fix the initial value of the original recurrence in the scalar loop.
3990 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3991 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3992 for (auto *BB : predecessors(LoopScalarPreHeader)) {
3993 auto *Incoming = BB == LoopMiddleBlock ? Extract : ScalarInit;
3994 Start->addIncoming(Incoming, BB);
3995 }
3996
3997 Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
3998 Phi->setName("scalar.recur");
3999
4000 // Finally, fix users of the recurrence outside the loop. The users will need
4001 // either the last value of the scalar recurrence or the last value of the
4002 // vector recurrence we extracted in the middle block. Since the loop is in
4003 // LCSSA form, we just need to find the phi node for the original scalar
4004 // recurrence in the exit block, and then add an edge for the middle block.
4005 for (auto &I : *LoopExitBlock) {
4006 auto *LCSSAPhi = dyn_cast<PHINode>(&I);
4007 if (!LCSSAPhi)
4008 break;
4009 if (LCSSAPhi->getIncomingValue(0) == Phi) {
4010 LCSSAPhi->addIncoming(Extract, LoopMiddleBlock);
4011 break;
4012 }
4013 }
4014 }
4015
4016 void InnerLoopVectorizer::fixLCSSAPHIs() {
4017 for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
4018 LEE = LoopExitBlock->end();
4019 LEI != LEE; ++LEI) {
4020 PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
4021 if (!LCSSAPhi)
4022 break;
4023 if (LCSSAPhi->getNumIncomingValues() == 1)
4024 LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()),
4025 LoopMiddleBlock);
4026 }
4027 }
4028
4029 InnerLoopVectorizer::VectorParts
4030 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
4031 assert(std::find(pred_begin(Dst), pred_end(Dst), Src) != pred_end(Dst) &&
4032 "Invalid edge");
4033
4034 // Look for cached value.
4035 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
4036 EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge);
4037 if (ECEntryIt != MaskCache.end())
4038 return ECEntryIt->second;
4039
4040 VectorParts SrcMask = createBlockInMask(Src);
4041
4042 // The terminator has to be a branch inst!
4043 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
4044 assert(BI && "Unexpected terminator found");
4045
4046 if (BI->isConditional()) {
4047 VectorParts EdgeMask = getVectorValue(BI->getCondition());
4048
4049 if (BI->getSuccessor(0) != Dst)
4050 for (unsigned part = 0; part < UF; ++part)
4051 EdgeMask[part] = Builder.CreateNot(EdgeMask[part]);
4052
4053 for (unsigned part = 0; part < UF; ++part)
4054 EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]);
4055
4056 MaskCache[Edge] = EdgeMask;
4057 return EdgeMask;
4058 }
4059
4060 MaskCache[Edge] = SrcMask;
4061 return SrcMask;
4062 }
4063
4064 InnerLoopVectorizer::VectorParts
4065 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
4066 assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
4067
4068 // Loop incoming mask is all-one.
4069 if (OrigLoop->getHeader() == BB) {
4070 Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1);
4071 return getVectorValue(C);
4072 }
4073
4074 // This is the block mask. We OR all incoming edge masks, starting with zero.
4075 Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0);
4076 VectorParts BlockMask = getVectorValue(Zero);
4077
4078 // For each pred:
4079 for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) {
4080 VectorParts EM = createEdgeMask(*it, BB);
4081 for (unsigned part = 0; part < UF; ++part)
4082 BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]);
4083 }
4084
4085 return BlockMask;
4086 }
4087
4088 void InnerLoopVectorizer::widenPHIInstruction(
4089 Instruction *PN, InnerLoopVectorizer::VectorParts &Entry, unsigned UF,
4090 unsigned VF, PhiVector *PV) {
4091 PHINode *P = cast<PHINode>(PN);
4092 // Handle recurrences.
4093 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4094 for (unsigned part = 0; part < UF; ++part) {
4095 // This is phase one of vectorizing PHIs.
4096 Type *VecTy =
4097 (VF == 1) ?
PN->getType() : VectorType::get(PN->getType(), VF); 4098 Entry[part] = PHINode::Create( 4099 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 4100 } 4101 PV->push_back(P); 4102 return; 4103 } 4104 4105 setDebugLocFromInst(Builder, P); 4106 // Check for PHI nodes that are lowered to vector selects. 4107 if (P->getParent() != OrigLoop->getHeader()) { 4108 // We know that all PHIs in non-header blocks are converted into 4109 // selects, so we don't have to worry about the insertion order and we 4110 // can just use the builder. 4111 // At this point we generate the predication tree. There may be 4112 // duplications since this is a simple recursive scan, but future 4113 // optimizations will clean it up. 4114 4115 unsigned NumIncoming = P->getNumIncomingValues(); 4116 4117 // Generate a sequence of selects of the form: 4118 // SELECT(Mask3, In3, 4119 // SELECT(Mask2, In2, 4120 // ( ...))) 4121 for (unsigned In = 0; In < NumIncoming; In++) { 4122 VectorParts Cond = 4123 createEdgeMask(P->getIncomingBlock(In), P->getParent()); 4124 VectorParts &In0 = getVectorValue(P->getIncomingValue(In)); 4125 4126 for (unsigned part = 0; part < UF; ++part) { 4127 // We might have single edge PHIs (blocks) - use an identity 4128 // 'select' for the first PHI operand. 4129 if (In == 0) 4130 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In0[part]); 4131 else 4132 // Select between the current value and the previous incoming edge 4133 // based on the incoming mask. 4134 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], Entry[part], 4135 "predphi"); 4136 } 4137 } 4138 return; 4139 } 4140 4141 // This PHINode must be an induction variable. 4142 // Make sure that we know about it. 4143 assert(Legal->getInductionVars()->count(P) && "Not an induction variable"); 4144 4145 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 4146 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4147 4148 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4149 // which can be found from the original scalar operations. 4150 switch (II.getKind()) { 4151 case InductionDescriptor::IK_NoInduction: 4152 llvm_unreachable("Unknown induction"); 4153 case InductionDescriptor::IK_IntInduction: { 4154 assert(P->getType() == II.getStartValue()->getType() && "Types must match"); 4155 if (VF == 1 || P->getType() != Induction->getType() || 4156 !II.getConstIntStepValue()) { 4157 Value *V = Induction; 4158 // Handle other induction variables that are now based on the 4159 // canonical one. 4160 if (P != OldInduction) { 4161 V = Builder.CreateSExtOrTrunc(Induction, P->getType()); 4162 V = II.transform(Builder, V, PSE.getSE(), DL); 4163 V->setName("offset.idx"); 4164 } 4165 Value *Broadcasted = getBroadcastInstrs(V); 4166 // After broadcasting the induction variable we need to make the vector 4167 // consecutive by adding 0, 1, 2, etc. 4168 for (unsigned part = 0; part < UF; ++part) 4169 Entry[part] = getStepVector(Broadcasted, VF * part, II.getStep()); 4170 } else { 4171 // Instead of re-creating the vector IV by splatting the scalar IV 4172 // in each iteration, we can make a new independent vector IV. 4173 widenInductionVariable(II, Entry); 4174 } 4175 return; 4176 } 4177 case InductionDescriptor::IK_PtrInduction: 4178 // Handle the pointer induction variable case. 4179 assert(P->getType()->isPointerTy() && "Unexpected type."); 4180 // This is the normalized GEP that starts counting at zero. 
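// Schematically (illustrative): lane i of unroll part 'part' uses the
// scalar index PtrInd + part * VF + i, which II.transform() expands into a
// scalar GEP ("next.gep"); for VF > 1 the VF scalar GEPs are then packed
// into a vector with insertelement ("insert.gep").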
4181 Value *PtrInd = Induction; 4182 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType()); 4183 // This is the vector of results. Notice that we don't generate 4184 // vector geps because scalar geps result in better code. 4185 for (unsigned part = 0; part < UF; ++part) { 4186 if (VF == 1) { 4187 int EltIndex = part; 4188 Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex); 4189 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4190 Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL); 4191 SclrGep->setName("next.gep"); 4192 Entry[part] = SclrGep; 4193 continue; 4194 } 4195 4196 Value *VecVal = UndefValue::get(VectorType::get(P->getType(), VF)); 4197 for (unsigned int i = 0; i < VF; ++i) { 4198 int EltIndex = i + part * VF; 4199 Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex); 4200 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4201 Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL); 4202 SclrGep->setName("next.gep"); 4203 VecVal = Builder.CreateInsertElement(VecVal, SclrGep, 4204 Builder.getInt32(i), "insert.gep"); 4205 } 4206 Entry[part] = VecVal; 4207 } 4208 return; 4209 } 4210 } 4211 4212 void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) { 4213 // For each instruction in the old loop. 4214 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 4215 VectorParts &Entry = WidenMap.get(&*it); 4216 4217 switch (it->getOpcode()) { 4218 case Instruction::Br: 4219 // Nothing to do for PHIs and BR, since we already took care of the 4220 // loop control flow instructions. 4221 continue; 4222 case Instruction::PHI: { 4223 // Vectorize PHINodes. 4224 widenPHIInstruction(&*it, Entry, UF, VF, PV); 4225 continue; 4226 } // End of PHI. 4227 4228 case Instruction::Add: 4229 case Instruction::FAdd: 4230 case Instruction::Sub: 4231 case Instruction::FSub: 4232 case Instruction::Mul: 4233 case Instruction::FMul: 4234 case Instruction::UDiv: 4235 case Instruction::SDiv: 4236 case Instruction::FDiv: 4237 case Instruction::URem: 4238 case Instruction::SRem: 4239 case Instruction::FRem: 4240 case Instruction::Shl: 4241 case Instruction::LShr: 4242 case Instruction::AShr: 4243 case Instruction::And: 4244 case Instruction::Or: 4245 case Instruction::Xor: { 4246 // Just widen binops. 4247 BinaryOperator *BinOp = dyn_cast<BinaryOperator>(it); 4248 setDebugLocFromInst(Builder, BinOp); 4249 VectorParts &A = getVectorValue(it->getOperand(0)); 4250 VectorParts &B = getVectorValue(it->getOperand(1)); 4251 4252 // Use this vector value for all users of the original instruction. 4253 for (unsigned Part = 0; Part < UF; ++Part) { 4254 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]); 4255 4256 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V)) 4257 VecOp->copyIRFlags(BinOp); 4258 4259 Entry[Part] = V; 4260 } 4261 4262 addMetadata(Entry, &*it); 4263 break; 4264 } 4265 case Instruction::Select: { 4266 // Widen selects. 4267 // If the selector is loop invariant we can create a select 4268 // instruction with a scalar condition. Otherwise, use vector-select. 4269 auto *SE = PSE.getSE(); 4270 bool InvariantCond = 4271 SE->isLoopInvariant(PSE.getSCEV(it->getOperand(0)), OrigLoop); 4272 setDebugLocFromInst(Builder, &*it); 4273 4274 // The condition can be loop invariant but still defined inside the 4275 // loop. This means that we can't just use the original 'cond' value. 4276 // We have to take the 'vectorized' value and pick the first lane. 
      // Instcombine will make this a no-op.
      VectorParts &Cond = getVectorValue(it->getOperand(0));
      VectorParts &Op0 = getVectorValue(it->getOperand(1));
      VectorParts &Op1 = getVectorValue(it->getOperand(2));

      Value *ScalarCond =
          (VF == 1)
              ? Cond[0]
              : Builder.CreateExtractElement(Cond[0], Builder.getInt32(0));

      for (unsigned Part = 0; Part < UF; ++Part) {
        Entry[Part] = Builder.CreateSelect(
            InvariantCond ? ScalarCond : Cond[Part], Op0[Part], Op1[Part]);
      }

      addMetadata(Entry, &*it);
      break;
    }

    case Instruction::ICmp:
    case Instruction::FCmp: {
      // Widen compares. Generate vector compares.
      bool FCmp = (it->getOpcode() == Instruction::FCmp);
      CmpInst *Cmp = dyn_cast<CmpInst>(it);
      setDebugLocFromInst(Builder, &*it);
      VectorParts &A = getVectorValue(it->getOperand(0));
      VectorParts &B = getVectorValue(it->getOperand(1));
      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *C = nullptr;
        if (FCmp) {
          C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]);
          cast<FCmpInst>(C)->copyFastMathFlags(&*it);
        } else {
          C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]);
        }
        Entry[Part] = C;
      }

      addMetadata(Entry, &*it);
      break;
    }

    case Instruction::Store:
    case Instruction::Load:
      vectorizeMemoryInstruction(&*it);
      break;
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      CastInst *CI = dyn_cast<CastInst>(it);
      setDebugLocFromInst(Builder, &*it);
      /// Optimize the special case where the source is a constant integer
      /// induction variable. Notice that we can only optimize the 'trunc' case
      /// because: a. FP conversions lose precision, b. sext/zext may wrap,
      /// c. other casts depend on pointer size.

      if (CI->getOperand(0) == OldInduction &&
          it->getOpcode() == Instruction::Trunc) {
        InductionDescriptor II =
            Legal->getInductionVars()->lookup(OldInduction);
        if (auto StepValue = II.getConstIntStepValue()) {
          IntegerType *TruncType = cast<IntegerType>(CI->getType());
          if (VF == 1) {
            StepValue =
                ConstantInt::getSigned(TruncType, StepValue->getSExtValue());
            Value *ScalarCast =
                Builder.CreateCast(CI->getOpcode(), Induction, CI->getType());
            Value *Broadcasted = getBroadcastInstrs(ScalarCast);
            for (unsigned Part = 0; Part < UF; ++Part)
              Entry[Part] = getStepVector(Broadcasted, VF * Part, StepValue);
          } else {
            // Truncating a vector induction variable on each iteration
            // may be expensive. Instead, truncate the initial value, and
            // create a new, truncated, vector IV based on that.
            widenInductionVariable(II, Entry, TruncType);
          }
          addMetadata(Entry, &*it);
          break;
        }
      }
      /// Vectorize casts.
      Type *DestTy =
          (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);

      VectorParts &A = getVectorValue(it->getOperand(0));
      for (unsigned Part = 0; Part < UF; ++Part)
        Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy);
      addMetadata(Entry, &*it);
      break;
    }

    case Instruction::Call: {
      // Ignore dbg intrinsics.
      if (isa<DbgInfoIntrinsic>(it))
        break;
      setDebugLocFromInst(Builder, &*it);

      Module *M = BB->getParent()->getParent();
      CallInst *CI = cast<CallInst>(it);

      StringRef FnName = CI->getCalledFunction()->getName();
      Function *F = CI->getCalledFunction();
      Type *RetTy = ToVectorTy(CI->getType(), VF);
      SmallVector<Type *, 4> Tys;
      for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i)
        Tys.push_back(ToVectorTy(CI->getArgOperand(i)->getType(), VF));

      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
                 ID == Intrinsic::lifetime_start)) {
        scalarizeInstruction(&*it);
        break;
      }
      // The flag shows whether we use an intrinsic or a plain call for the
      // vectorized version of the instruction:
      // is it beneficial to perform an intrinsic call compared to a library
      // call?
      bool NeedToScalarize;
      unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
      bool UseVectorIntrinsic =
          ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
      if (!UseVectorIntrinsic && NeedToScalarize) {
        scalarizeInstruction(&*it);
        break;
      }

      for (unsigned Part = 0; Part < UF; ++Part) {
        SmallVector<Value *, 4> Args;
        for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
          Value *Arg = CI->getArgOperand(i);
          // Some intrinsics have a scalar argument - don't replace it with a
          // vector.
          if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) {
            VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i));
            Arg = VectorArg[Part];
          }
          Args.push_back(Arg);
        }

        Function *VectorF;
        if (UseVectorIntrinsic) {
          // Use vector version of the intrinsic.
          Type *TysForDecl[] = {CI->getType()};
          if (VF > 1)
            TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
          VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
        } else {
          // Use vector version of the library call.
          StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
          assert(!VFnName.empty() && "Vector function name is empty.");
          VectorF = M->getFunction(VFnName);
          if (!VectorF) {
            // Generate a declaration.
            FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
            VectorF =
                Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
            VectorF->copyAttributesFrom(F);
          }
        }
        assert(VectorF && "Can't create vector function.");

        SmallVector<OperandBundleDef, 1> OpBundles;
        CI->getOperandBundlesAsDefs(OpBundles);
        CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

        if (isa<FPMathOperator>(V))
          V->copyFastMathFlags(CI);

        Entry[Part] = V;
      }

      addMetadata(Entry, &*it);
      break;
    }

    default:
      // All other instructions are unsupported. Scalarize them.
      scalarizeInstruction(&*it);
      break;
    } // end of switch.
  }   // end of for_each instr.
}

void InnerLoopVectorizer::updateAnalysis() {
  // Forget the original basic block.
  PSE.getSE()->forgetLoop(OrigLoop);

  // Update the dominator tree information.
  assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
         "Entry does not dominate exit.");

  // We don't predicate stores by this point, so the vector body should be a
  // single loop.
  DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader);

  DT->addNewBlock(LoopMiddleBlock, LoopVectorBody);
  DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
  DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
  DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);

  DEBUG(DT->verifyDomTree());
}

/// \brief Check whether it is safe to if-convert this phi node.
///
/// Phi nodes with constant expressions that can trap are not safe to if
/// convert.
static bool canIfConvertPHINodes(BasicBlock *BB) {
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    PHINode *Phi = dyn_cast<PHINode>(I);
    if (!Phi)
      return true;
    for (unsigned p = 0, e = Phi->getNumIncomingValues(); p != e; ++p)
      if (Constant *C = dyn_cast<Constant>(Phi->getIncomingValue(p)))
        if (C->canTrap())
          return false;
  }
  return true;
}

bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
  if (!EnableIfConversion) {
    emitAnalysis(VectorizationReport() << "if-conversion is disabled");
    return false;
  }

  assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");

  // A list of pointers that we can safely read and write to.
  SmallPtrSet<Value *, 8> SafePointers;

  // Collect safe addresses.
  for (Loop::block_iterator BI = TheLoop->block_begin(),
                            BE = TheLoop->block_end();
       BI != BE; ++BI) {
    BasicBlock *BB = *BI;

    if (blockNeedsPredication(BB))
      continue;

    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
      if (LoadInst *LI = dyn_cast<LoadInst>(I))
        SafePointers.insert(LI->getPointerOperand());
      else if (StoreInst *SI = dyn_cast<StoreInst>(I))
        SafePointers.insert(SI->getPointerOperand());
    }
  }

  // Collect the blocks that need predication.
  BasicBlock *Header = TheLoop->getHeader();
  for (Loop::block_iterator BI = TheLoop->block_begin(),
                            BE = TheLoop->block_end();
       BI != BE; ++BI) {
    BasicBlock *BB = *BI;

    // We don't support switch statements inside loops.
    if (!isa<BranchInst>(BB->getTerminator())) {
      emitAnalysis(VectorizationReport(BB->getTerminator())
                   << "loop contains a switch statement");
      return false;
    }

    // We must be able to predicate all blocks that need to be predicated.
    if (blockNeedsPredication(BB)) {
      if (!blockCanBePredicated(BB, SafePointers)) {
        emitAnalysis(VectorizationReport(BB->getTerminator())
                     << "control flow cannot be substituted for a select");
        return false;
      }
    } else if (BB != Header && !canIfConvertPHINodes(BB)) {
      emitAnalysis(VectorizationReport(BB->getTerminator())
                   << "control flow cannot be substituted for a select");
      return false;
    }
  }

  // We can if-convert this loop.
  return true;
}

bool LoopVectorizationLegality::canVectorize() {
  // We must have a loop in canonical form. Loops with indirectbr in them cannot
  // be canonicalized.
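  // As an illustrative sketch (assumed C source, not from this file), the
  // shape accepted by the checks below is a bottom-tested counted loop with a
  // preheader, a single backedge and a single exiting latch, e.g.
  //   for (int i = 0; i < n; ++i)
  //     a[i] = b[i] + 42;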
  if (!TheLoop->getLoopPreheader()) {
    emitAnalysis(VectorizationReport()
                 << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We can only vectorize innermost loops.
  if (!TheLoop->empty()) {
    emitAnalysis(VectorizationReport() << "loop is not the innermost loop");
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    emitAnalysis(VectorizationReport()
                 << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    emitAnalysis(VectorizationReport()
                 << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    emitAnalysis(VectorizationReport()
                 << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We need to have a loop header.
  DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
               << '\n');

  // Check if we can if-convert non-single-bb loops.
  unsigned NumBlocks = TheLoop->getNumBlocks();
  if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
    DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE.getBackedgeTakenCount();
  if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
    emitAnalysis(VectorizationReport()
                 << "could not determine number of loop iterations");
    DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
    return false;
  }

  // Check if we can vectorize the instructions and CFG in this loop.
  if (!canVectorizeInstrs()) {
    DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
    return false;
  }

  // Go over each instruction and look at memory deps.
  if (!canVectorizeMemory()) {
    DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
    return false;
  }

  // Collect all of the variables that remain uniform after vectorization.
  collectLoopUniforms();

  DEBUG(dbgs() << "LV: We can vectorize this loop"
               << (LAI->getRuntimePointerChecking()->Need
                       ? " (with a runtime bound check)"
                       : "")
               << "!\n");

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
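  // E.g. (illustrative): accesses with a constant stride such as
  //   for (i = 0; i < n; ++i) { sum += p[2 * i] + p[2 * i + 1]; }
  // form a candidate interleave group with factor 2; the analysis below
  // collects such groups so they can later be lowered to wide loads plus
  // shuffles.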
  if (UseInterleaved)
    InterleaveInfo.analyzeInterleaving(*getSymbolicStrides());

  unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
  if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
    SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;

  if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
    emitAnalysis(VectorizationReport()
                 << "Too many SCEV assumptions need to be made and checked "
                 << "at runtime");
    DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
    return false;
  }

  // Okay! We can vectorize. At this point we don't have any other mem analysis
  // which may limit our maximum vectorization factor, so just return true with
  // no restrictions.
  return true;
}

static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
  if (Ty->isPointerTy())
    return DL.getIntPtrType(Ty);

  // It is possible that chars or shorts overflow when we ask for the loop's
  // trip count; work around this by changing the type size.
  if (Ty->getScalarSizeInBits() < 32)
    return Type::getInt32Ty(Ty->getContext());

  return Ty;
}

static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
  Ty0 = convertPointerToIntegerType(DL, Ty0);
  Ty1 = convertPointerToIntegerType(DL, Ty1);
  if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
    return Ty0;
  return Ty1;
}

/// \brief Check that the instruction has outside loop users and is not an
/// identified reduction variable.
static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
                               SmallPtrSetImpl<Value *> &AllowedExit) {
  // Reduction and Induction instructions are allowed to have exit users. All
  // other instructions must not have external users.
  if (!AllowedExit.count(Inst))
    // Check that all of the users of the loop are inside the BB.
    for (User *U : Inst->users()) {
      Instruction *UI = cast<Instruction>(U);
      // This user may be a reduction exit value.
      if (!TheLoop->contains(UI)) {
        DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
        return true;
      }
    }
  return false;
}

void LoopVectorizationLegality::addInductionPhi(
    PHINode *Phi, const InductionDescriptor &ID,
    SmallPtrSetImpl<Value *> &AllowedExit) {
  Inductions[Phi] = ID;
  Type *PhiTy = Phi->getType();
  const DataLayout &DL = Phi->getModule()->getDataLayout();

  // Get the widest type.
  if (!WidestIndTy)
    WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
  else
    WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);

  // Int inductions are special because we only allow one IV.
  if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
      ID.getConstIntStepValue() && ID.getConstIntStepValue()->isOne() &&
      isa<Constant>(ID.getStartValue()) &&
      cast<Constant>(ID.getStartValue())->isNullValue()) {

    // Use the phi node with the widest type as induction. Use the last
    // one if there are multiple (no good reason for doing this other
    // than it is expedient). We've checked that it begins at zero and
    // steps by one, so this is a canonical induction variable.
    if (!Induction || PhiTy == WidestIndTy)
      Induction = Phi;
  }

  // Both the PHI node itself, and the "post-increment" value feeding
  // back into the PHI node may have external users.
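  // E.g. (illustrative): in
  //   for (i = 0; i < n; ++i) { ... }
  //   use(i);
  // the value of 'i' consumed after the loop is the incremented value coming
  // from the latch, so both the PHI and its latch operand are allowed exits.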
  AllowedExit.insert(Phi);
  AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));

  DEBUG(dbgs() << "LV: Found an induction variable.\n");
  return;
}

bool LoopVectorizationLegality::canVectorizeInstrs() {
  BasicBlock *Header = TheLoop->getHeader();

  // Look for the attribute signaling the absence of NaNs.
  Function &F = *Header->getParent();
  HasFunNoNaNAttr =
      F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";

  // For each block in the loop.
  for (Loop::block_iterator bb = TheLoop->block_begin(),
                            be = TheLoop->block_end();
       bb != be; ++bb) {

    // Scan the instructions in the block and look for hazards.
    for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
         ++it) {

      if (PHINode *Phi = dyn_cast<PHINode>(it)) {
        Type *PhiTy = Phi->getType();
        // Check that this PHI type is allowed.
        if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
            !PhiTy->isPointerTy()) {
          emitAnalysis(VectorizationReport(&*it)
                       << "loop control flow is not understood by vectorizer");
          DEBUG(dbgs() << "LV: Found a non-int non-pointer PHI.\n");
          return false;
        }

        // If this PHINode is not in the header block, then we know that we
        // can convert it to select during if-conversion. No need to check if
        // the PHIs in this block are induction or reduction variables.
        if (*bb != Header) {
          // Check that this instruction has no outside users or is an
          // identified reduction value with an outside user.
          if (!hasOutsideLoopUser(TheLoop, &*it, AllowedExit))
            continue;
          emitAnalysis(VectorizationReport(&*it)
                       << "value could not be identified as "
                          "an induction or reduction variable");
          return false;
        }

        // We only allow if-converted PHIs with exactly two incoming values.
        if (Phi->getNumIncomingValues() != 2) {
          emitAnalysis(VectorizationReport(&*it)
                       << "control flow not understood by vectorizer");
          DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
          return false;
        }

        RecurrenceDescriptor RedDes;
        if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
          if (RedDes.hasUnsafeAlgebra())
            Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
          AllowedExit.insert(RedDes.getLoopExitInstr());
          Reductions[Phi] = RedDes;
          continue;
        }

        InductionDescriptor ID;
        if (InductionDescriptor::isInductionPHI(Phi, PSE, ID)) {
          addInductionPhi(Phi, ID, AllowedExit);
          continue;
        }

        if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) {
          FirstOrderRecurrences.insert(Phi);
          continue;
        }

        // As a last resort, coerce the PHI to an AddRec expression
        // and re-try classifying it as an induction PHI.
        if (InductionDescriptor::isInductionPHI(Phi, PSE, ID, true)) {
          addInductionPhi(Phi, ID, AllowedExit);
          continue;
        }

        emitAnalysis(VectorizationReport(&*it)
                     << "value that could not be identified as "
                        "reduction is used outside the loop");
        DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
        return false;
      } // end of PHI handling

      // We handle calls that (see the example below):
      //   * Are debug info intrinsics.
      //   * Have a mapping to an IR intrinsic.
      //   * Have a vector version available.
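      // For instance (illustrative, subject to the target library info): a
      // call to 'sqrtf' can map to the 'llvm.sqrt' intrinsic, and a libm-style
      // routine may have a vector variant registered and found through
      // TLI->isFunctionVectorizable; anything else is rejected below.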
      CallInst *CI = dyn_cast<CallInst>(it);
      if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
          !isa<DbgInfoIntrinsic>(CI) &&
          !(CI->getCalledFunction() && TLI &&
            TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
        emitAnalysis(VectorizationReport(&*it)
                     << "call instruction cannot be vectorized");
        DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
        return false;
      }

      // Intrinsics such as powi, cttz and ctlz are legal to vectorize if the
      // second argument is the same (i.e. loop invariant).
      if (CI && hasVectorInstrinsicScalarOpd(
                    getVectorIntrinsicIDForCall(CI, TLI), 1)) {
        auto *SE = PSE.getSE();
        if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
          emitAnalysis(VectorizationReport(&*it)
                       << "intrinsic instruction cannot be vectorized");
          DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
          return false;
        }
      }

      // Check that the instruction return type is vectorizable.
      // Also, we can't vectorize extractelement instructions.
      if ((!VectorType::isValidElementType(it->getType()) &&
           !it->getType()->isVoidTy()) ||
          isa<ExtractElementInst>(it)) {
        emitAnalysis(VectorizationReport(&*it)
                     << "instruction return type cannot be vectorized");
        DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
        return false;
      }

      // Check that the stored type is vectorizable.
      if (StoreInst *ST = dyn_cast<StoreInst>(it)) {
        Type *T = ST->getValueOperand()->getType();
        if (!VectorType::isValidElementType(T)) {
          emitAnalysis(VectorizationReport(ST)
                       << "store instruction cannot be vectorized");
          return false;
        }

        // FP instructions can allow unsafe algebra, thus vectorizable by
        // non-IEEE-754 compliant SIMD units.
        // This applies to floating-point math operations and calls, not memory
        // operations, shuffles, or casts, as they don't change precision or
        // semantics.
      } else if (it->getType()->isFloatingPointTy() &&
                 (CI || it->isBinaryOp()) && !it->hasUnsafeAlgebra()) {
        DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
        Hints->setPotentiallyUnsafe();
      }

      // Reduction instructions are allowed to have exit users.
      // All other instructions must not have external users.
      if (hasOutsideLoopUser(TheLoop, &*it, AllowedExit)) {
        emitAnalysis(VectorizationReport(&*it)
                     << "value cannot be used outside the loop");
        return false;
      }

    } // next instr.
  }

  if (!Induction) {
    DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
    if (Inductions.empty()) {
      emitAnalysis(VectorizationReport()
                   << "loop induction variable could not be identified");
      return false;
    }
  }

  // Now we know the widest induction type, check if our found induction
  // is the same size. If it's not, unset it here and InnerLoopVectorizer
  // will create another.
  if (Induction && WidestIndTy != Induction->getType())
    Induction = nullptr;

  return true;
}

void LoopVectorizationLegality::collectLoopUniforms() {
  // We now know that the loop is vectorizable!
  // Collect variables that will remain uniform after vectorization.
  std::vector<Value *> Worklist;
  BasicBlock *Latch = TheLoop->getLoopLatch();

  // Start with the conditional branch and walk up the block.
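  // E.g. (illustrative): in 'a[i] = b[i] + x' the latch compare that controls
  // the backedge produces the same scalar result for every lane of a vector
  // iteration, so it and its feeding computation are collected as uniform.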
  Worklist.push_back(Latch->getTerminator()->getOperand(0));

  // Also add all consecutive pointer values; these values will be uniform
  // after vectorization (and subsequent cleanup) and, until revectorization is
  // supported, all dependencies must also be uniform.
  for (Loop::block_iterator B = TheLoop->block_begin(),
                            BE = TheLoop->block_end();
       B != BE; ++B)
    for (BasicBlock::iterator I = (*B)->begin(), IE = (*B)->end(); I != IE; ++I)
      if (I->getType()->isPointerTy() && isConsecutivePtr(&*I))
        Worklist.insert(Worklist.end(), I->op_begin(), I->op_end());

  while (!Worklist.empty()) {
    Instruction *I = dyn_cast<Instruction>(Worklist.back());
    Worklist.pop_back();

    // Look at instructions inside this loop.
    // Stop when reaching PHI nodes.
    // TODO: we need to follow values all over the loop, not only in this
    // block.
    if (!I || !TheLoop->contains(I) || isa<PHINode>(I))
      continue;

    // This is a known uniform.
    Uniforms.insert(I);

    // Insert all operands.
    Worklist.insert(Worklist.end(), I->op_begin(), I->op_end());
  }
}

bool LoopVectorizationLegality::canVectorizeMemory() {
  LAI = &LAA->getInfo(TheLoop);
  auto &OptionalReport = LAI->getReport();
  if (OptionalReport)
    emitAnalysis(VectorizationReport(*OptionalReport));
  if (!LAI->canVectorizeMemory())
    return false;

  if (LAI->hasStoreToLoopInvariantAddress()) {
    emitAnalysis(
        VectorizationReport()
        << "write to a loop invariant address could not be vectorized");
    DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
    return false;
  }

  Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
  PSE.addPredicate(LAI->PSE.getUnionPredicate());

  return true;
}

bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
  Value *In0 = const_cast<Value *>(V);
  PHINode *PN = dyn_cast_or_null<PHINode>(In0);
  if (!PN)
    return false;

  return Inductions.count(PN);
}

bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) {
  return FirstOrderRecurrences.count(Phi);
}

bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
  return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
}

bool LoopVectorizationLegality::blockCanBePredicated(
    BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) {
  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // Check that we don't have a constant expression that can trap as operand.
    for (Instruction::op_iterator OI = it->op_begin(), OE = it->op_end();
         OI != OE; ++OI) {
      if (Constant *C = dyn_cast<Constant>(*OI))
        if (C->canTrap())
          return false;
    }
    // We might be able to hoist the load.
    if (it->mayReadFromMemory()) {
      LoadInst *LI = dyn_cast<LoadInst>(it);
      if (!LI)
        return false;
      if (!SafePtrs.count(LI->getPointerOperand())) {
        if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) ||
            isLegalMaskedGather(LI->getType())) {
          MaskedOp.insert(LI);
          continue;
        }
        // !llvm.mem.parallel_loop_access implies if-conversion safety.
        if (IsAnnotatedParallel)
          continue;
        return false;
      }
    }

    // We don't predicate stores at the moment.
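    // E.g. (illustrative IR, intrinsic shape assumed): a conditional store
    // such as 'if (c[i]) a[i] = x;' can, when the target supports it, become
    //   call void @llvm.masked.store.v4i32(<4 x i32> %x, <4 x i32>* %a,
    //                                      i32 4, <4 x i1> %mask)
    // instead of being rejected; the checks below record such candidates in
    // MaskedOp.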
    if (it->mayWriteToMemory()) {
      StoreInst *SI = dyn_cast<StoreInst>(it);
      // We only support predication of stores in basic blocks with one
      // predecessor.
      if (!SI)
        return false;

      // Build a masked store if it is legal for the target.
      if (isLegalMaskedStore(SI->getValueOperand()->getType(),
                             SI->getPointerOperand()) ||
          isLegalMaskedScatter(SI->getValueOperand()->getType())) {
        MaskedOp.insert(SI);
        continue;
      }

      bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0);
      bool isSinglePredecessor = SI->getParent()->getSinglePredecessor();

      if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr ||
          !isSinglePredecessor)
        return false;
    }
    if (it->mayThrow())
      return false;

    // The instructions below can trap.
    switch (it->getOpcode()) {
    default:
      continue;
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
      return false;
    }
  }

  return true;
}

void InterleavedAccessInfo::collectConstStridedAccesses(
    MapVector<Instruction *, StrideDescriptor> &StrideAccesses,
    const ValueToValueMap &Strides) {
  // Holds load/store instructions in program order.
  SmallVector<Instruction *, 16> AccessList;

  for (auto *BB : TheLoop->getBlocks()) {
    bool IsPred = LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);

    for (auto &I : *BB) {
      if (!isa<LoadInst>(&I) && !isa<StoreInst>(&I))
        continue;
      // FIXME: Currently we can't handle mixed accesses and predicated
      // accesses.
      if (IsPred)
        return;

      AccessList.push_back(&I);
    }
  }

  if (AccessList.empty())
    return;

  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();
  for (auto I : AccessList) {
    LoadInst *LI = dyn_cast<LoadInst>(I);
    StoreInst *SI = dyn_cast<StoreInst>(I);

    Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();
    int Stride = getPtrStride(PSE, Ptr, TheLoop, Strides);

    // The factor of the corresponding interleave group.
    unsigned Factor = std::abs(Stride);

    // Ignore the access if the factor is too small or too large.
    if (Factor < 2 || Factor > MaxInterleaveGroupFactor)
      continue;

    const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
    PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
    unsigned Size = DL.getTypeAllocSize(PtrTy->getElementType());

    // An alignment of 0 means target ABI alignment.
    unsigned Align = LI ? LI->getAlignment() : SI->getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(PtrTy->getElementType());

    StrideAccesses[I] = StrideDescriptor(Stride, Scev, Size, Align);
  }
}

// Analyze interleaved accesses and collect them into interleave groups.
//
// Notice that the vectorization on interleaved groups will change instruction
// orders and may break dependences. But the memory dependence check guarantees
// that there is no overlap between two pointers of different strides, element
// sizes or underlying bases.
//
// For pointers sharing the same stride, element size and underlying base, no
// need to worry about Read-After-Write dependences and Write-After-Read
// dependences.
//
// E.g.
//   The RAW dependence:  A[i] = a;
//                        b = A[i];
// This won't exist as it is a store-load forwarding conflict, which has
// already been checked and forbidden in the dependence check.
//
// E.g. The WAR dependence:  a = A[i];  // (1)
//                           A[i] = b;  // (2)
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). The dependence is safe.
void InterleavedAccessInfo::analyzeInterleaving(
    const ValueToValueMap &Strides) {
  DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");

  // Holds all the stride accesses.
  MapVector<Instruction *, StrideDescriptor> StrideAccesses;
  collectConstStridedAccesses(StrideAccesses, Strides);

  if (StrideAccesses.empty())
    return;

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> LoadGroups;

  // Search the load-load/write-write pair B-A in bottom-up order and try to
  // insert B into the interleave group of A according to 3 rules:
  //   1. A and B have the same stride.
  //   2. A and B have the same memory object size.
  //   3. B belongs to the group according to the distance.
  //
  // The bottom-up order can avoid breaking the Write-After-Write dependences
  // between two pointers of the same base.
  // E.g.  A[i]   = a;   (1)
  //       A[i]   = b;   (2)
  //       A[i+1] = c;   (3)
  // We form the group (2)+(3) in front, so (1) has to form groups with
  // accesses above (1), which guarantees that (1) is always above (2).
  for (auto I = StrideAccesses.rbegin(), E = StrideAccesses.rend(); I != E;
       ++I) {
    Instruction *A = I->first;
    StrideDescriptor DesA = I->second;

    InterleaveGroup *Group = getInterleaveGroup(A);
    if (!Group) {
      DEBUG(dbgs() << "LV: Creating an interleave group with:" << *A << '\n');
      Group = createInterleaveGroup(A, DesA.Stride, DesA.Align);
    }

    if (A->mayWriteToMemory())
      StoreGroups.insert(Group);
    else
      LoadGroups.insert(Group);

    for (auto II = std::next(I); II != E; ++II) {
      Instruction *B = II->first;
      StrideDescriptor DesB = II->second;

      // Ignore if B is already in a group or B is a different memory
      // operation.
      if (isInterleaved(B) || A->mayReadFromMemory() != B->mayReadFromMemory())
        continue;

      // Check rules 1 and 2.
      if (DesB.Stride != DesA.Stride || DesB.Size != DesA.Size)
        continue;

      // Calculate the distance and prepare for rule 3.
      const SCEVConstant *DistToA = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesB.Scev, DesA.Scev));
      if (!DistToA)
        continue;

      int DistanceToA = DistToA->getAPInt().getSExtValue();

      // Skip if the distance is not a multiple of the size, as B and A are
      // then not in the same group.
      if (DistanceToA % static_cast<int>(DesA.Size))
        continue;

      // The index of B is the index of A plus its index relative to A.
      int IndexB =
          Group->getIndex(A) + DistanceToA / static_cast<int>(DesA.Size);

      // Try to insert B into the group.
      if (Group->insertMember(B, IndexB, DesB.Align)) {
        DEBUG(dbgs() << "LV: Inserted:" << *B << '\n'
                     << " into the interleave group with" << *A << '\n');
        InterleaveGroupMap[B] = Group;

        // Set the first load in program order as the insert position.
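        // (Since the scan is bottom-up, B precedes A in program order, so the
        // insert position for a load group converges on its earliest member;
        // generating the wide load there keeps every extracted element
        // available to its original users.)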
        if (B->mayReadFromMemory())
          Group->setInsertPos(B);
      }
    } // Iteration on instruction B
  }   // Iteration on instruction A

  // Remove interleaved store groups with gaps.
  for (InterleaveGroup *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor())
      releaseGroup(Group);

  // If there is a non-reversed interleaved load group with gaps, we will need
  // to execute at least one scalar epilogue iteration. This will ensure that
  // we don't speculatively access memory out-of-bounds. Note that we only need
  // to look for a member at index factor - 1, since every group must have a
  // member at index zero.
  for (InterleaveGroup *Group : LoadGroups)
    if (!Group->getMember(Group->getFactor() - 1)) {
      if (Group->isReverse()) {
        releaseGroup(Group);
      } else {
        DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
        RequiresScalarEpilogue = true;
      }
    }
}

LoopVectorizationCostModel::VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
  // Width 1 means do not vectorize.
  VectorizationFactor Factor = {1U, 0U};
  if (OptForSize && Legal->getRuntimePointerChecking()->Need) {
    emitAnalysis(
        VectorizationReport()
        << "runtime pointer checks needed. Enable vectorization of this "
           "loop with '#pragma clang loop vectorize(enable)' when "
           "compiling with -Os/-Oz");
    DEBUG(dbgs()
          << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
    return Factor;
  }

  if (!EnableCondStoresVectorization && Legal->getNumPredStores()) {
    emitAnalysis(
        VectorizationReport()
        << "store that is conditionally executed prevents vectorization");
    DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
    return Factor;
  }

  // Find the trip count.
  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');

  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  unsigned WidestRegister = TTI.getRegisterBitWidth(true);
  unsigned MaxSafeDepDist = -1U;

  // Get the maximum safe dependence distance in bits computed by LAA. If the
  // loop contains any interleaved accesses, we divide the dependence distance
  // by the maximum interleave factor of all interleaved groups. Note that
  // although the division ensures correctness, this is a fairly conservative
  // computation because the maximum distance computed by LAA may not involve
  // any of the interleaved accesses.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist =
        Legal->getMaxSafeDepDistBytes() * 8 / Legal->getMaxInterleaveFactor();

  WidestRegister =
      ((WidestRegister < MaxSafeDepDist) ? WidestRegister : MaxSafeDepDist);
  unsigned MaxVectorSize = WidestRegister / WidestType;

  DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
               << WidestType << " bits.\n");
  DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister
               << " bits.\n");

  if (MaxVectorSize == 0) {
    DEBUG(dbgs() << "LV: The target has no vector registers.\n");
    MaxVectorSize = 1;
  }

  assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
                                " into one vector!");

  unsigned VF = MaxVectorSize;
  if (MaximizeBandwidth && !OptForSize) {
    // Collect all viable vectorization factors.
    SmallVector<unsigned, 8> VFs;
    unsigned NewMaxVectorSize = WidestRegister / SmallestType;
    for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2)
      VFs.push_back(VS);

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than existing
    // ones.
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
    for (int i = RUs.size() - 1; i >= 0; --i) {
      if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
        VF = VFs[i];
        break;
      }
    }
  }

  // If we optimize the program for size, avoid creating the tail loop.
  if (OptForSize) {
    // If we are unable to calculate the trip count then don't try to
    // vectorize.
    if (TC < 2) {
      emitAnalysis(
          VectorizationReport()
          << "unable to calculate the loop count due to complex control flow");
      DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
      return Factor;
    }

    // Find the maximum SIMD width that can fit within the trip count.
    VF = TC % MaxVectorSize;

    if (VF == 0)
      VF = MaxVectorSize;
    else {
      // If the trip count that we found modulo the vectorization factor is not
      // zero then we require a tail.
      emitAnalysis(VectorizationReport()
                   << "cannot optimize for size and vectorize at the "
                      "same time. Enable vectorization of this loop "
                      "with '#pragma clang loop vectorize(enable)' "
                      "when compiling with -Os/-Oz");
      DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
      return Factor;
    }
  }

  int UserVF = Hints->getWidth();
  if (UserVF != 0) {
    assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
    DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");

    Factor.Width = UserVF;
    return Factor;
  }

  float Cost = expectedCost(1).first;
#ifndef NDEBUG
  const float ScalarCost = Cost;
#endif /* NDEBUG */
  unsigned Width = 1;
  DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  // Ignore scalar width, because the user explicitly wants vectorization.
  if (ForceVectorization && VF > 1) {
    Width = 2;
    Cost = expectedCost(Width).first / (float)Width;
  }

  for (unsigned i = 2; i <= VF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loops by the width of
    // the vector elements.
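    // Illustrative numbers (assumed, not from any target): if the scalar loop
    // costs 20 per iteration and the VF = 4 body costs 48, the normalized
    // vector cost is 48 / 4 = 12 < 20, so VF = 4 is selected unless a wider
    // factor normalizes even lower.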
    VectorizationCostTy C = expectedCost(i);
    float VectorCost = C.first / (float)i;
    DEBUG(dbgs() << "LV: Vector loop of width " << i
                 << " costs: " << (int)VectorCost << ".\n");
    if (!C.second && !ForceVectorization) {
      DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }
    if (VectorCost < Cost) {
      Cost = VectorCost;
      Width = i;
    }
  }

  DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
        << "LV: Vectorization seems to be not beneficial, "
        << "but was forced by a user.\n");
  DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
  Factor.Width = Width;
  Factor.Cost = Width * Cost;
  return Factor;
}

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  // For each block.
  for (Loop::block_iterator bb = TheLoop->block_begin(),
                            be = TheLoop->block_end();
       bb != be; ++bb) {
    BasicBlock *BB = *bb;

    // For each instruction in the loop.
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
      Type *T = it->getType();

      // Skip ignored values.
      if (ValuesToIgnore.count(&*it))
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(it) && !isa<StoreInst>(it) && !isa<PHINode>(it))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (PHINode *PN = dyn_cast<PHINode>(it)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (StoreInst *ST = dyn_cast<StoreInst>(it))
        T = ST->getValueOperand()->getType();

      // Ignore loaded pointer types and stored pointer types that are not
      // consecutive. However, we do want to take consecutive stores/loads of
      // pointer vectors into account.
      if (T->isPointerTy() && !isConsecutiveLoadOrStore(&*it))
        continue;

      MinWidth = std::min(MinWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
      MaxWidth = std::max(MaxWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
    }
  }

  return {MinWidth, MaxWidth};
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
                                                           unsigned VF,
                                                           unsigned LoopCost) {

  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop overhead.
  // There are many micro-architectural considerations that we can't predict
  // at this level. For example, frontend pressure (on decode or fetch) due to
  // code size, or the number and capabilities of the execution ports.
  //
  // We use the following heuristics to select the interleave count:
  //   1. If the code has reductions, then we interleave to break the cross
  //      iteration dependency (see the sketch below).
  //   2. If the loop is really small, then we interleave to reduce the loop
  //      overhead.
  //   3. We don't interleave if we think that we will spill registers to
  //      memory due to the increased register pressure.
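  //
  // Illustrative sketch for heuristic 1 (assumed C source, not emitted code):
  // interleaving the reduction
  //   for (i = 0; i < n; ++i) sum += a[i];
  // by a factor of two gives, conceptually,
  //   for (i = 0; i < n; i += 2) { sum0 += a[i]; sum1 += a[i + 1]; }
  //   sum = sum0 + sum1;
  // so consecutive iterations no longer serialize on a single accumulator.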

  // When we optimize for size, we don't interleave.
  if (OptForSize)
    return 1;

  // We used the distance for the interleave count.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  // Do not interleave loops with a relatively small trip count.
  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
    return 1;

  unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
  DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
               << " registers\n");

  if (VF == 1) {
    if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumScalarRegs;
  } else {
    if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumVectorRegs;
  }

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants so assume that we have at least one
  // instruction that uses at least one register.
  R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
  R.NumInstructions = std::max(R.NumInstructions, 1U);

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want a power-of-two interleave count to simplify any
  // addressing operations or alignment considerations.
  unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
                              R.MaxLocalUsers);

  // Don't count the induction variable as interleaved.
  if (EnableIndVarRegisterHeur)
    IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
                       std::max(1U, (R.MaxLocalUsers - 1)));

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);

  // Check if the user has overridden the max.
  if (VF == 1) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }

  // If we did not calculate the cost for VF (because the user selected the VF)
  // then we calculate the cost of VF here.
  if (LoopCost == 0)
    LoopCost = expectedCost(VF).first;

  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target allows.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else if (IC < 1)
    IC = 1;

  // Interleave if we vectorized this loop and there is a reduction that could
  // benefit from interleaving.
  if (VF > 1 && Legal->getReductionVars()->size()) {
    DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // Note that if we've already vectorized the loop we will have done the
  // runtime check and so interleaving won't require further checks.
  bool InterleavingRequiresRuntimePointerCheck =
      (VF == 1 && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead and
  // potentially expose ILP opportunities.
  DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the cost overhead is 1, and we use the cost model to
    // estimate the cost of the loop; we interleave until the cost of the loop
    // overhead is about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by max interleave count)
    // are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit, by default, to 2 so
    // that the critical path only gets increased by one reduction operation.
    if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
    return SmallIC;
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  bool HasReductions = (Legal->getReductionVars()->size() > 0);
  if (TTI.enableAggressiveInterleaving(HasReductions)) {
    DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is a
  // very rough estimation. We scan the loop in topological order and assign a
  // number to each instruction. We use RPO to ensure that defs are met before
  // their users. We assume that each instruction that has in-loop users starts
  // an interval. We record every time that an in-loop value is used, so we
  // have a list of the first and last occurrences of each instruction. Next,
  // we transpose this data structure into a multimap that holds the list of
  // intervals that *end* at a specific location. This multimap allows us to
  // perform a linear search. We scan the instructions linearly and record each
  // time that a new interval starts, by placing it in a set. If we find this
  // value in the multimap then we remove it from the set. The max register
  // usage is the maximum size of the set.
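  //
  // Illustrative sketch (assumed toy schedule): with instructions numbered
  //   %a = ... (0), %b = ... (1), %c = use %a, %b (2), %d = use %a (3)
  // interval(%a) = [0, 3] and interval(%b) = [1, 2], so at index 2 both
  // intervals are open and the maximum register usage is 2.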
  // We also search for instructions that are defined outside the loop, but are
  // used inside the loop. We need this number separately from the max-interval
  // usage number because when we unroll, loop-invariant values do not take
  // more registers.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;
  RU.NumInstructions = 0;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  typedef DenseMap<Instruction *, unsigned> IntervalMap;
  // Maps an index to the instruction at that index.
  DenseMap<unsigned, Instruction *> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the list of instructions that are used in the loop.
  SmallSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  unsigned Index = 0;
  for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(), be = DFS.endRPO();
       bb != be; ++bb) {
    RU.NumInstructions += (*bb)->size();
    for (Instruction &I : **bb) {
      IdxToInstr[Index++] = &I;

      // Save the end location of each USE.
      for (unsigned i = 0; i < I.getNumOperands(); ++i) {
        Value *U = I.getOperand(i);
        Instruction *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = Index;
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  typedef SmallVector<Instruction *, 2> InstrList;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (IntervalMap::iterator it = EndPoint.begin(), e = EndPoint.end(); it != e;
       ++it)
    TransposeEnds[it->second].push_back(it->first);

  SmallSet<Instruction *, 8> OpenIntervals;

  // Get the size of the widest register.
  unsigned MaxSafeDepDist = -1U;
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
  unsigned WidestRegister =
      std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);

  DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
  auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
    if (Ty->isTokenTy())
      return 0U;
    unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
    return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
  };

  for (unsigned int i = 0; i < Index; ++i) {
    Instruction *I = IdxToInstr[i];
    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I))
      continue;

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (unsigned int j = 0, e = List.size(); j < e; ++j)
      OpenIntervals.erase(List[j]);

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;

    // For each VF find the maximum usage of registers.
    for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
      if (VFs[j] == 1) {
        MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size());
        continue;
      }

      // Count the number of live intervals.
      unsigned RegUsage = 0;
      for (auto Inst : OpenIntervals) {
        // Skip ignored values for VF > 1.
        if (VecValuesToIgnore.count(Inst))
          continue;
        RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
      }
      MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
    }

    DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
                 << OpenIntervals.size() << '\n');

    // Add the current instruction to the list of open intervals.
    OpenIntervals.insert(I);
  }

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    unsigned Invariant = 0;
    if (VFs[i] == 1)
      Invariant = LoopInvariants.size();
    else {
      for (auto Inst : LoopInvariants)
        Invariant += GetRegUsage(Inst->getType(), VFs[i]);
    }

    DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
    DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
    DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');
    DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n');

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[i];
    RUs[i] = RU;
  }

  return RUs;
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::expectedCost(unsigned VF) {
  VectorizationCostTy Cost;

  // For each block.
  for (Loop::block_iterator bb = TheLoop->block_begin(),
                            be = TheLoop->block_end();
       bb != be; ++bb) {
    VectorizationCostTy BlockCost;
    BasicBlock *BB = *bb;

    // For each instruction in the old loop.
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
      // Skip dbg intrinsics.
      if (isa<DbgInfoIntrinsic>(it))
        continue;

      // Skip ignored values.
      if (ValuesToIgnore.count(&*it))
        continue;

      VectorizationCostTy C = getInstructionCost(&*it, VF);

      // Check if we should override the cost.
      if (ForceTargetInstructionCost.getNumOccurrences() > 0)
        C.first = ForceTargetInstructionCost;

      BlockCost.first += C.first;
      BlockCost.second |= C.second;
      DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF "
                   << VF << " For instruction: " << *it << '\n');
    }

    // We assume that if-converted blocks have a 50% chance of being executed.
    // When the code is scalar then some of the blocks are avoided due to CF.
    // When the code is vectorized we execute all code paths.
    if (VF == 1 && Legal->blockNeedsPredication(*bb))
      BlockCost.first /= 2;

    Cost.first += BlockCost.first;
    Cost.second |= BlockCost.second;
  }

  return Cost;
}

/// \brief Check if the load/store instruction \p I may be translated into
/// gather/scatter during vectorization.
///
/// Pointer \p Ptr specifies the address in memory for the given scalar memory
/// instruction. We need it to retrieve the data type.
/// Using gather/scatter is possible when it is supported by the target.
static bool isGatherOrScatterLegal(Instruction *I, Value *Ptr,
                                   LoopVectorizationLegality *Legal) {
  Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
  return (isa<LoadInst>(I) && Legal->isLegalMaskedGather(DataTy)) ||
         (isa<StoreInst>(I) && Legal->isLegalMaskedScatter(DataTy));
}

/// \brief Check whether the address computation for a non-consecutive memory
/// access looks like an unlikely candidate for being merged into the indexing
/// mode.
///
/// We look for a GEP which has one index that is an induction variable and all
/// other indices are loop invariant. If the stride of this access is also
/// within a small bound we decide that this address computation can likely be
/// merged into the addressing mode.
/// In all other cases, we identify the address computation as complex.
static bool isLikelyComplexAddressComputation(Value *Ptr,
                                              LoopVectorizationLegality *Legal,
                                              ScalarEvolution *SE,
                                              const Loop *TheLoop) {
  GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  if (!Gep)
    return true;

  // We are looking for a GEP with all loop-invariant indices except for one
  // which should be an induction variable.
  unsigned NumOperands = Gep->getNumOperands();
  for (unsigned i = 1; i < NumOperands; ++i) {
    Value *Opd = Gep->getOperand(i);
    if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
        !Legal->isInductionVariable(Opd))
      return true;
  }

  // Now we know we have a GEP ptr, %inv, %ind, %inv. Make sure that the step
  // can likely be merged into the address computation.
  unsigned MaxMergeDistance = 64;

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Ptr));
  if (!AddRec)
    return true;

  // Check that the step is constant.
  const SCEV *Step = AddRec->getStepRecurrence(*SE);
  // Calculate the pointer stride and check if it is consecutive.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C)
    return true;

  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return true;

  int64_t StepVal = APStepVal.getSExtValue();

  return StepVal > MaxMergeDistance;
}

static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
  return Legal->hasStride(I->getOperand(0)) ||
         Legal->hasStride(I->getOperand(1));
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
  // If we know that this instruction will remain uniform, check the cost of
  // the scalar version.
  if (Legal->isUniformAfterVectorization(I))
    VF = 1;

  Type *VectorTy;
  unsigned C = getInstructionCost(I, VF, VectorTy);

  bool TypeNotScalarized =
      VF > 1 && !VectorTy->isVoidTy() && TTI.getNumberOfParts(VectorTy) < VF;
  return VectorizationCostTy(C, TypeNotScalarized);
}

unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
                                                        unsigned VF,
                                                        Type *&VectorTy) {
  Type *RetTy = I->getType();
  if (VF > 1 && MinBWs.count(I))
    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  VectorTy = ToVectorTy(RetTy, VF);
  auto SE = PSE.getSE();

  // TODO: We need to estimate the cost of intrinsic calls.
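  // The cases below estimate what the widened form of each opcode will cost.
  // For example (illustrative): an 'add' over <4 x i32> usually maps to a
  // single vector add, while an opcode the target cannot widen falls through
  // to the default case and is priced as VF scalar copies plus the
  // insert/extract traffic needed to keep the values in vectors.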
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    // We mark this instruction as zero-cost because the cost of GEPs in
    // vectorized code depends on whether the corresponding memory instruction
    // is scalarized or not. Therefore, we handle GEPs with the memory
    // instruction cost.
    return 0;
  case Instruction::Br: {
    return TTI.getCFInstrCost(I->getOpcode());
  }
  case Instruction::PHI: {
    auto *Phi = cast<PHINode>(I);

    // First-order recurrences are replaced by vector shuffles inside the loop.
    if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
      return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
                                VectorTy, VF - 1, VectorTy);

    // TODO: IF-converted IFs become selects.
    return 0;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go
    // away.
    if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
      return 0;
    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueProperties Op1VP =
        TargetTransformInfo::OP_None;
    TargetTransformInfo::OperandValueProperties Op2VP =
        TargetTransformInfo::OP_None;
    Value *Op2 = I->getOperand(1);

    // Check for a splat of a constant or for a non-uniform vector of
    // constants.
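    // For example (illustrative): in "shl <4 x i32> %x, <i32 2, i32 2, i32 2,
    // i32 2>" the shift amount is a uniform power-of-two constant, which lets
    // targets such as x86 use a cheap immediate-shift encoding.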
    if (auto *CInt = dyn_cast<ConstantInt>(Op2)) {
      if (CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;
      Op2VK = TargetTransformInfo::OK_UniformConstantValue;
    } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
      Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      Constant *SplatValue = cast<Constant>(Op2)->getSplatValue();
      if (SplatValue) {
        ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue);
        if (CInt && CInt->getValue().isPowerOf2())
          Op2VP = TargetTransformInfo::OP_PowerOf2;
        Op2VK = TargetTransformInfo::OK_UniformConstantValue;
      }
    }

    return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, Op2VK,
                                      Op1VP, Op2VP);
  }
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
    bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
    Type *CondTy = SI->getCondition()->getType();
    if (!ScalarCond)
      CondTy = VectorType::get(CondTy, VF);

    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy);
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ValTy = I->getOperand(0)->getType();
    Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
    auto It = MinBWs.find(Op0AsInstruction);
    if (VF > 1 && It != MinBWs.end())
      ValTy = IntegerType::get(ValTy->getContext(), It->second);
    VectorTy = ToVectorTy(ValTy, VF);
    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy);
  }
  case Instruction::Store:
  case Instruction::Load: {
    StoreInst *SI = dyn_cast<StoreInst>(I);
    LoadInst *LI = dyn_cast<LoadInst>(I);
    Type *ValTy = (SI ? SI->getValueOperand()->getType() : LI->getType());
    VectorTy = ToVectorTy(ValTy, VF);

    unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment();
    unsigned AS =
        SI ? SI->getPointerAddressSpace() : LI->getPointerAddressSpace();
    Value *Ptr = SI ? SI->getPointerOperand() : LI->getPointerOperand();
    // We add the cost of address computation here instead of with the GEP
    // instruction because only here do we know whether the operation is
    // scalarized.
    if (VF == 1)
      return TTI.getAddressComputationCost(VectorTy) +
             TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);

    if (LI && Legal->isUniform(Ptr)) {
      // Scalar load + broadcast.
      unsigned Cost = TTI.getAddressComputationCost(ValTy->getScalarType());
      Cost += TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
                                  Alignment, AS);
      return Cost +
             TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, ValTy);
    }

    // For an interleaved access, calculate the total cost of the whole
    // interleave group.
    if (Legal->isAccessInterleaved(I)) {
      auto Group = Legal->getInterleavedAccessGroup(I);
      assert(Group && "Failed to get an interleaved access group.");

      // Only calculate the cost once at the insert position.
      if (Group->getInsertPos() != I)
        return 0;

      unsigned InterleaveFactor = Group->getFactor();
      Type *WideVecTy =
          VectorType::get(VectorTy->getVectorElementType(),
                          VectorTy->getVectorNumElements() * InterleaveFactor);

      // Holds the indices of existing members in an interleaved load group.
      // An interleaved store group doesn't need this as it doesn't allow gaps.
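      // For example (illustrative): a factor-2 load group that accesses
      // a[2*i] but leaves a gap at a[2*i + 1] records only index 0, so the
      // target is not charged for extracting the unused member.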
      SmallVector<unsigned, 4> Indices;
      if (LI) {
        for (unsigned i = 0; i < InterleaveFactor; i++)
          if (Group->getMember(i))
            Indices.push_back(i);
      }

      // Calculate the cost of the whole interleaved group.
      unsigned Cost = TTI.getInterleavedMemoryOpCost(
          I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
          Group->getAlignment(), AS);

      if (Group->isReverse())
        Cost +=
            Group->getNumMembers() *
            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);

      // FIXME: The interleaved load group with a huge gap could be even more
      // expensive than scalar operations. Then we could ignore such a group
      // and use scalar operations instead.
      return Cost;
    }

    // Check whether the access is consecutive or can become a gather/scatter;
    // everything else must be scalarized.
    int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
    bool UseGatherOrScatter =
        (ConsecutiveStride == 0) && isGatherOrScatterLegal(I, Ptr, Legal);

    bool Reverse = ConsecutiveStride < 0;
    const DataLayout &DL = I->getModule()->getDataLayout();
    unsigned ScalarAllocatedSize = DL.getTypeAllocSize(ValTy);
    unsigned VectorElementSize = DL.getTypeStoreSize(VectorTy) / VF;

    // Scalarized loads/stores.
    if ((!ConsecutiveStride && !UseGatherOrScatter) ||
        ScalarAllocatedSize != VectorElementSize) {
      bool IsComplexComputation =
          isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop);
      unsigned Cost = 0;
      // The cost of extracting from the value vector and pointer vector.
      Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
      for (unsigned i = 0; i < VF; ++i) {
        // The cost of extracting the pointer operand.
        Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, PtrTy, i);
        // In case of STORE, the cost of ExtractElement from the vector.
        // In case of LOAD, the cost of InsertElement into the returned
        // vector.
        Cost += TTI.getVectorInstrCost(SI ? Instruction::ExtractElement
                                          : Instruction::InsertElement,
                                       VectorTy, i);
      }

      // The cost of the scalar loads/stores.
      Cost += VF * TTI.getAddressComputationCost(PtrTy, IsComplexComputation);
      Cost += VF *
              TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
                                  Alignment, AS);
      return Cost;
    }

    unsigned Cost = TTI.getAddressComputationCost(VectorTy);
    if (UseGatherOrScatter) {
      assert(ConsecutiveStride == 0 &&
             "Gather/Scatter are not used for consecutive stride");
      return Cost +
             TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
                                        Legal->isMaskRequired(I), Alignment);
    }
    // Wide loads/stores.
    if (Legal->isMaskRequired(I))
      Cost +=
          TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
    else
      Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);

    if (Reverse)
      Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
    return Cost;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    // We optimize the truncation of induction variables; the cost of these is
    // the same as the scalar operation.
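    // For example (illustrative): "trunc i64 %iv to i32" applied to the
    // canonical induction variable can be generated directly in the narrow
    // type, so no extra vector truncate is required.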
    if (I->getOpcode() == Instruction::Trunc &&
        Legal->isInductionVariable(I->getOperand(0)))
      return TTI.getCastInstrCost(I->getOpcode(), I->getType(),
                                  I->getOperand(0)->getType());

    Type *SrcScalarTy = I->getOperand(0)->getType();
    Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF);
    if (VF > 1 && MinBWs.count(I)) {
      // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
      //
      // Calculate the modified src and dest types.
      Type *MinVecTy = VectorTy;
      if (I->getOpcode() == Instruction::Trunc) {
        SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      } else if (I->getOpcode() == Instruction::ZExt ||
                 I->getOpcode() == Instruction::SExt) {
        SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      }
    }

    return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy);
  }
  case Instruction::Call: {
    bool NeedToScalarize;
    CallInst *CI = cast<CallInst>(I);
    unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
    if (getVectorIntrinsicIDForCall(CI, TLI))
      return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
    return CallCost;
  }
  default: {
    // We are scalarizing the instruction. Return the cost of the scalar
    // instruction, plus the cost of inserting and extracting the vector
    // elements, times the vector width.
    unsigned Cost = 0;

    if (!RetTy->isVoidTy() && VF != 1) {
      unsigned InsCost =
          TTI.getVectorInstrCost(Instruction::InsertElement, VectorTy);
      unsigned ExtCost =
          TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy);

      // The cost of inserting the results plus extracting each one of the
      // operands.
      Cost += VF * (InsCost + ExtCost * I->getNumOperands());
    }

    // The cost of executing VF copies of the scalar instruction. This opcode
    // is unknown. Assume that it is the same as 'mul'.
    Cost += VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy);
    return Cost;
  }
  } // end of switch.
}

char LoopVectorize::ID = 0;
static const char lv_name[] = "Loop Vectorization";
INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LoopAccessAnalysis)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)

namespace llvm {
Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
  return new LoopVectorize(NoUnrolling, AlwaysVectorize);
}
}

bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
  // Check for a store.
  if (StoreInst *ST = dyn_cast<StoreInst>(Inst))
    return Legal->isConsecutivePtr(ST->getPointerOperand()) != 0;

  // Check for a load.
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
    return Legal->isConsecutivePtr(LI->getPointerOperand()) != 0;

  return false;
}

void LoopVectorizationCostModel::collectValuesToIgnore() {
  // Ignore ephemeral values.
  CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);

  // Ignore type-promoting instructions we identified during reduction
  // detection.
  for (auto &Reduction : *Legal->getReductionVars()) {
    RecurrenceDescriptor &RedDes = Reduction.second;
    SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }

  // Ignore induction PHIs that are only used either in a GetElementPtr or in
  // an ICmp instruction that exits the loop. Induction variables usually have
  // large types and can have a big impact on register usage estimates.
  // This is for when VF > 1.
  for (auto &Induction : *Legal->getInductionVars()) {
    auto *PN = Induction.first;
    auto *UpdateV = PN->getIncomingValueForBlock(TheLoop->getLoopLatch());

    // Check that the PHI is only used by the induction increment (UpdateV) or
    // by GEPs. Then check that UpdateV is only used by a compare instruction
    // or the loop header PHI.
    // FIXME: Need precise def-use analysis to determine if this induction
    // variable will be vectorized.
    if (std::all_of(PN->user_begin(), PN->user_end(),
                    [&](const User *U) -> bool {
                      return U == UpdateV || isa<GetElementPtrInst>(U);
                    }) &&
        std::all_of(UpdateV->user_begin(), UpdateV->user_end(),
                    [&](const User *U) -> bool {
                      return U == PN || isa<ICmpInst>(U);
                    })) {
      VecValuesToIgnore.insert(PN);
      VecValuesToIgnore.insert(UpdateV);
    }
  }

  // Ignore instructions that will not be vectorized.
  // This is for when VF > 1.
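  // For example (illustrative): in "getelementptr i32, i32* %a, i64 %iv" the
  // only non-invariant index is the induction variable, so the access remains
  // consecutive and the GEP will not occupy a vector register.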
  for (auto bb = TheLoop->block_begin(), be = TheLoop->block_end(); bb != be;
       ++bb) {
    for (auto &Inst : **bb) {
      switch (Inst.getOpcode()) {
      case Instruction::GetElementPtr: {
        // Ignore a GEP if its last operand is an induction variable, so that
        // it remains a consecutive load/store and won't be vectorized as a
        // scatter/gather pattern.
        GetElementPtrInst *Gep = cast<GetElementPtrInst>(&Inst);
        unsigned NumOperands = Gep->getNumOperands();
        unsigned InductionOperand = getGEPInductionOperand(Gep);
        bool GepToIgnore = true;

        // Check that all of the GEP indices are uniform except for the
        // induction operand.
        for (unsigned i = 0; i != NumOperands; ++i) {
          if (i != InductionOperand &&
              !PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)),
                                            TheLoop)) {
            GepToIgnore = false;
            break;
          }
        }

        if (GepToIgnore)
          VecValuesToIgnore.insert(&Inst);
        break;
      }
      default:
        break;
      }
    }
  }
}

void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
                                             bool IfPredicateStore) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
  // Holds vector parameters or scalars, in the case of uniform values.
  SmallVector<VectorParts, 4> Params;

  setDebugLocFromInst(Builder, Instr);

  // Find all of the vectorized parameters.
  for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
    Value *SrcOp = Instr->getOperand(op);

    // If we are accessing the old induction variable, use the new one.
    if (SrcOp == OldInduction) {
      Params.push_back(getVectorValue(SrcOp));
      continue;
    }

    // Try using previously calculated values.
    Instruction *SrcInst = dyn_cast<Instruction>(SrcOp);

    // If the src is an instruction that appeared earlier in the basic block
    // then it should already be vectorized.
    if (SrcInst && OrigLoop->contains(SrcInst)) {
      assert(WidenMap.has(SrcInst) && "Source operand is unavailable");
      // The parameter is a vector value from earlier.
      Params.push_back(WidenMap.get(SrcInst));
    } else {
      // The parameter is a scalar from outside the loop. Maybe even a
      // constant.
      VectorParts Scalars;
      Scalars.append(UF, SrcOp);
      Params.push_back(Scalars);
    }
  }

  assert(Params.size() == Instr->getNumOperands() &&
         "Invalid number of operands");

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Value *UndefVec = IsVoidRetTy ? nullptr : UndefValue::get(Instr->getType());
  // Create a new entry in the WidenMap and initialize it to Undef or Null.
  VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);

  VectorParts Cond;
  if (IfPredicateStore) {
    assert(Instr->getParent()->getSinglePredecessor() &&
           "Only support single predecessor blocks");
    Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(),
                          Instr->getParent());
  }

  // For each vector unroll 'part':
  for (unsigned Part = 0; Part < UF; ++Part) {
    // For each scalar that we create:

    // Start an "if (pred) a[i] = ..." block.
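    // For example (an illustrative sketch): for a predicated store
    // "if (c) a[i] = x", the mask bit for this unroll part is compared
    // against 1 below, and the resulting (store, compare) pair is recorded
    // so the store can be wrapped in its own conditional block later.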
    Value *Cmp = nullptr;
    if (IfPredicateStore) {
      if (Cond[Part]->getType()->isVectorTy())
        Cond[Part] =
            Builder.CreateExtractElement(Cond[Part], Builder.getInt32(0));
      Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cond[Part],
                               ConstantInt::get(Cond[Part]->getType(), 1));
    }

    Instruction *Cloned = Instr->clone();
    if (!IsVoidRetTy)
      Cloned->setName(Instr->getName() + ".cloned");
    // Replace the operands of the cloned instruction with extracted scalars.
    for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
      Value *Op = Params[op][Part];
      Cloned->setOperand(op, Op);
    }

    // Place the cloned scalar in the new loop.
    Builder.Insert(Cloned);

    // If we just cloned a new assumption, add it to the assumption cache.
    if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
      if (II->getIntrinsicID() == Intrinsic::assume)
        AC->registerAssumption(II);

    // If the original scalar returns a value we need to place it in a vector
    // so that future users will be able to use it.
    if (!IsVoidRetTy)
      VecResults[Part] = Cloned;

    // End if-block.
    if (IfPredicateStore)
      PredicatedStores.push_back(std::make_pair(cast<StoreInst>(Cloned), Cmp));
  }
}

void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) {
  StoreInst *SI = dyn_cast<StoreInst>(Instr);
  bool IfPredicateStore = (SI && Legal->blockNeedsPredication(SI->getParent()));

  return scalarizeInstruction(Instr, IfPredicateStore);
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx,
                                        const SCEV *StepSCEV) {
  const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
  SCEVExpander Exp(*PSE.getSE(), DL, "induction");
  Value *StepValue = Exp.expandCodeFor(StepSCEV, StepSCEV->getType(),
                                       &*Builder.GetInsertPoint());
  return getStepVector(Val, StartIdx, StepValue);
}

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
  Type *ITy = Val->getType();
  assert(!ITy->isVectorTy() && "Val must be a scalar");
  Constant *C = ConstantInt::get(ITy, StartIdx);
  return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
}