//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD.
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
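//
// As a rough illustration of the transformation described above (a sketch
// only, in C-like pseudo code, assuming a vectorization factor of 4):
//
//   for (i = 0; i < n; ++i)            for (i = 0; i < n; i += 4)
//     A[i] = B[i] + 5;         ==>       A[i:i+3] = B[i:i+3] + <5,5,5,5>;
//
// A scalar epilogue loop (not shown) executes the remaining iterations when
// n is not known to be a multiple of the vector width.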
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <algorithm>
#include <functional>
#include <map>
#include <tuple>

using namespace llvm;
using namespace llvm::PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// We don't vectorize loops with a known constant trip count below this number.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Don't vectorize loops with a constant "
             "trip count that is smaller than this "
             "value."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting the vectorization factor, "
             "which will be determined by the smallest type in the loop."));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///   if (Stride1 == 1 && Stride2 == 1) {
///     for (i = 0; i < N; i+=4)
///       A[i:i+3] += ...
///   } else
///     ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Interleave loops at runtime to improve load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
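/// A predicated store is one that, after if-conversion, must execute only
/// under its block's mask, e.g. (a sketch in C-like pseudo code):
///   if (cond[i]) A[i] = x;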
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(false), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

namespace {

// Forward declarations.
class LoopVectorizeHints;
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;

/// \brief This modifies LoopAccessReport to initialize the message with a
/// loop-vectorizer-specific part.
class VectorizationReport : public LoopAccessReport {
public:
  VectorizationReport(Instruction *I = nullptr)
      : LoopAccessReport("loop not vectorized: ", I) {}

  /// \brief This allows promotion of the loop-access analysis report into the
  /// loop-vectorizer report. It modifies the message to add the
  /// loop-vectorizer-specific part of the message.
  explicit VectorizationReport(const LoopAccessReport &R)
      : LoopAccessReport(Twine("loop not vectorized: ") + R.str(),
                         R.getInstr()) {}
};

/// A helper function for converting scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}

/// A helper function that returns the GEP instruction and knows to skip a
/// 'bitcast'. The 'bitcast' may be skipped if the source and the destination
/// pointee types of the 'bitcast' have the same size.
/// For example:
///   bitcast double** %var to i64* - can be skipped
///   bitcast double** %var to i8*  - cannot
static GetElementPtrInst *getGEPInstruction(Value *Ptr) {
  if (isa<GetElementPtrInst>(Ptr))
    return cast<GetElementPtrInst>(Ptr);

  if (isa<BitCastInst>(Ptr) &&
      isa<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0))) {
    Type *BitcastTy = Ptr->getType();
    Type *GEPTy = cast<BitCastInst>(Ptr)->getSrcTy();
    if (!isa<PointerType>(BitcastTy) || !isa<PointerType>(GEPTy))
      return nullptr;
    Type *Pointee1Ty = cast<PointerType>(BitcastTy)->getPointerElementType();
    Type *Pointee2Ty = cast<PointerType>(GEPTy)->getPointerElementType();
    const DataLayout &DL = cast<BitCastInst>(Ptr)->getModule()->getDataLayout();
    if (DL.getTypeSizeInBits(Pointee1Ty) == DL.getTypeSizeInBits(Pointee2Ty))
      return cast<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0));
  }
  return nullptr;
}

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or into multiple
/// scalar copies when interleaving. This class also implements the following
/// features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found, for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, ScalarEvolution *SE, LoopInfo *LI,
                      DominatorTree *DT, const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, unsigned VecWidth,
                      unsigned UnrollFactor, SCEVUnionPredicate &Preds)
      : OrigLoop(OrigLoop), SE(SE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        VF(VecWidth), UF(UnrollFactor), Builder(SE->getContext()),
        Induction(nullptr), OldInduction(nullptr), WidenMap(UnrollFactor),
        TripCount(nullptr), VectorTripCount(nullptr), Legal(nullptr),
        AddedSafetyChecks(false), Preds(Preds) {}

  // Perform the actual loop widening (vectorization).
  // MinimumBitWidths maps scalar integer values to the smallest bitwidth they
  // can be validly truncated to. The cost model has assumed this truncation
  // will happen when vectorizing.
  void vectorize(LoopVectorizationLegality *L,
                 MapVector<Instruction *, uint64_t> MinimumBitWidths) {
    MinBWs = MinimumBitWidths;
    Legal = L;
    // Create a new empty loop. Unlink the old loop and connect the new one.
    createEmptyLoop();
    // Widen each instruction in the old loop to a new one in the new loop.
    // Use the Legality module to find the induction and reduction variables.
    vectorizeLoop();
  }

  // Returns true if any runtime checks have been added.
  bool IsSafetyChecksAdded() { return AddedSafetyChecks; }

  virtual ~InnerLoopVectorizer() {}

protected:
  /// A small list of PHINodes.
  typedef SmallVector<PHINode *, 4> PhiVector;
  /// When we unroll loops we have multiple vector values for each scalar.
  /// This data structure holds the unrolled and vectorized values that
  /// originated from one scalar instruction.
  typedef SmallVector<Value *, 2> VectorParts;

  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
      EdgeMaskCache;

  /// Create an empty loop, based on the loop ranges of the old loop.
  void createEmptyLoop();
  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);
  /// Copy and widen the instructions from the old loop.
  virtual void vectorizeLoop();

  /// \brief The Loop exit block may have single value PHI nodes where the
  /// incoming value is 'Undef'. While vectorizing we only handled real values
  /// that were defined inside the loop. Here we fix the 'undef case'.
  /// See PR14725.
  void fixLCSSAPHIs();

  /// Shrinks vector element sizes based on information in "MinBWs".
  void truncateToMinimalBitwidths();

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);
  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// A helper function to vectorize a single BB within the innermost loop.
  void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VectorParts &Entry, unsigned UF,
                           unsigned VF, PhiVector *PV);

  /// Insert the new loop into the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars. If \p IfPredicateStore is true we need to 'hide' each
  /// scalarized instruction behind an if block predicated on the control
  /// dependence of the instruction.
  virtual void scalarizeInstruction(Instruction *Instr,
                                    bool IfPredicateStore = false);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step);

  /// When we go over instructions in the basic block we rely on previous
  /// values within the current basic block or on loop invariant values.
  /// When we widen (vectorize) values we place them in the map. If the values
  /// are not within the map, they have to be loop invariant, so we simply
  /// broadcast them into a vector.
  VectorParts &getVectorValue(Value *V);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Emit a bypass check to see if the trip count would overflow, or we
  /// wouldn't have enough iterations to execute one vector loop.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if the vector trip count is nonzero.
  void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// This is a helper class that holds the vectorizer state. It maps scalar
  /// instructions to vector instructions. When the code is 'unrolled' then
  /// a single scalar value is mapped to multiple vector parts. The parts
  /// are stored in the VectorParts type.
  struct ValueMap {
    /// C'tor. UnrollFactor controls the number of vectors ('parts') that
    /// are mapped.
    ValueMap(unsigned UnrollFactor) : UF(UnrollFactor) {}

    /// \return True if 'Key' is saved in the Value Map.
    bool has(Value *Key) const { return MapStorage.count(Key); }

    /// Initializes a new entry in the map. Sets all of the vector parts to the
    /// same value 'Val'.
    /// \return A reference to a vector with splat values.
    VectorParts &splat(Value *Key, Value *Val) {
      VectorParts &Entry = MapStorage[Key];
      Entry.assign(UF, Val);
      return Entry;
    }

    ///\return A reference to the value that is stored at 'Key'.
    VectorParts &get(Value *Key) {
      VectorParts &Entry = MapStorage[Key];
      if (Entry.empty())
        Entry.resize(UF);
      assert(Entry.size() == UF);
      return Entry;
    }

  private:
    /// The unroll factor. Each entry in the map stores this number of vector
    /// elements.
    unsigned UF;

    /// Map storage. We use std::map and not DenseMap because insertions into a
    /// dense map invalidate its iterators.
    std::map<Value *, VectorParts> MapStorage;
  };

  /// The original loop.
  Loop *OrigLoop;
  /// SCEV analysis to use.
  ScalarEvolution *SE;
  /// Loop Info.
  LoopInfo *LI;
  /// Dominator Tree.
  DominatorTree *DT;
  /// Alias Analysis.
  AliasAnalysis *AA;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

protected:
  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
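  /// For example, with VF = 4 and UF = 2, one scalar i32 value is widened
  /// into two <4 x i32> values, one per unrolled part.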
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;
  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;
  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;
  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;
  /// The vector loop body.
  SmallVector<BasicBlock *, 4> LoopVectorBody;
  /// The scalar loop body.
  BasicBlock *LoopScalarBody;
  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction;
  /// The induction variable of the old basic block.
  PHINode *OldInduction;
  /// Maps scalars to widened vectors.
  ValueMap WidenMap;
  /// Store instructions that should be predicated, as a pair
  /// <StoreInst, Predicate>.
  SmallVector<std::pair<StoreInst *, Value *>, 4> PredicatedStores;
  EdgeMaskCache MaskCache;
  /// Trip count of the original loop.
  Value *TripCount;
  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount;

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be
  /// truncated to this type.
  MapVector<Instruction *, uint64_t> MinBWs;
  LoopVectorizationLegality *Legal;

  // Record whether runtime checks have been added.
  bool AddedSafetyChecks;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  /// The predicate is used to simplify existing expressions in the
  /// context of existing SCEV assumptions. Since legality checking is
  /// not done here, we don't need to use this predicate to record
  /// further assumptions.
  SCEVUnionPredicate &Preds;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, ScalarEvolution *SE, LoopInfo *LI,
                    DominatorTree *DT, const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, unsigned UnrollFactor,
                    SCEVUnionPredicate &Preds)
      : InnerLoopVectorizer(OrigLoop, SE, LI, DT, TLI, TTI, 1, UnrollFactor,
                            Preds) {}

private:
  void scalarizeInstruction(Instruction *Instr,
                            bool IfPredicateStore = false) override;
  void vectorizeMemoryInstruction(Instruction *Instr) override;
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step) override;
  Value *reverseVector(Value *Vec) override;
};

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

/// \brief Set the debug location in the builder using the debug location in
/// the instruction.
static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr))
    B.SetCurrentDebugLocation(Inst->getDebugLoc());
  else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

/// \brief Propagate known metadata from one instruction to another.
static void propagateMetadata(Instruction *To, const Instruction *From) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  From->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto M : Metadata) {
    unsigned Kind = M.first;

    // These are safe to transfer (this is safe for TBAA, even when we
    // if-convert, because should that metadata have had a control dependency
    // on the condition, and thus actually aliased with some other
    // non-speculated memory access when the condition was false, this would be
    // caught by the runtime overlap checks).
    if (Kind != LLVMContext::MD_tbaa && Kind != LLVMContext::MD_alias_scope &&
        Kind != LLVMContext::MD_noalias && Kind != LLVMContext::MD_fpmath &&
        Kind != LLVMContext::MD_nontemporal)
      continue;

    To->setMetadata(Kind, M.second);
  }
}

/// \brief Propagate known metadata from one instruction to a vector of others.
static void propagateMetadata(SmallVectorImpl<Value *> &To,
                              const Instruction *From) {
  for (Value *V : To)
    if (Instruction *I = dyn_cast<Instruction>(V))
      propagateMetadata(I, From);
}

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
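/// A gap in a store group would make the wide store write lanes that the
/// scalar loop never writes, clobbering memory; load groups can simply leave
/// the corresponding vector lanes unused.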
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is relative to the leader and it could
  /// be negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The distance between the largest and smallest indices is always less
      // than the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if it contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey;
  int LargestKey;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32          // Insert Position
  //      %add = add i32 %even      // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32            // Def of %odd
  //      store i32 %odd            // Insert Position
  Instruction *InsertPos;
};

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise it's meaningless to do the analysis, as vectorization
/// of interleaved accesses would be unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
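/// For example, for the factor-4 load group shown earlier, each member load
/// would map to the same InterleaveGroup object (an illustration of the
/// intent, not of the exact in-memory layout).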
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(ScalarEvolution *SE, Loop *L, DominatorTree *DT,
                        SCEVUnionPredicate &Preds)
      : SE(SE), TheLoop(L), DT(DT), Preds(Preds) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if it doesn't belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

private:
  ScalarEvolution *SE;
  Loop *TheLoop;
  DominatorTree *DT;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  /// The predicate is used to simplify SCEV expressions in the
  /// context of existing SCEV assumptions. The interleaved access
  /// analysis can also add new predicates (for example by versioning
  /// strides of pointers).
  SCEVUnionPredicate &Preds;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor(int Stride, const SCEV *Scev, unsigned Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    StrideDescriptor() : Stride(0), Scev(nullptr), Size(0), Align(0) {}

    int Stride;       // The access's stride. It is negative for a reverse access.
    const SCEV *Scev; // The scalar expression of this access.
    unsigned Size;    // The size of the memory object.
    unsigned Align;   // The alignment of this access.
  };

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStridedAccesses(
      MapVector<Instruction *, StrideDescriptor> &StrideAccesses,
      const ValueToValueMap &Strides);
};

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
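/// A hint is stored as one operand of the loop id metadata node, for example
/// (the expected form for a width hint of 4):
///   !{!"llvm.loop.vectorize.width", i32 4}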
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for them.
class LoopVectorizeHints {
  enum HintKind {
    HK_WIDTH,
    HK_UNROLL,
    HK_FORCE
  };

  /// Hint - associates a name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;
  /// Vectorization interleave factor.
  Hint Interleave;
  /// Vectorization forced.
  Hint Force;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  LoopVectorizeHints(const Loop *L, bool DisableInterleaving)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE), TheLoop(L) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized() {
    Width.Value = Interleave.Value = 1;
    Hint Hints[] = {Width, Interleave};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      emitOptimizationRemarkAnalysis(F->getContext(),
                                     vectorizeAnalysisPassName(), *F,
                                     L->getStartLoc(), emitRemark());
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      emitOptimizationRemarkAnalysis(F->getContext(),
                                     vectorizeAnalysisPassName(), *F,
                                     L->getStartLoc(), emitRemark());
      return false;
    }

    if (getWidth() == 1 && getInterleave() == 1) {
      // FIXME: Add a separate metadata to indicate when the loop has already
      // been vectorized instead of setting width and count to 1.
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      emitOptimizationRemarkAnalysis(
          F->getContext(), vectorizeAnalysisPassName(), *F, L->getStartLoc(),
          "loop not vectorized: vectorization and interleaving are explicitly "
          "disabled, or vectorize width and interleave count are both set to "
          "1");
      return false;
    }

    return true;
  }

  /// Dumps all the hint information.
  std::string emitRemark() const {
    VectorizationReport R;
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      R << "vectorization is explicitly disabled";
    else {
      R << "use -Rpass-analysis=loop-vectorize for more info";
      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
        R << " (Force=true";
        if (Width.Value != 0)
          R << ", Vector Width=" << Width.Value;
        if (Interleave.Value != 0)
          R << ", Interleave Count=" << Interleave.Value;
        R << ")";
      }
    }

    return R.str();
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }
  const char *vectorizeAnalysisPassName() const {
    // If hints are provided that don't disable vectorization use the
    // AlwaysPrint pass name to force the frontend to print the diagnostic.
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return DiagnosticInfo::AlwaysPrint;
  }

  bool allowReordering() const {
    // When loop hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations given by the scalar loop.
    // This is not enabled by default because it can be unsafe or inefficient.
    // For example, reordering floating-point operations will change the way
    // round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either a MDString or a MDNode with the first
      // operand a MDString.
      if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        if (!MD || MD->getNumOperands() == 0)
          continue;
        S = dyn_cast<MDString>(MD->getOperand(0));
        for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
          Args.push_back(MD->getOperand(i));
      } else {
        S = dyn_cast<MDString>(LoopID->getOperand(i));
        assert(Args.size() == 0 && "too many arguments for MDString");
      }

      if (!S)
        continue;

      // Check if the hint starts with the loop metadata prefix.
      StringRef Name = S->getString();
      if (Args.size() == 1)
        setHint(Name, Args[0]);
    }
  }

  /// Checks a string hint with one operand and sets the value if valid.
  void setHint(StringRef Name, Metadata *Arg) {
    if (!Name.startswith(Prefix()))
      return;
    Name = Name.substr(Prefix().size(), StringRef::npos);

    const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
    if (!C)
      return;
    unsigned Val = C->getZExtValue();

    Hint *Hints[] = {&Width, &Interleave, &Force};
    for (auto H : Hints) {
      if (Name == H->Name) {
        if (H->validate(Val))
          H->Value = Val;
        else
          DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
        break;
      }
    }
  }

  /// Create a new hint from a name / value pair.
  MDNode *createHintMetadata(StringRef Name, unsigned V) const {
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    Metadata *MDs[] = {MDString::get(Context, Name),
                       ConstantAsMetadata::get(
                           ConstantInt::get(Type::getInt32Ty(Context), V))};
    return MDNode::get(Context, MDs);
  }

  /// Matches metadata with the hint name.
  bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
    MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
    if (!Name)
      return false;

    for (auto H : HintTypes)
      if (Name->getString().endswith(H.Name))
        return true;
    return false;
  }

  /// Sets current hints into loop metadata, keeping other values intact.
  void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
    if (HintTypes.size() == 0)
      return;

    // Reserve the first element to LoopID (see below).
    SmallVector<Metadata *, 4> MDs(1);
    // If the loop already has metadata, then ignore the existing operands.
    MDNode *LoopID = TheLoop->getLoopID();
    if (LoopID) {
      for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
        MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
        // If the node is in the update list, ignore the old value.
        if (!matchesHintMetadataName(Node, HintTypes))
          MDs.push_back(Node);
      }
    }

    // Now, add the missing hints.
    for (auto H : HintTypes)
      MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));

    // Replace the current metadata node with the new one.
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);

    TheLoop->setLoopID(NewLoopID);
  }

  /// The loop these hints belong to.
  const Loop *TheLoop;
};

static void emitAnalysisDiag(const Function *TheFunction, const Loop *TheLoop,
                             const LoopVectorizeHints &Hints,
                             const LoopAccessReport &Message) {
  const char *Name = Hints.vectorizeAnalysisPassName();
  LoopAccessReport::emitAnalysis(Message, TheFunction, TheLoop, Name);
}

static void emitMissedWarning(Function *F, Loop *L,
                              const LoopVectorizeHints &LH) {
  emitOptimizationRemarkMissed(F->getContext(), LV_NAME, *F, L->getStartLoc(),
                               LH.emitRemark());

  if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
    if (LH.getWidth() != 1)
      emitLoopVectorizeWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop vectorization");
    else if (LH.getInterleave() != 1)
      emitLoopInterleaveWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop interleaving");
  }
}

/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
/// to what vectorization factor.
/// This class does not look at the profitability of vectorization, only the
/// legality. This class has two main kinds of checks:
/// * Memory checks - The code in canVectorizeMemory checks if vectorization
///   will change the order of memory accesses in a way that will change the
///   correctness of the program.
/// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory
///   checks for a number of different conditions, such as the availability of
///   a single induction variable, that all types are supported and
///   vectorizable, etc. This code reflects the capabilities of
///   InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying the
/// induction variable and the different reduction variables.
class LoopVectorizationLegality {
public:
  LoopVectorizationLegality(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
                            TargetLibraryInfo *TLI, AliasAnalysis *AA,
                            Function *F, const TargetTransformInfo *TTI,
                            LoopAccessAnalysis *LAA,
                            LoopVectorizationRequirements *R,
                            const LoopVectorizeHints *H,
                            SCEVUnionPredicate &Preds)
      : NumPredStores(0), TheLoop(L), SE(SE), TLI(TLI), TheFunction(F),
        TTI(TTI), DT(DT), LAA(LAA), LAI(nullptr),
        InterleaveInfo(SE, L, DT, Preds), Induction(nullptr),
        WidestIndTy(nullptr), HasFunNoNaNAttr(false), Requirements(R), Hints(H),
        Preds(Preds) {}

  /// ReductionList contains the reduction descriptors for all
  /// of the reductions that were found in the loop.
  typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;

  /// InductionList saves induction variables and maps them to the
  /// induction descriptor.
  typedef MapVector<PHINode *, InductionDescriptor> InductionList;

  /// Returns true if it is legal to vectorize this loop.
  /// This does not mean that it is profitable to vectorize this
  /// loop, only that it is legal to do so.
  bool canVectorize();

  /// Returns the induction variable.
  PHINode *getInduction() { return Induction; }

  /// Returns the reduction variables found in the loop.
  ReductionList *getReductionVars() { return &Reductions; }

  /// Returns the induction variables found in the loop.
  InductionList *getInductionVars() { return &Inductions; }

  /// Returns the widest induction type.
  Type *getWidestInductionType() { return WidestIndTy; }

  /// Returns true if V is an induction variable in this loop.
  bool isInductionVariable(const Value *V);

  /// Returns true if PN is a reduction variable in this loop.
  bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  bool blockNeedsPredication(BasicBlock *BB);

  /// Check if this pointer is consecutive when vectorizing. This happens
  /// when the last index of the GEP is the induction variable, or when the
  /// pointer itself is an induction variable.
  /// This check allows us to vectorize A[idx] into a wide load/store.
  /// Returns:
  /// 0 - Stride is unknown or non-consecutive.
  /// 1 - Address is consecutive.
  /// -1 - Address is consecutive, and decreasing.
  int isConsecutivePtr(Value *Ptr);

  /// Returns true if the value V is uniform within the loop.
  bool isUniform(Value *V);

  /// Returns true if this instruction will remain scalar after vectorization.
  bool isUniformAfterVectorization(Instruction *I) { return Uniforms.count(I); }

  /// Returns the information that we collected about runtime memory checks.
  const RuntimePointerChecking *getRuntimePointerChecking() const {
    return LAI->getRuntimePointerChecking();
  }

  const LoopAccessInfo *getLAI() const { return LAI; }

  /// \brief Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// \brief Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }

  bool hasStride(Value *V) { return StrideSet.count(V); }
  bool mustCheckStrides() { return !StrideSet.empty(); }
  SmallPtrSet<Value *, 8>::iterator strides_begin() {
    return StrideSet.begin();
  }
  SmallPtrSet<Value *, 8>::iterator strides_end() { return StrideSet.end(); }

  /// Returns true if the target machine supports a masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
  }
  /// Returns true if the target machine supports a masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
  }
  /// Returns true if the vector representation of the instruction \p I
  /// requires a mask.
  bool isMaskRequired(const Instruction *I) {
    return (MaskedOp.count(I) != 0);
  }
  unsigned getNumStores() const { return LAI->getNumStores(); }
  unsigned getNumLoads() const { return LAI->getNumLoads(); }
  unsigned getNumPredStores() const { return NumPredStores; }

private:
  /// Check if a single basic block loop is vectorizable.
  /// At this point we know that this is a loop with a constant trip count
  /// and we only need to check individual instructions.
  bool canVectorizeInstrs();

  /// When we vectorize loops we may change the order in which
  /// we read and write from memory. This method checks if it is
  /// legal to vectorize the code, considering only memory constraints.
  /// Returns true if the loop is vectorizable.
  bool canVectorizeMemory();

  /// Return true if we can vectorize this loop using the IF-conversion
  /// transformation.
  bool canVectorizeWithIfConvert();

  /// Collect the variables that need to stay uniform after vectorization.
  void collectLoopUniforms();

  /// Return true if all of the instructions in the block can be speculatively
  /// executed. \p SafePtrs is a list of addresses that are known to be legal
  /// and we know that we can read from them without segfault.
  bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);

  /// \brief Collect memory accesses with loop invariant strides.
  ///
  /// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
  /// invariant.
  void collectStridedAccess(Value *LoadOrStoreInst);

  /// Report an analysis message to assist the user in diagnosing loops that
  /// are not vectorized. These are handled as LoopAccessReport rather than
  /// VectorizationReport because the << operator of VectorizationReport
  /// returns LoopAccessReport.
  void emitAnalysis(const LoopAccessReport &Message) const {
    emitAnalysisDiag(TheFunction, TheLoop, *Hints, Message);
  }

  unsigned NumPredStores;

  /// The loop that we evaluate.
  Loop *TheLoop;
  /// SCEV analysis.
  ScalarEvolution *SE;
  /// Target Library Info.
  TargetLibraryInfo *TLI;
  /// Parent function.
  Function *TheFunction;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Dominator Tree.
  DominatorTree *DT;
  // LoopAccess analysis.
  LoopAccessAnalysis *LAA;
  // And the loop-accesses info corresponding to this loop. This pointer is
  // null until canVectorizeMemory sets it up.
  const LoopAccessInfo *LAI;

  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride and close to each other.
  InterleavedAccessInfo InterleaveInfo;

  // --- vectorization state --- //

  /// Holds the integer induction variable. This is the counter of the
  /// loop.
  PHINode *Induction;
  /// Holds the reduction variables.
  ReductionList Reductions;
  /// Holds all of the induction variables that we found in the loop.
  /// Notice that inductions don't need to start at zero and that induction
  /// variables can be pointers.
  InductionList Inductions;
  /// Holds the widest induction type encountered.
  Type *WidestIndTy;

  /// Allowed outside users. This holds the reduction
  /// vars which can be accessed from outside the loop.
  SmallPtrSet<Value *, 4> AllowedExit;
  /// This set holds the variables which are known to be uniform after
  /// vectorization.
  SmallPtrSet<Instruction *, 4> Uniforms;

  /// Can we assume the absence of NaNs.
  bool HasFunNoNaNAttr;

  /// Vectorization requirements that will go through late-evaluation.
  LoopVectorizationRequirements *Requirements;

  /// Used to emit an analysis of any legality issues.
  const LoopVectorizeHints *Hints;

  ValueToValueMap Strides;
  SmallPtrSet<Value *, 8> StrideSet;

  /// While vectorizing these instructions we have to generate a
  /// call to the appropriate masked intrinsic.
  SmallPtrSet<const Instruction *, 8> MaskedOp;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  /// The predicate is used to simplify SCEV expressions in the
  /// context of existing SCEV assumptions. The analysis will also
  /// add a minimal set of new predicates if this is required to
  /// enable vectorization/unrolling.
  SCEVUnionPredicate &Preds;
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(Loop *L, ScalarEvolution *SE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC, const Function *F,
                             const LoopVectorizeHints *Hints,
                             SmallPtrSetImpl<const Value *> &ValuesToIgnore,
                             SCEVUnionPredicate &Preds)
      : TheLoop(L), SE(SE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
        TheFunction(F), Hints(Hints), ValuesToIgnore(ValuesToIgnore) {}

  /// Information about vectorization costs.
  struct VectorizationFactor {
    unsigned Width; // Vector width with best cost.
    unsigned Cost;  // Cost of the loop with that width.
  };
  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to VF. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(bool OptForSize);

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
                                 unsigned LoopCost);

  /// \return The most profitable unroll factor.
  /// This method finds the best unroll-factor based on register pressure and
  /// other parameters. VF and LoopCost are the selected vectorization factor
  /// and the cost of the selected VF.
  unsigned computeInterleaveCount(bool OptForSize, unsigned VF,
                                  unsigned LoopCost);

  /// \brief A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    unsigned LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
                                 unsigned LoopCost);

  /// \return The most profitable interleave (unroll) factor.
  /// This method finds the best interleave count based on register pressure
  /// and other parameters. VF and LoopCost are the selected vectorization
  /// factor and the cost of the selected VF.
  unsigned computeInterleaveCount(bool OptForSize, unsigned VF,
                                  unsigned LoopCost);

  /// \brief A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    unsigned LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    unsigned MaxLocalUsers;
    /// Holds the number of instructions in the loop.
    unsigned NumInstructions;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(const SmallVector<unsigned, 8> &VFs);

private:
  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  unsigned expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  unsigned getInstructionCost(Instruction *I, unsigned VF);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Report an analysis message to assist the user in diagnosing loops that are
  /// not vectorized. These are handled as LoopAccessReport rather than
  /// VectorizationReport because the << operator of VectorizationReport returns
  /// LoopAccessReport.
  void emitAnalysis(const LoopAccessReport &Message) const {
    emitAnalysisDiag(TheFunction, TheLoop, *Hints, Message);
  }

public:
  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction*,uint64_t> MinBWs;

  /// The loop that we evaluate.
  Loop *TheLoop;
  /// Scev analysis.
  ScalarEvolution *SE;
  /// Loop Info analysis.
  LoopInfo *LI;
  /// Vectorization legality.
  LoopVectorizationLegality *Legal;
  /// Vector target information.
  const TargetTransformInfo &TTI;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Demanded bits analysis.
  DemandedBits *DB;
  const Function *TheFunction;
  // Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;
  // Values to ignore in the cost model.
  const SmallPtrSetImpl<const Value *> &ValuesToIgnore;
};

/// \brief This holds vectorization requirements that must be verified late in
/// the process. The requirements are set by legalize and costmodel. Once
/// vectorization has been determined to be possible and profitable the
/// requirements can be verified by looking for metadata or compiler options.
/// For example, some loops require FP commutativity which is only allowed if
/// vectorization is explicitly specified or if the fast-math compiler option
/// has been provided.
/// Late evaluation of these requirements allows helpful diagnostics to be
/// composed that tell the user what needs to be done to vectorize the loop,
/// for example, by specifying #pragma clang loop vectorize or -ffast-math.
/// Late evaluation should be used only when diagnostics can be generated that
/// can be followed by a non-expert user.
class LoopVectorizationRequirements {
public:
  LoopVectorizationRequirements()
      : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr) {}

  void addUnsafeAlgebraInst(Instruction *I) {
    // First unsafe algebra instruction.
    if (!UnsafeAlgebraInst)
      UnsafeAlgebraInst = I;
  }

  void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }

  bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
    const char *Name = Hints.vectorizeAnalysisPassName();
    bool Failed = false;
    if (UnsafeAlgebraInst && !Hints.allowReordering()) {
      emitOptimizationRemarkAnalysisFPCommute(
          F->getContext(), Name, *F, UnsafeAlgebraInst->getDebugLoc(),
          VectorizationReport() << "cannot prove it is safe to reorder "
                                   "floating-point operations");
      Failed = true;
    }

    // Test if runtime memcheck thresholds are exceeded.
    bool PragmaThresholdReached =
        NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
    bool ThresholdReached =
        NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
    if ((ThresholdReached && !Hints.allowReordering()) ||
        PragmaThresholdReached) {
      emitOptimizationRemarkAnalysisAliasing(
          F->getContext(), Name, *F, L->getStartLoc(),
          VectorizationReport()
              << "cannot prove it is safe to reorder memory operations");
      DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
      Failed = true;
    }

    return Failed;
  }

private:
  unsigned NumRuntimePointerChecks;
  Instruction *UnsafeAlgebraInst;
};

static void addInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) {
  if (L.empty())
    return V.push_back(&L);

  for (Loop *InnerL : L)
    addInnerLoop(*InnerL, V);
}
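// For example, given the nest
//   for (i)        // not innermost: has subloops, so L.empty() is false
//     for (j)      //   innermost, collected
//     for (k)      //   innermost, collected
// addInnerLoop pushes only the innermost loops (j and k) onto the worklist;
// the outer loop itself is never queued for vectorization.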
/// The LoopVectorize Pass.
struct LoopVectorize : public FunctionPass {
  /// Pass identification, replacement for typeid.
  static char ID;

  explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true)
      : FunctionPass(ID),
        DisableUnrolling(NoUnrolling),
        AlwaysVectorize(AlwaysVectorize) {
    initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
  }

  ScalarEvolution *SE;
  LoopInfo *LI;
  TargetTransformInfo *TTI;
  DominatorTree *DT;
  BlockFrequencyInfo *BFI;
  TargetLibraryInfo *TLI;
  DemandedBits *DB;
  AliasAnalysis *AA;
  AssumptionCache *AC;
  LoopAccessAnalysis *LAA;
  bool DisableUnrolling;
  bool AlwaysVectorize;

  BlockFrequency ColdEntryFreq;

  bool runOnFunction(Function &F) override {
    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    TLI = TLIP ? &TLIP->getTLI() : nullptr;
    AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    LAA = &getAnalysis<LoopAccessAnalysis>();
    DB = &getAnalysis<DemandedBits>();

    // Compute some weights outside of the loop over the loops. Compute this
    // using a BranchProbability to re-use its scaling math.
    const BranchProbability ColdProb(1, 5); // 20%
    ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb;

    // Don't attempt if
    // 1. the target claims to have no vector registers, and
    // 2. interleaving won't help ILP.
    //
    // The second condition is necessary because, even if the target has no
    // vector registers, loop vectorization may still enable scalar
    // interleaving.
    if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
      return false;

    // Build up a worklist of inner-loops to vectorize. This is necessary as
    // the act of vectorizing or partially unrolling a loop creates new loops
    // and can invalidate iterators across the loops.
    SmallVector<Loop *, 8> Worklist;

    for (Loop *L : *LI)
      addInnerLoop(*L, Worklist);

    LoopsAnalyzed += Worklist.size();

    // Now walk the identified inner loops.
    bool Changed = false;
    while (!Worklist.empty())
      Changed |= processLoop(Worklist.pop_back_val());

    // Process each loop nest in the function.
    return Changed;
  }

  static void AddRuntimeUnrollDisableMetaData(Loop *L) {
    SmallVector<Metadata *, 4> MDs;
    // Reserve first location for self reference to the LoopID metadata node.
    MDs.push_back(nullptr);
    bool IsUnrollMetadata = false;
    MDNode *LoopID = L->getLoopID();
    if (LoopID) {
      // First find existing loop unrolling disable metadata.
      for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
        MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
        if (MD) {
          const MDString *S = dyn_cast<MDString>(MD->getOperand(0));
          IsUnrollMetadata =
              S && S->getString().startswith("llvm.loop.unroll.disable");
        }
        MDs.push_back(LoopID->getOperand(i));
      }
    }

    if (!IsUnrollMetadata) {
      // Add runtime unroll disable metadata.
      LLVMContext &Context = L->getHeader()->getContext();
      SmallVector<Metadata *, 1> DisableOperands;
      DisableOperands.push_back(
          MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
      MDNode *DisableNode = MDNode::get(Context, DisableOperands);
      MDs.push_back(DisableNode);
      MDNode *NewLoopID = MDNode::get(Context, MDs);
      // Set operand 0 to refer to the loop id itself.
      NewLoopID->replaceOperandWith(0, NewLoopID);
      L->setLoopID(NewLoopID);
    }
  }

  bool processLoop(Loop *L) {
    assert(L->empty() && "Only process inner loops.");

#ifndef NDEBUG
    const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

    DEBUG(dbgs() << "\nLV: Checking a loop in \""
                 << L->getHeader()->getParent()->getName() << "\" from "
                 << DebugLocStr << "\n");

    LoopVectorizeHints Hints(L, DisableUnrolling);

    DEBUG(dbgs() << "LV: Loop hints:"
                 << " force="
                 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                         ? "disabled"
                         : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                                ? "enabled"
                                : "?")) << " width=" << Hints.getWidth()
                 << " unroll=" << Hints.getInterleave() << "\n");

    // Function containing the loop.
    Function *F = L->getHeader()->getParent();

    // Looking at the diagnostic output is the only way to determine if a loop
    // was vectorized (other than looking at the IR or machine code), so it
    // is important to generate an optimization remark for each loop. Most of
    // these messages are generated by emitOptimizationRemarkAnalysis. Remarks
    // generated by emitOptimizationRemark and emitOptimizationRemarkMissed
    // are less verbose and report vectorized loops and unvectorized loops
    // that may benefit from vectorization, respectively.
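    // For example (shape only; the exact text comes from the remark calls in
    // this function), compiling with -Rpass=loop-vectorize and
    // -Rpass-analysis=loop-vectorize surfaces remarks such as:
    //   foo.c:7:5: remark: vectorized loop (vectorization width: 4, ...)
    //   foo.c:12:3: remark: loop not vectorized: <analysis message>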
    if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
      DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
      return false;
    }

    // Check the loop for a trip count threshold:
    // do not vectorize loops with a tiny trip count.
    const unsigned TC = SE->getSmallConstantTripCount(L);
    if (TC > 0u && TC < TinyTripCountVectorThreshold) {
      DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                   << "This loop is not worth vectorizing.");
      if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
        DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
      else {
        DEBUG(dbgs() << "\n");
        emitAnalysisDiag(F, L, Hints, VectorizationReport()
                                          << "vectorization is not beneficial "
                                             "and is not explicitly forced");
        return false;
      }
    }

    SCEVUnionPredicate Preds;

    // Check if it is legal to vectorize the loop.
    LoopVectorizationRequirements Requirements;
    LoopVectorizationLegality LVL(L, SE, DT, TLI, AA, F, TTI, LAA,
                                  &Requirements, &Hints, Preds);
    if (!LVL.canVectorize()) {
      DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
      emitMissedWarning(F, L, Hints);
      return false;
    }

    // Collect values we want to ignore in the cost model. This includes
    // type-promoting instructions we identified during reduction detection.
    SmallPtrSet<const Value *, 32> ValuesToIgnore;
    CodeMetrics::collectEphemeralValues(L, AC, ValuesToIgnore);
    for (auto &Reduction : *LVL.getReductionVars()) {
      RecurrenceDescriptor &RedDes = Reduction.second;
      SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
      ValuesToIgnore.insert(Casts.begin(), Casts.end());
    }

    // Use the cost model.
    LoopVectorizationCostModel CM(L, SE, LI, &LVL, *TTI, TLI, DB, AC, F, &Hints,
                                  ValuesToIgnore, Preds);

    // Check the function attributes to find out if this function should be
    // optimized for size.
    bool OptForSize = Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
                      F->optForSize();

    // Compute the weighted frequency of this loop being executed and see if it
    // is less than 20% of the function entry baseline frequency. Note that we
    // always have a canonical loop here because we think we *can* vectorize.
    // FIXME: This is hidden behind a flag due to pervasive problems with
    // exactly what block frequency models.
    if (LoopVectorizeWithBlockFrequency) {
      BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader());
      if (Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
          LoopEntryFreq < ColdEntryFreq)
        OptForSize = true;
    }
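    // Illustration with assumed numbers: if the function entry frequency is
    // 1000, ColdEntryFreq (computed in runOnFunction with ColdProb = 20%) is
    // 200; a loop whose preheader frequency is below 200 is treated as cold
    // and costed as if optimizing for size, unless vectorization was
    // explicitly forced.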
    // Check the function attributes to see if implicit floats are allowed.
    // FIXME: This check doesn't seem possibly correct -- what if the loop is
    // an integer loop and the vector instructions selected are purely integer
    // vector instructions?
    if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
      DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat "
                      "attribute is used.\n");
      emitAnalysisDiag(
          F, L, Hints,
          VectorizationReport()
              << "loop not vectorized due to NoImplicitFloat attribute");
      emitMissedWarning(F, L, Hints);
      return false;
    }

    // Select the optimal vectorization factor.
    const LoopVectorizationCostModel::VectorizationFactor VF =
        CM.selectVectorizationFactor(OptForSize);

    // Select the interleave count.
    unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);

    // Get the user-specified interleave count.
    unsigned UserIC = Hints.getInterleave();

    // Identify the diagnostic messages that should be produced.
    std::string VecDiagMsg, IntDiagMsg;
    bool VectorizeLoop = true, InterleaveLoop = true;

    if (Requirements.doesNotMeet(F, L, Hints)) {
      DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                      "requirements.\n");
      emitMissedWarning(F, L, Hints);
      return false;
    }

    if (VF.Width == 1) {
      DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
      VecDiagMsg =
          "the cost-model indicates that vectorization is not beneficial";
      VectorizeLoop = false;
    }

    if (IC == 1 && UserIC <= 1) {
      // Tell the user interleaving is not beneficial.
      DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
      IntDiagMsg =
          "the cost-model indicates that interleaving is not beneficial";
      InterleaveLoop = false;
      if (UserIC == 1)
        IntDiagMsg +=
            " and is explicitly disabled or interleave count is set to 1";
    } else if (IC > 1 && UserIC == 1) {
      // Tell the user interleaving is beneficial, but it is explicitly
      // disabled.
      DEBUG(dbgs()
            << "LV: Interleaving is beneficial but is explicitly disabled.");
      IntDiagMsg = "the cost-model indicates that interleaving is beneficial "
                   "but is explicitly disabled or interleave count is set to 1";
      InterleaveLoop = false;
    }

    // Override IC if the user provided an interleave count.
    IC = UserIC > 0 ? UserIC : IC;

    // Emit diagnostic messages, if any.
    const char *VAPassName = Hints.vectorizeAnalysisPassName();
    if (!VectorizeLoop && !InterleaveLoop) {
      // Do not vectorize or interleave the loop.
      emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
                                     L->getStartLoc(), VecDiagMsg);
      emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
                                     L->getStartLoc(), IntDiagMsg);
      return false;
    } else if (!VectorizeLoop && InterleaveLoop) {
      DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
      emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
                                     L->getStartLoc(), VecDiagMsg);
    } else if (VectorizeLoop && !InterleaveLoop) {
      DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                   << DebugLocStr << '\n');
      emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
                                     L->getStartLoc(), IntDiagMsg);
    } else if (VectorizeLoop && InterleaveLoop) {
      DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                   << DebugLocStr << '\n');
      DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    }
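    // Summary of the outcomes at this point:
    //   VectorizeLoop && InterleaveLoop   -> widen by VF and interleave by IC
    //                                        (handled below).
    //   VectorizeLoop && !InterleaveLoop  -> widen by VF only; IC is 1 here.
    //   !VectorizeLoop && InterleaveLoop  -> keep the loop scalar but
    //                                        interleave (unroll) it by IC.
    //   !VectorizeLoop && !InterleaveLoop -> already returned false above.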
    if (!VectorizeLoop) {
      assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided that it is not legal to vectorize the loop then
      // interleave it.
      InnerLoopUnroller Unroller(L, SE, LI, DT, TLI, TTI, IC, Preds);
      Unroller.vectorize(&LVL, CM.MinBWs);

      emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
                             Twine("interleaved loop (interleave count: ") +
                                 Twine(IC) + ")");
    } else {
      // If we decided that it is *legal* to vectorize the loop then do it.
      InnerLoopVectorizer LB(L, SE, LI, DT, TLI, TTI, VF.Width, IC, Preds);
      LB.vectorize(&LVL, CM.MinBWs);
      ++LoopsVectorized;

      // Add metadata to disable runtime unrolling of the scalar loop when
      // there are no runtime checks for strides and memory: in that situation
      // the scalar loop is rarely executed and is not worth unrolling.
      if (!LB.IsSafetyChecksAdded())
        AddRuntimeUnrollDisableMetaData(L);

      // Report the vectorization decision.
      emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
                             Twine("vectorized loop (vectorization width: ") +
                                 Twine(VF.Width) + ", interleave count: " +
                                 Twine(IC) + ")");
    }

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();

    DEBUG(verifyFunction(*L->getHeader()->getParent()));
    return true;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequiredID(LoopSimplifyID);
    AU.addRequiredID(LCSSAID);
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<LoopAccessAnalysis>();
    AU.addRequired<DemandedBits>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
  }

};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
// LoopVectorizationCostModel.
//===----------------------------------------------------------------------===//

Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop.
  Instruction *Instr = dyn_cast<Instruction>(V);
  bool NewInstr =
      (Instr && std::find(LoopVectorBody.begin(), LoopVectorBody.end(),
                          Instr->getParent()) != LoopVectorBody.end());
  bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;

  // Place the code for broadcasting invariant variables in the new preheader.
  IRBuilder<>::InsertPointGuard Guard(Builder);
  if (Invariant)
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // Broadcast the scalar into all locations in the vector.
  Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

  return Shuf;
}
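// Conceptually, broadcasting a scalar %x at VF = 4 produces IR of this shape
// (emitted by IRBuilder::CreateVectorSplat):
//   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %x, i32 0
//   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
//                                    <4 x i32> undef, <4 x i32> zeroinitializer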
Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx,
                                          Value *Step) {
  assert(Val->getType()->isVectorTy() && "Must be a vector");
  assert(Val->getType()->getScalarType()->isIntegerTy() &&
         "Elem must be an integer");
  assert(Step->getType() == Val->getType()->getScalarType() &&
         "Step has wrong type");
  // Create the types.
  Type *ITy = Val->getType()->getScalarType();
  VectorType *Ty = cast<VectorType>(Val->getType());
  int VLen = Ty->getNumElements();
  SmallVector<Constant*, 8> Indices;

  // Create a vector of consecutive numbers from zero to VF.
  for (int i = 0; i < VLen; ++i)
    Indices.push_back(ConstantInt::get(ITy, StartIdx + i));

  // Add the consecutive indices to the vector value.
  Constant *Cv = ConstantVector::get(Indices);
  assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
  Step = Builder.CreateVectorSplat(VLen, Step);
  assert(Step->getType() == Val->getType() && "Invalid step vec");
  // FIXME: The newly created binary instructions should contain nsw/nuw flags,
  // which can be found from the original scalar operations.
  Step = Builder.CreateMul(Cv, Step);
  return Builder.CreateAdd(Val, Step, "induction");
}
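// For example, with VF = 4, Val = <i, i, i, i>, StartIdx = 0 and a scalar
// step s, getStepVector computes
//   <i, i, i, i> + <0, 1, 2, 3> * <s, s, s, s>
// i.e. the per-lane induction values <i, i+s, i+2*s, i+3*s>.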
int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr");
  // Make sure that the pointer does not point to structs.
  if (Ptr->getType()->getPointerElementType()->isAggregateType())
    return 0;

  // If this value is a pointer induction variable we know it is consecutive.
  PHINode *Phi = dyn_cast_or_null<PHINode>(Ptr);
  if (Phi && Inductions.count(Phi)) {
    InductionDescriptor II = Inductions[Phi];
    return II.getConsecutiveDirection();
  }

  GetElementPtrInst *Gep = getGEPInstruction(Ptr);
  if (!Gep)
    return 0;

  unsigned NumOperands = Gep->getNumOperands();
  Value *GpPtr = Gep->getPointerOperand();
  // If this GEP value is a consecutive pointer induction variable and all of
  // the indices are constant then we know it is consecutive.
  Phi = dyn_cast<PHINode>(GpPtr);
  if (Phi && Inductions.count(Phi)) {

    // Make sure that the pointer does not point to structs.
    PointerType *GepPtrType = cast<PointerType>(GpPtr->getType());
    if (GepPtrType->getElementType()->isAggregateType())
      return 0;

    // Make sure that all of the index operands are loop invariant.
    for (unsigned i = 1; i < NumOperands; ++i)
      if (!SE->isLoopInvariant(SE->getSCEV(Gep->getOperand(i)), TheLoop))
        return 0;

    InductionDescriptor II = Inductions[Phi];
    return II.getConsecutiveDirection();
  }

  unsigned InductionOperand = getGEPInductionOperand(Gep);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned i = 0; i != NumOperands; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(Gep->getOperand(i)), TheLoop))
      return 0;

  // We can emit wide loads/stores only if the last non-zero index is the
  // induction variable.
  const SCEV *Last = nullptr;
  if (!Strides.count(Gep))
    Last = SE->getSCEV(Gep->getOperand(InductionOperand));
  else {
    // Because of the multiplication by a stride we can have a s/zext cast.
    // We are going to replace this stride by 1 so the cast is safe to ignore.
    //
    //  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
    //  %0 = trunc i64 %indvars.iv to i32
    //  %mul = mul i32 %0, %Stride1
    //  %idxprom = zext i32 %mul to i64  << Safe cast.
    //  %arrayidx = getelementptr inbounds i32* %B, i64 %idxprom
    //
    Last = replaceSymbolicStrideSCEV(SE, Strides, Preds,
                                     Gep->getOperand(InductionOperand), Gep);
    if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(Last))
      Last =
          (C->getSCEVType() == scSignExtend || C->getSCEVType() == scZeroExtend)
              ? C->getOperand()
              : Last;
  }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Last)) {
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // The memory is consecutive because the last index is consecutive
    // and all other indices are loop invariant.
    if (Step->isOne())
      return 1;
    if (Step->isAllOnesValue())
      return -1;
  }

  return 0;
}

bool LoopVectorizationLegality::isUniform(Value *V) {
  return LAI->isUniform(V);
}

InnerLoopVectorizer::VectorParts&
InnerLoopVectorizer::getVectorValue(Value *V) {
  assert(V != Induction && "The new induction variable should not be used.");
  assert(!V->getType()->isVectorTy() && "Can't widen a vector");

  // If we have a stride that is replaced by one, do it here.
  if (Legal->hasStride(V))
    V = ConstantInt::get(V->getType(), 1);

  // If we have this scalar in the map, return it.
  if (WidenMap.has(V))
    return WidenMap.get(V);

  // If this scalar is unknown, assume that it is a constant or that it is
  // loop invariant. Broadcast V and save the value for future uses.
  Value *B = getBroadcastInstrs(V);
  return WidenMap.splat(V, B);
}

Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
  assert(Vec->getType()->isVectorTy() && "Invalid type");
  SmallVector<Constant*, 8> ShuffleMask;
  for (unsigned i = 0; i < VF; ++i)
    ShuffleMask.push_back(Builder.getInt32(VF - i - 1));

  return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
                                     ConstantVector::get(ShuffleMask),
                                     "reverse");
}
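// For example, with VF = 4 the mask built above is <3, 2, 1, 0>, so
// reverseVector turns <a, b, c, d> into <d, c, b, a>.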
// Get a mask to interleave \p NumVec vectors into a wide vector.
// I.e. <0, VF, VF*2, ..., VF*(NumVec-1), 1, VF+1, VF*2+1, ...>
// E.g. For 2 interleaved vectors, if VF is 4, the mask is:
//   <0, 4, 1, 5, 2, 6, 3, 7>
static Constant *getInterleavedMask(IRBuilder<> &Builder, unsigned VF,
                                    unsigned NumVec) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVec; j++)
      Mask.push_back(Builder.getInt32(j * VF + i));

  return ConstantVector::get(Mask);
}

// Get the strided mask starting from index \p Start.
// I.e. <Start, Start + Stride, ..., Start + Stride*(VF-1)>
static Constant *getStridedMask(IRBuilder<> &Builder, unsigned Start,
                                unsigned Stride, unsigned VF) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Builder.getInt32(Start + i * Stride));

  return ConstantVector::get(Mask);
}

// Get a mask of two parts: The first part consists of sequential integers
// starting from 0, the second part consists of UNDEFs.
// I.e. <0, 1, 2, ..., NumInt - 1, undef, ..., undef>
static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned NumInt,
                                   unsigned NumUndef) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < NumInt; i++)
    Mask.push_back(Builder.getInt32(i));

  Constant *Undef = UndefValue::get(Builder.getInt32Ty());
  for (unsigned i = 0; i < NumUndef; i++)
    Mask.push_back(Undef);

  return ConstantVector::get(Mask);
}

// Concatenate two vectors with the same element type. The 2nd vector should
// not have more elements than the 1st vector. If the 2nd vector has fewer
// elements, extend it with UNDEFs.
static Value *ConcatenateTwoVectors(IRBuilder<> &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = VecTy1->getNumElements();
  unsigned NumElts2 = VecTy2->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Unexpected: the first vector has fewer elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    Constant *ExtMask =
        getSequentialMask(Builder, NumElts2, NumElts1 - NumElts2);
    V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask);
  }

  Constant *Mask = getSequentialMask(Builder, NumElts1 + NumElts2, 0);
  return Builder.CreateShuffleVector(V1, V2, Mask);
}

// Concatenate vectors in the given list. All vectors have the same type.
static Value *ConcatenateVectors(IRBuilder<> &Builder,
                                 ArrayRef<Value *> InputList) {
  unsigned NumVec = InputList.size();
  assert(NumVec > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(InputList.begin(), InputList.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVec - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVec - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(ConcatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVec % 2 != 0)
      TmpList.push_back(ResList[NumVec - 1]);

    ResList = TmpList;
    NumVec = ResList.size();
  } while (NumVec > 1);

  return ResList[0];
}
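// For example, concatenating four <4 x i32> values V0..V3 proceeds pairwise:
// the first round produces the <8 x i32> values (V0 ++ V1) and (V2 ++ V3),
// and the second round combines those into a single <16 x i32> vector.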
// Try to vectorize the interleave group that \p Instr belongs to.
//
// E.g. Translate following interleaved load group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     R = Pic[i];             // Member of index 0
//     G = Pic[i+1];           // Member of index 1
//     B = Pic[i+2];           // Member of index 2
//     ...                     // do something to R, G, B
//   }
// To:
//   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
//   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
//   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
//   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
//
// Or translate following interleaved store group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     ... do something to R, G, B
//     Pic[i]   = R;           // Member of index 0
//     Pic[i+1] = G;           // Member of index 1
//     Pic[i+2] = B;           // Member of index 2
//   }
// To:
//   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
//   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
//   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
//        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>   ; Interleave R,G,B elements
//   store <12 x i32> %interleaved.vec             ; Write 4 tuples of R,G,B
void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
  const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr);
  assert(Group && "Failed to get an interleaved access group.");

  // Skip if the current instruction is not the insert position.
  if (Instr != Group->getInsertPos())
    return;

  LoadInst *LI = dyn_cast<LoadInst>(Instr);
  StoreInst *SI = dyn_cast<StoreInst>(Instr);
  Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();

  // Prepare for the vector type of the interleaved load/store.
  Type *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  unsigned InterleaveFactor = Group->getFactor();
  Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
  Type *PtrTy = VecTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());

  // Prepare for the new pointers.
  setDebugLocFromInst(Builder, Ptr);
  VectorParts &PtrParts = getVectorValue(Ptr);
  SmallVector<Value *, 2> NewPtrs;
  unsigned Index = Group->getIndex(Instr);
  for (unsigned Part = 0; Part < UF; Part++) {
    // Extract the pointer for the current instruction from the pointer
    // vector. A reverse access uses the pointer in the last lane.
    Value *NewPtr = Builder.CreateExtractElement(
        PtrParts[Part],
        Group->isReverse() ? Builder.getInt32(VF - 1) : Builder.getInt32(0));

    // Note that the current instruction could be at any index in the group,
    // so we need to adjust the address to the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
    NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));

    // Cast to the vector pointer type.
    NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
  }

  setDebugLocFromInst(Builder, Instr);
  Value *UndefVec = UndefValue::get(VecTy);

  // Vectorize the interleaved load group.
  if (LI) {
    for (unsigned Part = 0; Part < UF; Part++) {
      Instruction *NewLoadInstr = Builder.CreateAlignedLoad(
          NewPtrs[Part], Group->getAlignment(), "wide.vec");

      for (unsigned i = 0; i < InterleaveFactor; i++) {
        Instruction *Member = Group->getMember(i);

        // Skip the gaps in the group.
        if (!Member)
          continue;

        Constant *StrideMask = getStridedMask(Builder, i, InterleaveFactor, VF);
        Value *StridedVec = Builder.CreateShuffleVector(
            NewLoadInstr, UndefVec, StrideMask, "strided.vec");

        // If this member has a different type, cast the result.
        if (Member->getType() != ScalarTy) {
          VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
          StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy);
        }

        VectorParts &Entry = WidenMap.get(Member);
        Entry[Part] =
            Group->isReverse() ? reverseVector(StridedVec) : StridedVec;
      }

      propagateMetadata(NewLoadInstr, Instr);
    }
    return;
  }

  // The sub vector type for the current instruction.
  VectorType *SubVT = VectorType::get(ScalarTy, VF);

  // Vectorize the interleaved store group.
  for (unsigned Part = 0; Part < UF; Part++) {
    // Collect the stored vector from each member.
    SmallVector<Value *, 4> StoredVecs;
    for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow a gap, so each index has a
      // member.
      Instruction *Member = Group->getMember(i);
      assert(Member &&
             "Failed to get a member from an interleaved store group");

      Value *StoredVec =
          getVectorValue(dyn_cast<StoreInst>(Member)->getValueOperand())[Part];
      if (Group->isReverse())
        StoredVec = reverseVector(StoredVec);

      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
        StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT);

      StoredVecs.push_back(StoredVec);
    }

    // Concatenate all vectors into a wide vector.
    Value *WideVec = ConcatenateVectors(Builder, StoredVecs);

    // Interleave the elements in the wide vector.
    Constant *IMask = getInterleavedMask(Builder, VF, InterleaveFactor);
    Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
                                              "interleaved.vec");

    Instruction *NewStoreInstr =
        Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
    propagateMetadata(NewStoreInstr, Instr);
  }
}

void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
  // Attempt to issue a wide load.
  LoadInst *LI = dyn_cast<LoadInst>(Instr);
  StoreInst *SI = dyn_cast<StoreInst>(Instr);

  assert((LI || SI) && "Invalid Load/Store instruction");

  // Try to vectorize the interleave group if this access is interleaved.
  if (Legal->isAccessInterleaved(Instr))
    return vectorizeInterleaveGroup(Instr);

  Type *ScalarDataTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  Type *DataTy = VectorType::get(ScalarDataTy, VF);
  Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();
  unsigned Alignment = LI ? LI->getAlignment() : SI->getAlignment();
  // An alignment of 0 means target ABI alignment. We need to use the scalar's
  // target ABI alignment in such a case.
  const DataLayout &DL = Instr->getModule()->getDataLayout();
  if (!Alignment)
    Alignment = DL.getABITypeAlignment(ScalarDataTy);
  unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
  unsigned ScalarAllocatedSize = DL.getTypeAllocSize(ScalarDataTy);
  unsigned VectorElementSize = DL.getTypeStoreSize(DataTy) / VF;

  if (SI && Legal->blockNeedsPredication(SI->getParent()) &&
      !Legal->isMaskRequired(SI))
    return scalarizeInstruction(Instr, true);

  if (ScalarAllocatedSize != VectorElementSize)
    return scalarizeInstruction(Instr);

  // If the pointer is loop invariant or if it is non-consecutive,
  // scalarize the load.
  int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
  bool Reverse = ConsecutiveStride < 0;
  bool UniformLoad = LI && Legal->isUniform(Ptr);
  if (!ConsecutiveStride || UniformLoad)
    return scalarizeInstruction(Instr);
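  // Past this point the access is consecutive with stride +1 or -1 and is
  // widened below: stride +1 becomes a single wide load/store per unroll
  // part, while stride -1 addresses the last element first and reverses the
  // vector afterwards.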
  Constant *Zero = Builder.getInt32(0);
  VectorParts &Entry = WidenMap.get(Instr);

  // Handle consecutive loads/stores.
  GetElementPtrInst *Gep = getGEPInstruction(Ptr);
  if (Gep && Legal->isInductionVariable(Gep->getPointerOperand())) {
    setDebugLocFromInst(Builder, Gep);
    Value *PtrOperand = Gep->getPointerOperand();
    Value *FirstBasePtr = getVectorValue(PtrOperand)[0];
    FirstBasePtr = Builder.CreateExtractElement(FirstBasePtr, Zero);

    // Create the new GEP with the new induction variable.
    GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
    Gep2->setOperand(0, FirstBasePtr);
    Gep2->setName("gep.indvar.base");
    Ptr = Builder.Insert(Gep2);
  } else if (Gep) {
    setDebugLocFromInst(Builder, Gep);
    assert(SE->isLoopInvariant(SE->getSCEV(Gep->getPointerOperand()),
                               OrigLoop) && "Base ptr must be invariant");

    // The last index does not have to be the induction. It can be
    // consecutive and be a function of the index. For example A[I+1];
    unsigned NumOperands = Gep->getNumOperands();
    unsigned InductionOperand = getGEPInductionOperand(Gep);
    // Create the new GEP with the new induction variable.
    GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());

    for (unsigned i = 0; i < NumOperands; ++i) {
      Value *GepOperand = Gep->getOperand(i);
      Instruction *GepOperandInst = dyn_cast<Instruction>(GepOperand);

      // Update last index or loop invariant instruction anchored in loop.
      if (i == InductionOperand ||
          (GepOperandInst && OrigLoop->contains(GepOperandInst))) {
        assert((i == InductionOperand ||
                SE->isLoopInvariant(SE->getSCEV(GepOperandInst), OrigLoop)) &&
               "Must be last index or loop invariant");

        VectorParts &GEPParts = getVectorValue(GepOperand);
        Value *Index = GEPParts[0];
        Index = Builder.CreateExtractElement(Index, Zero);
        Gep2->setOperand(i, Index);
        Gep2->setName("gep.indvar.idx");
      }
    }
    Ptr = Builder.Insert(Gep2);
  } else {
    // Use the induction element ptr.
    assert(isa<PHINode>(Ptr) && "Invalid induction ptr");
    setDebugLocFromInst(Builder, Ptr);
    VectorParts &PtrVal = getVectorValue(Ptr);
    Ptr = Builder.CreateExtractElement(PtrVal[0], Zero);
  }

  VectorParts Mask = createBlockInMask(Instr->getParent());
  // Handle Stores:
  if (SI) {
    assert(!Legal->isUniform(SI->getPointerOperand()) &&
           "We do not allow storing to uniform addresses");
    setDebugLocFromInst(Builder, SI);
    // We don't want to update the value in the map as it might be used in
    // another expression. So don't use a reference type for "StoredVal".
    VectorParts StoredVal = getVectorValue(SI->getValueOperand());

    for (unsigned Part = 0; Part < UF; ++Part) {
      // Calculate the pointer for the specific unroll-part.
      Value *PartPtr =
          Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));

      if (Reverse) {
        // If we store to reverse consecutive memory locations, then we need
        // to reverse the order of elements in the stored value.
        StoredVal[Part] = reverseVector(StoredVal[Part]);
        // If the address is consecutive but reversed, then the
        // wide store needs to start at the last vector element.
        PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
        PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
        Mask[Part] = reverseVector(Mask[Part]);
      }

      Value *VecPtr = Builder.CreateBitCast(PartPtr,
                                            DataTy->getPointerTo(AddressSpace));

      Instruction *NewSI;
      if (Legal->isMaskRequired(SI))
        NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment,
                                          Mask[Part]);
      else
        NewSI = Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment);
      propagateMetadata(NewSI, SI);
    }
    return;
  }

  // Handle loads.
  assert(LI && "Must have a load instruction");
  setDebugLocFromInst(Builder, LI);
  for (unsigned Part = 0; Part < UF; ++Part) {
    // Calculate the pointer for the specific unroll-part.
    Value *PartPtr =
        Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));

    if (Reverse) {
      // If the address is consecutive but reversed, then the
      // wide load needs to start at the last vector element.
      PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
      PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
      Mask[Part] = reverseVector(Mask[Part]);
    }

    Instruction* NewLI;
    Value *VecPtr = Builder.CreateBitCast(PartPtr,
                                          DataTy->getPointerTo(AddressSpace));
    if (Legal->isMaskRequired(LI))
      NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
                                       UndefValue::get(DataTy),
                                       "wide.masked.load");
    else
      NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
    propagateMetadata(NewLI, LI);
    Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI;
  }
}

void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
                                               bool IfPredicateStore) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
  // Holds vector parameters or scalars, in case of uniform vals.
  SmallVector<VectorParts, 4> Params;

  setDebugLocFromInst(Builder, Instr);

  // Find all of the vectorized parameters.
  for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
    Value *SrcOp = Instr->getOperand(op);

    // If we are accessing the old induction variable, use the new one.
    if (SrcOp == OldInduction) {
      Params.push_back(getVectorValue(SrcOp));
      continue;
    }

    // Try using previously calculated values.
    Instruction *SrcInst = dyn_cast<Instruction>(SrcOp);

    // If the src is an instruction that appeared earlier in the basic block,
    // then it should already be vectorized.
    if (SrcInst && OrigLoop->contains(SrcInst)) {
      assert(WidenMap.has(SrcInst) && "Source operand is unavailable");
      // The parameter is a vector value from earlier.
      Params.push_back(WidenMap.get(SrcInst));
    } else {
      // The parameter is a scalar from outside the loop. Maybe even a
      // constant.
      VectorParts Scalars;
      Scalars.append(UF, SrcOp);
      Params.push_back(Scalars);
    }
  }

  assert(Params.size() == Instr->getNumOperands() &&
         "Invalid number of operands");

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Value *UndefVec = IsVoidRetTy ? nullptr :
      UndefValue::get(VectorType::get(Instr->getType(), VF));
  // Create a new entry in the WidenMap and initialize it to Undef or Null.
  VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);

  VectorParts Cond;
  if (IfPredicateStore) {
    assert(Instr->getParent()->getSinglePredecessor() &&
           "Only support single predecessor blocks");
    Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(),
                          Instr->getParent());
  }

  // For each vector unroll 'part':
  for (unsigned Part = 0; Part < UF; ++Part) {
    // For each scalar that we create:
    for (unsigned Width = 0; Width < VF; ++Width) {

      // Start if-block.
      Value *Cmp = nullptr;
      if (IfPredicateStore) {
        Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Width));
        Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp,
                                 ConstantInt::get(Cmp->getType(), 1));
      }

      Instruction *Cloned = Instr->clone();
      if (!IsVoidRetTy)
        Cloned->setName(Instr->getName() + ".cloned");
      // Replace the operands of the cloned instructions with extracted
      // scalars.
      for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
        Value *Op = Params[op][Part];
        // Param is a vector. Need to extract the right lane.
        if (Op->getType()->isVectorTy())
          Op = Builder.CreateExtractElement(Op, Builder.getInt32(Width));
        Cloned->setOperand(op, Op);
      }

      // Place the cloned scalar in the new loop.
      Builder.Insert(Cloned);

      // If the original scalar returns a value we need to place it in a
      // vector so that future users will be able to use it.
      if (!IsVoidRetTy)
        VecResults[Part] = Builder.CreateInsertElement(VecResults[Part], Cloned,
                                                       Builder.getInt32(Width));
      // End if-block.
      if (IfPredicateStore)
        PredicatedStores.push_back(std::make_pair(cast<StoreInst>(Cloned),
                                                  Cmp));
    }
  }
}
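// A sketch of what scalarizeInstruction produces for a predicated store with
// UF = 1 and VF = 2 (conceptual; the if-blocks themselves are materialized
// later from the PredicatedStores list):
//   %c0 = extractelement <2 x i1> %mask, i32 0
//   if (%c0) store ...   ; lane-0 clone of the scalar store
//   %c1 = extractelement <2 x i1> %mask, i32 1
//   if (%c1) store ...   ; lane-1 clone of the scalar store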
PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
                                                      Value *End, Value *Step,
                                                      Instruction *DL) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible no latch exists
  // yet. If so, use the header as this will be a single block loop.
  if (!Latch)
    Latch = Header;

  IRBuilder<> Builder(&*Header->getFirstInsertionPt());
  setDebugLocFromInst(Builder, getDebugLocFromInstOrOperands(OldInduction));
  auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");

  Builder.SetInsertPoint(Latch->getTerminator());

  // Create i+1 and fill the PHINode.
  Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
  Induction->addIncoming(Start, L->getLoopPreheader());
  Induction->addIncoming(Next, Latch);
  // Create the compare.
  Value *ICmp = Builder.CreateICmpEQ(Next, End);
  Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);

  // Now we have two terminators. Remove the old one from the block.
  Latch->getTerminator()->eraseFromParent();

  return Induction;
}

Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
  if (TripCount)
    return TripCount;

  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  // Find the loop boundaries.
  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(OrigLoop);
  assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
         "Invalid loop count");

  Type *IdxTy = Legal->getWidestInductionType();

  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way that we get a backedge taken count is that the
  // induction variable was signed and as such will not overflow. In such a
  // case truncation is legal.
  if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
      IdxTy->getPrimitiveSizeInBits())
    BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
  BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);

  // Get the total trip count from the count by adding 1.
  const SCEV *ExitCount = SE->getAddExpr(
      BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));

  const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();

  // Expand the trip count and place the new instructions in the preheader.
  // Notice that the pre-header does not change, only the loop body.
  SCEVExpander Exp(*SE, DL, "induction");

  // Count holds the overall loop count (N).
  TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
                                L->getLoopPreheader()->getTerminator());

  if (TripCount->getType()->isPointerTy())
    TripCount =
        CastInst::CreatePointerCast(TripCount, IdxTy,
                                    "exitcount.ptrcnt.to.int",
                                    L->getLoopPreheader()->getTerminator());

  return TripCount;
}

Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
  if (VectorTripCount)
    return VectorTripCount;

  Value *TC = getOrCreateTripCount(L);
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());

  // Now we need to generate the expression for N - (N % VF), which is
  // the part that the vectorized body will execute.
  // The loop step is equal to the vectorization factor (num of SIMD elements)
  // times the unroll factor (num of SIMD instructions).
  Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
  Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
  VectorTripCount = Builder.CreateSub(TC, R, "n.vec");

  return VectorTripCount;
}
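// Worked example: with a trip count of 37, VF = 4 and UF = 2, Step is 8,
// n.mod.vf is 37 % 8 = 5, and the vector trip count n.vec is 32; the
// remaining 5 iterations run in the scalar remainder loop.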
void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
                                                         BasicBlock *Bypass) {
  Value *Count = getOrCreateTripCount(L);
  BasicBlock *BB = L->getLoopPreheader();
  IRBuilder<> Builder(BB->getTerminator());

  // Generate code to check that the loop's trip count that we computed by
  // adding one to the backedge-taken count will not overflow.
  Value *CheckMinIters =
      Builder.CreateICmpULT(Count,
                            ConstantInt::get(Count->getType(), VF * UF),
                            "min.iters.check");

  BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(),
                                          "min.iters.checked");
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, CheckMinIters));
  LoopBypassBlocks.push_back(BB);
}

void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L,
                                                     BasicBlock *Bypass) {
  Value *TC = getOrCreateVectorTripCount(L);
  BasicBlock *BB = L->getLoopPreheader();
  IRBuilder<> Builder(BB->getTerminator());

  // Now, compare the new count to zero. If it is zero skip the vector loop
  // and jump to the scalar loop.
  Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
                                    "cmp.zero");

  // Split the preheader and branch to the scalar loop if the vector trip
  // count is zero.
  BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(),
                                          "vector.ph");
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, Cmp));
  LoopBypassBlocks.push_back(BB);
}

void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *BB = L->getLoopPreheader();

  // Generate the code to check the SCEV assumptions that we made.
  // We want the new basic block to start at the first instruction in a
  // sequence of instructions that form a check.
  SCEVExpander Exp(*SE, Bypass->getModule()->getDataLayout(), "scev.check");
  Value *SCEVCheck = Exp.expandCodeForPredicate(&Preds, BB->getTerminator());

  if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
    if (C->isZero())
      return;

  // Create a new block containing the stride check.
  BB->setName("vector.scevcheck");
  auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, SCEVCheck));
  LoopBypassBlocks.push_back(BB);
  AddedSafetyChecks = true;
}

void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
                                               BasicBlock *Bypass) {
  BasicBlock *BB = L->getLoopPreheader();

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  Instruction *FirstCheckInst;
  Instruction *MemRuntimeCheck;
  std::tie(FirstCheckInst, MemRuntimeCheck) =
      Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
  if (!MemRuntimeCheck)
    return;

  // Create a new block containing the memory check.
  BB->setName("vector.memcheck");
  auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
  LoopBypassBlocks.push_back(BB);
  AddedSafetyChecks = true;
}


void InnerLoopVectorizer::createEmptyLoop() {
  /*
   In this function we generate a new loop. The new loop will contain
   the vectorized instructions while the old loop will continue to run the
   scalar remainder.

       [ ] <-- loop iteration number check.
    /   |
   /    v
  |    [ ] <-- vector loop bypass (may consist of multiple blocks).
  |  /  |
  | /   v
  ||   [ ]     <-- vector pre header.
  |/    |
  |     v
  |    [  ] \
  |    [  ]_|   <-- vector loop.
  |     |
  |      v
  |   -[ ]   <--- middle-block.
  |  /  |
  | /   v
  -|- >[ ]     <--- new preheader.
  |    |
  |    v
  |   [ ] \
  |   [ ]_|   <-- old scalar loop to handle remainder.
   \   |
    \  v
      >[ ]     <-- exit block.
   ...
   */

  BasicBlock *OldBasicBlock = OrigLoop->getHeader();
  BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
  BasicBlock *ExitBlock = OrigLoop->getExitBlock();
  assert(VectorPH && "Invalid loop structure");
  assert(ExitBlock && "Must have an exit block");

  // Some loops have a single integer induction variable, while other loops
  // don't. One example is c++ iterators that often have multiple pointer
  // induction variables. In the code below we also support a case where we
  // don't have a single induction variable.
  //
  // We try to obtain an induction variable from the original loop as hard
  // as possible. However if we don't find one that:
  //   - is an integer
  //   - counts from zero, stepping by one
  //   - is the size of the widest induction variable type
  // then we create a new one.
  OldInduction = Legal->getInduction();
  Type *IdxTy = Legal->getWidestInductionType();

  // Split the single block loop into the two loop structure described above.
  BasicBlock *VecBody =
      VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
  BasicBlock *MiddleBlock =
      VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
  BasicBlock *ScalarPH =
      MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");

  // Create and register the new vector loop.
  Loop* Lp = new Loop();
  Loop *ParentLoop = OrigLoop->getParentLoop();

  // Insert the new loop into the loop nest and register the new basic blocks
  // before calling any utilities such as SCEV that require valid LoopInfo.
  if (ParentLoop) {
    ParentLoop->addChildLoop(Lp);
    ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
    ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
  } else {
    LI->addTopLevelLoop(Lp);
  }
  Lp->addBasicBlockToLoop(VecBody, *LI);

  // Find the loop boundaries.
  Value *Count = getOrCreateTripCount(Lp);

  Value *StartIdx = ConstantInt::get(IdxTy, 0);

  // We need to test whether the backedge-taken count is uint##_max. Adding one
  // to it will cause overflow and an incorrect loop trip count in the vector
  // body. In case of overflow we want to directly jump to the scalar remainder
  // loop.
  emitMinimumIterationCountCheck(Lp, ScalarPH);
  // Now, compare the new count to zero. If it is zero skip the vector loop and
  // jump to the scalar loop.
  emitVectorLoopEnteredCheck(Lp, ScalarPH);
  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
  emitSCEVChecks(Lp, ScalarPH);

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  emitMemRuntimeChecks(Lp, ScalarPH);

  // Generate the induction variable.
  // The loop step is equal to the vectorization factor (num of SIMD elements)
  // times the unroll factor (num of SIMD instructions).
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Constant *Step = ConstantInt::get(IdxTy, VF * UF);
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // We are going to resume the execution of the scalar loop.
  // Go over all of the induction variables that we found and fix the
  // PHIs that are left in the scalar version of the loop.
2922 // The starting values of PHI nodes depend on the counter of the last 2923 // iteration in the vectorized loop. 2924 // If we come from a bypass edge then we need to start from the original 2925 // start value. 2926 2927 // This variable saves the new starting index for the scalar loop. It is used 2928 // to test if there are any tail iterations left once the vector loop has 2929 // completed. 2930 LoopVectorizationLegality::InductionList::iterator I, E; 2931 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars(); 2932 for (I = List->begin(), E = List->end(); I != E; ++I) { 2933 PHINode *OrigPhi = I->first; 2934 InductionDescriptor II = I->second; 2935 2936 // Create phi nodes to merge from the backedge-taken check block. 2937 PHINode *BCResumeVal = PHINode::Create(OrigPhi->getType(), 3, 2938 "bc.resume.val", 2939 ScalarPH->getTerminator()); 2940 Value *EndValue; 2941 if (OrigPhi == OldInduction) { 2942 // We know what the end value is. 2943 EndValue = CountRoundDown; 2944 } else { 2945 IRBuilder<> B(LoopBypassBlocks.back()->getTerminator()); 2946 Value *CRD = B.CreateSExtOrTrunc(CountRoundDown, 2947 II.getStepValue()->getType(), 2948 "cast.crd"); 2949 EndValue = II.transform(B, CRD); 2950 EndValue->setName("ind.end"); 2951 } 2952 2953 // The new PHI merges the original incoming value, in case of a bypass, 2954 // or the value at the end of the vectorized loop. 2955 BCResumeVal->addIncoming(EndValue, MiddleBlock); 2956 2957 // Fix the scalar body counter (PHI node). 2958 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH); 2959 2960 // The old induction's phi node in the scalar body needs the truncated 2961 // value. 2962 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 2963 BCResumeVal->addIncoming(II.getStartValue(), LoopBypassBlocks[I]); 2964 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal); 2965 } 2966 2967 // Add a check in the middle block to see if we have completed 2968 // all of the iterations in the first vector loop. 2969 // If (N - N%VF) == N, then we *don't* need to run the remainder. 2970 Value *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 2971 CountRoundDown, "cmp.n", 2972 MiddleBlock->getTerminator()); 2973 ReplaceInstWithInst(MiddleBlock->getTerminator(), 2974 BranchInst::Create(ExitBlock, ScalarPH, CmpN)); 2975 2976 // Get ready to start creating new instructions into the vectorized body. 2977 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt()); 2978 2979 // Save the state. 
2980 LoopVectorPreHeader = Lp->getLoopPreheader(); 2981 LoopScalarPreHeader = ScalarPH; 2982 LoopMiddleBlock = MiddleBlock; 2983 LoopExitBlock = ExitBlock; 2984 LoopVectorBody.push_back(VecBody); 2985 LoopScalarBody = OldBasicBlock; 2986 2987 LoopVectorizeHints Hints(Lp, true); 2988 Hints.setAlreadyVectorized(); 2989 } 2990 2991 namespace { 2992 struct CSEDenseMapInfo { 2993 static bool canHandle(Instruction *I) { 2994 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 2995 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 2996 } 2997 static inline Instruction *getEmptyKey() { 2998 return DenseMapInfo<Instruction *>::getEmptyKey(); 2999 } 3000 static inline Instruction *getTombstoneKey() { 3001 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3002 } 3003 static unsigned getHashValue(Instruction *I) { 3004 assert(canHandle(I) && "Unknown instruction!"); 3005 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3006 I->value_op_end())); 3007 } 3008 static bool isEqual(Instruction *LHS, Instruction *RHS) { 3009 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3010 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3011 return LHS == RHS; 3012 return LHS->isIdenticalTo(RHS); 3013 } 3014 }; 3015 } 3016 3017 /// \brief Check whether this block is a predicated block. 3018 /// Due to if predication of stores we might create a sequence of "if(pred) a[i] 3019 /// = ...; " blocks. We start with one vectorized basic block. For every 3020 /// conditional block we split this vectorized block. Therefore, every second 3021 /// block will be a predicated one. 3022 static bool isPredicatedBlock(unsigned BlockNum) { 3023 return BlockNum % 2; 3024 } 3025 3026 ///\brief Perform cse of induction variable instructions. 3027 static void cse(SmallVector<BasicBlock *, 4> &BBs) { 3028 // Perform simple cse. 3029 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3030 for (unsigned i = 0, e = BBs.size(); i != e; ++i) { 3031 BasicBlock *BB = BBs[i]; 3032 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3033 Instruction *In = &*I++; 3034 3035 if (!CSEDenseMapInfo::canHandle(In)) 3036 continue; 3037 3038 // Check if we can replace this instruction with any of the 3039 // visited instructions. 3040 if (Instruction *V = CSEMap.lookup(In)) { 3041 In->replaceAllUsesWith(V); 3042 In->eraseFromParent(); 3043 continue; 3044 } 3045 // Ignore instructions in conditional blocks. We create "if (pred) a[i] = 3046 // ...;" blocks for predicated stores. Every second block is a predicated 3047 // block. 3048 if (isPredicatedBlock(i)) 3049 continue; 3050 3051 CSEMap[In] = In; 3052 } 3053 } 3054 } 3055 3056 /// \brief Adds a 'fast' flag to floating point operations. 3057 static Value *addFastMathFlag(Value *V) { 3058 if (isa<FPMathOperator>(V)){ 3059 FastMathFlags Flags; 3060 Flags.setUnsafeAlgebra(); 3061 cast<Instruction>(V)->setFastMathFlags(Flags); 3062 } 3063 return V; 3064 } 3065 3066 /// Estimate the overhead of scalarizing a value. Insert and Extract are set if 3067 /// the result needs to be inserted and/or extracted from vectors. 
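///
/// Hedged illustration of the accounting below: for a <4 x i32> value with
/// both Insert and Extract set, the estimate is the sum over the four lanes
/// of the target's insertelement and extractelement costs; the concrete
/// numbers come from TargetTransformInfo and differ per target.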
3068 static unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract, 3069 const TargetTransformInfo &TTI) { 3070 if (Ty->isVoidTy()) 3071 return 0; 3072 3073 assert(Ty->isVectorTy() && "Can only scalarize vectors"); 3074 unsigned Cost = 0; 3075 3076 for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) { 3077 if (Insert) 3078 Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, i); 3079 if (Extract) 3080 Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, i); 3081 } 3082 3083 return Cost; 3084 } 3085 3086 // Estimate cost of a call instruction CI if it were vectorized with factor VF. 3087 // Return the cost of the instruction, including scalarization overhead if it's 3088 // needed. The flag NeedToScalarize shows if the call needs to be scalarized - 3089 // i.e. either vector version isn't available, or is too expensive. 3090 static unsigned getVectorCallCost(CallInst *CI, unsigned VF, 3091 const TargetTransformInfo &TTI, 3092 const TargetLibraryInfo *TLI, 3093 bool &NeedToScalarize) { 3094 Function *F = CI->getCalledFunction(); 3095 StringRef FnName = CI->getCalledFunction()->getName(); 3096 Type *ScalarRetTy = CI->getType(); 3097 SmallVector<Type *, 4> Tys, ScalarTys; 3098 for (auto &ArgOp : CI->arg_operands()) 3099 ScalarTys.push_back(ArgOp->getType()); 3100 3101 // Estimate cost of scalarized vector call. The source operands are assumed 3102 // to be vectors, so we need to extract individual elements from there, 3103 // execute VF scalar calls, and then gather the result into the vector return 3104 // value. 3105 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3106 if (VF == 1) 3107 return ScalarCallCost; 3108 3109 // Compute corresponding vector type for return value and arguments. 3110 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3111 for (unsigned i = 0, ie = ScalarTys.size(); i != ie; ++i) 3112 Tys.push_back(ToVectorTy(ScalarTys[i], VF)); 3113 3114 // Compute costs of unpacking argument values for the scalar calls and 3115 // packing the return values to a vector. 3116 unsigned ScalarizationCost = 3117 getScalarizationOverhead(RetTy, true, false, TTI); 3118 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) 3119 ScalarizationCost += getScalarizationOverhead(Tys[i], false, true, TTI); 3120 3121 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3122 3123 // If we can't emit a vector call for this function, then the currently found 3124 // cost is the cost we need to return. 3125 NeedToScalarize = true; 3126 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3127 return Cost; 3128 3129 // If the corresponding vector cost is cheaper, return its cost. 3130 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3131 if (VectorCallCost < Cost) { 3132 NeedToScalarize = false; 3133 return VectorCallCost; 3134 } 3135 return Cost; 3136 } 3137 3138 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3139 // factor VF. Return the cost of the instruction, including scalarization 3140 // overhead if it's needed. 
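// For example (an illustrative, target-dependent sketch, not a measured
// cost): a call to llvm.sqrt.f32 considered at VF = 4 is priced as the
// intrinsic llvm.sqrt.v4f32 on <4 x float> operands via TTI; callers compare
// that against the scalarized estimate from getVectorCallCost above to pick
// the cheaper form.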
3141 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3142 const TargetTransformInfo &TTI, 3143 const TargetLibraryInfo *TLI) { 3144 Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI); 3145 assert(ID && "Expected intrinsic call!"); 3146 3147 Type *RetTy = ToVectorTy(CI->getType(), VF); 3148 SmallVector<Type *, 4> Tys; 3149 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) 3150 Tys.push_back(ToVectorTy(CI->getArgOperand(i)->getType(), VF)); 3151 3152 return TTI.getIntrinsicInstrCost(ID, RetTy, Tys); 3153 } 3154 3155 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3156 IntegerType *I1 = cast<IntegerType>(T1->getVectorElementType()); 3157 IntegerType *I2 = cast<IntegerType>(T2->getVectorElementType()); 3158 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3159 } 3160 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3161 IntegerType *I1 = cast<IntegerType>(T1->getVectorElementType()); 3162 IntegerType *I2 = cast<IntegerType>(T2->getVectorElementType()); 3163 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3164 } 3165 3166 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3167 // For every instruction `I` in MinBWs, truncate the operands, create a 3168 // truncated version of `I` and reextend its result. InstCombine runs 3169 // later and will remove any ext/trunc pairs. 3170 // 3171 for (auto &KV : MinBWs) { 3172 VectorParts &Parts = WidenMap.get(KV.first); 3173 for (Value *&I : Parts) { 3174 if (I->use_empty()) 3175 continue; 3176 Type *OriginalTy = I->getType(); 3177 Type *ScalarTruncatedTy = IntegerType::get(OriginalTy->getContext(), 3178 KV.second); 3179 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3180 OriginalTy->getVectorNumElements()); 3181 if (TruncatedTy == OriginalTy) 3182 continue; 3183 3184 IRBuilder<> B(cast<Instruction>(I)); 3185 auto ShrinkOperand = [&](Value *V) -> Value* { 3186 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3187 if (ZI->getSrcTy() == TruncatedTy) 3188 return ZI->getOperand(0); 3189 return B.CreateZExtOrTrunc(V, TruncatedTy); 3190 }; 3191 3192 // The actual instruction modification depends on the instruction type, 3193 // unfortunately. 
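      // Hedged illustration of the rewrite performed below: if MinBWs says
      // 8 bits suffice for
      //    %a = add <4 x i32> %x, %y
      // we emit
      //    %x8 = trunc <4 x i32> %x to <4 x i8>
      //    %y8 = trunc <4 x i32> %y to <4 x i8>
      //    %a8 = add <4 x i8> %x8, %y8
      //    %a' = zext <4 x i8> %a8 to <4 x i32>
      // where ShrinkOperand skips the trunc for operands that were already
      // zexts from the narrow type.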
      Value *NewI = nullptr;
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
        NewI = B.CreateBinOp(BO->getOpcode(),
                             ShrinkOperand(BO->getOperand(0)),
                             ShrinkOperand(BO->getOperand(1)));
        cast<BinaryOperator>(NewI)->copyIRFlags(I);
      } else if (ICmpInst *CI = dyn_cast<ICmpInst>(I)) {
        NewI = B.CreateICmp(CI->getPredicate(),
                            ShrinkOperand(CI->getOperand(0)),
                            ShrinkOperand(CI->getOperand(1)));
      } else if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
        NewI = B.CreateSelect(SI->getCondition(),
                              ShrinkOperand(SI->getTrueValue()),
                              ShrinkOperand(SI->getFalseValue()));
      } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
        switch (CI->getOpcode()) {
        default: llvm_unreachable("Unhandled cast!");
        case Instruction::Trunc:
          NewI = ShrinkOperand(CI->getOperand(0));
          break;
        case Instruction::SExt:
          NewI = B.CreateSExtOrTrunc(CI->getOperand(0),
                                     smallestIntegerVectorType(OriginalTy,
                                                               TruncatedTy));
          break;
        case Instruction::ZExt:
          NewI = B.CreateZExtOrTrunc(CI->getOperand(0),
                                     smallestIntegerVectorType(OriginalTy,
                                                               TruncatedTy));
          break;
        }
      } else if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(I)) {
        auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 =
            B.CreateZExtOrTrunc(SI->getOperand(0),
                                VectorType::get(ScalarTruncatedTy, Elements0));
        auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
        auto *O1 =
            B.CreateZExtOrTrunc(SI->getOperand(1),
                                VectorType::get(ScalarTruncatedTy, Elements1));

        NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
      } else if (isa<LoadInst>(I)) {
        // Don't do anything with the operands, just extend the result.
        continue;
      } else {
        llvm_unreachable("Unhandled instruction type!");
      }

      // Lastly, extend the result.
      NewI->takeName(cast<Instruction>(I));
      Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
      I->replaceAllUsesWith(Res);
      cast<Instruction>(I)->eraseFromParent();
      I = Res;
    }
  }

  // We'll have created a bunch of ZExts that are now unused. Clean them up.
  for (auto &KV : MinBWs) {
    VectorParts &Parts = WidenMap.get(KV.first);
    for (Value *&I : Parts) {
      ZExtInst *Inst = dyn_cast<ZExtInst>(I);
      if (Inst && Inst->use_empty()) {
        Value *NewI = Inst->getOperand(0);
        Inst->eraseFromParent();
        I = NewI;
      }
    }
  }
}

void InnerLoopVectorizer::vectorizeLoop() {
  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
  //
  //===------------------------------------------------===//
  Constant *Zero = Builder.getInt32(0);

  // In order to support reduction variables we need to be able to vectorize
  // Phi nodes. Phi nodes have cycles, so we need to vectorize them in two
  // stages. First, we create a new vector PHI node with no incoming edges.
  // We use this value when we vectorize all of the instructions that use the
  // PHI. Next, after all of the instructions in the block are complete we
  // add the new incoming edges to the PHI. At this point all of the
  // instructions in the basic block are vectorized, so we can use them to
  // construct the PHI.
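  // (A hedged example of the two stages: for a sum reduction s += a[i],
  // stage one creates an empty vec.phi in the vector loop header; the
  // widened add, e.g.
  //    %vec.add = add <4 x i32> %vec.phi, %wide.load
  // is built against it, and only afterwards are %vec.phi's incoming values
  // wired up: the start/identity vector from the preheader and %vec.add
  // from the latch.)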
  PhiVector RdxPHIsToFix;

  // Scan the loop in a topological order to ensure that defs are vectorized
  // before users.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  // Vectorize all of the blocks in the original loop.
  for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(),
       be = DFS.endRPO(); bb != be; ++bb)
    vectorizeBlockInLoop(*bb, &RdxPHIsToFix);

  // Insert truncates and extends for any truncated instructions as hints to
  // InstCombine.
  if (VF > 1)
    truncateToMinimalBitwidths();

  // At this point every instruction in the original loop is widened to
  // a vector form. We are almost done. Now, we need to fix the PHI nodes
  // that we vectorized. The PHI nodes are currently empty because we did
  // not want to introduce cycles. Notice that the remaining PHI nodes
  // that we need to fix are reduction variables.

  // Create the 'reduced' values for each of the reduction variables.
  // The reduced values are the vector values that we scalarize and combine
  // after the loop is finished.
  for (PhiVector::iterator it = RdxPHIsToFix.begin(), e = RdxPHIsToFix.end();
       it != e; ++it) {
    PHINode *RdxPhi = *it;
    assert(RdxPhi && "Unable to recover vectorized PHI");

    // Find the reduction variable descriptor.
    assert(Legal->isReductionVariable(RdxPhi) &&
           "Unable to find the reduction variable");
    RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[RdxPhi];

    RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
    TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
    Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
    RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
        RdxDesc.getMinMaxRecurrenceKind();
    setDebugLocFromInst(Builder, ReductionStartValue);

    // We need to generate a reduction vector from the incoming scalar.
    // To do so, we need to generate the 'identity' vector and override
    // one of the elements with the incoming scalar reduction. We need
    // to do it in the vector-loop preheader.
    Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());

    // This is the vector-clone of the value that leaves the loop.
    VectorParts &VectorExit = getVectorValue(LoopExitInst);
    Type *VecTy = VectorExit[0]->getType();

    // Find the reduction identity: zero for addition, or and xor; one for
    // multiplication; -1 for and.
    Value *Identity;
    Value *VectorStart;
    if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
        RK == RecurrenceDescriptor::RK_FloatMinMax) {
      // MinMax reductions have the start value as their identity.
      if (VF == 1) {
        VectorStart = Identity = ReductionStartValue;
      } else {
        VectorStart = Identity =
            Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
      }
    } else {
      // Handle other reduction kinds:
      Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
          RK, VecTy->getScalarType());
      if (VF == 1) {
        Identity = Iden;
        // This vector is the Identity vector where the first element is the
        // incoming scalar reduction.
        VectorStart = ReductionStartValue;
      } else {
        Identity = ConstantVector::getSplat(VF, Iden);

        // This vector is the Identity vector where the first element is the
        // incoming scalar reduction.
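        // (Illustration, hedged: for an integer add reduction with VF = 4
        // and start value %s, Identity is <0, 0, 0, 0> and VectorStart
        // becomes <%s, 0, 0, 0> via the insertelement below.)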
        VectorStart =
            Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
      }
    }

    // Fix the vector-loop phi.

    // Reductions do not have to start at zero. They can start with
    // any loop invariant values.
    VectorParts &VecRdxPhi = WidenMap.get(RdxPhi);
    BasicBlock *Latch = OrigLoop->getLoopLatch();
    Value *LoopVal = RdxPhi->getIncomingValueForBlock(Latch);
    VectorParts &Val = getVectorValue(LoopVal);
    for (unsigned part = 0; part < UF; ++part) {
      // Make sure to add the reduction start value only to the
      // first unroll part.
      Value *StartVal = (part == 0) ? VectorStart : Identity;
      cast<PHINode>(VecRdxPhi[part])->addIncoming(StartVal,
                                                  LoopVectorPreHeader);
      cast<PHINode>(VecRdxPhi[part])->addIncoming(Val[part],
                                                  LoopVectorBody.back());
    }

    // Before each round, move the insertion point right between
    // the PHIs and the values we are going to write.
    // This allows us to write both PHINodes and the extractelement
    // instructions.
    Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());

    VectorParts RdxParts = getVectorValue(LoopExitInst);
    setDebugLocFromInst(Builder, LoopExitInst);

    // If the vector reduction can be performed in a smaller type, we truncate
    // then extend the loop exit value to enable InstCombine to evaluate the
    // entire expression in the smaller type.
    if (VF > 1 && RdxPhi->getType() != RdxDesc.getRecurrenceType()) {
      Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
      Builder.SetInsertPoint(LoopVectorBody.back()->getTerminator());
      for (unsigned part = 0; part < UF; ++part) {
        Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
        Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
                                          : Builder.CreateZExt(Trunc, VecTy);
        for (Value::user_iterator UI = RdxParts[part]->user_begin();
             UI != RdxParts[part]->user_end();)
          if (*UI != Trunc) {
            (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd);
            RdxParts[part] = Extnd;
          } else {
            ++UI;
          }
      }
      Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
      for (unsigned part = 0; part < UF; ++part)
        RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
    }

    // Reduce all of the unrolled parts into a single vector.
    Value *ReducedPartRdx = RdxParts[0];
    unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
    setDebugLocFromInst(Builder, ReducedPartRdx);
    for (unsigned part = 1; part < UF; ++part) {
      if (Op != Instruction::ICmp && Op != Instruction::FCmp)
        // Floating point operations had to be 'fast' to enable the reduction.
        ReducedPartRdx = addFastMathFlag(
            Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part],
                                ReducedPartRdx, "bin.rdx"));
      else
        ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp(
            Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]);
    }

    if (VF > 1) {
      // VF is a power of 2 so we can emit the reduction using log2(VF)
      // shuffles and vector ops, reducing the set of values being computed
      // by half each round.
      assert(isPowerOf2_32(VF) &&
             "Reduction emission only supported for pow2 vectors!");
      Value *TmpVec = ReducedPartRdx;
      SmallVector<Constant*, 32> ShuffleMask(VF, nullptr);
      for (unsigned i = VF; i != 1; i >>= 1) {
        // Move the upper half of the vector to the lower half.
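        // (Hedged example for VF = 4: the first round uses mask
        // <2, 3, undef, undef> to combine lanes {0,2} and {1,3}; the second
        // round uses <1, undef, undef, undef>, leaving the full reduction
        // in lane 0.)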
3445 for (unsigned j = 0; j != i/2; ++j) 3446 ShuffleMask[j] = Builder.getInt32(i/2 + j); 3447 3448 // Fill the rest of the mask with undef. 3449 std::fill(&ShuffleMask[i/2], ShuffleMask.end(), 3450 UndefValue::get(Builder.getInt32Ty())); 3451 3452 Value *Shuf = 3453 Builder.CreateShuffleVector(TmpVec, 3454 UndefValue::get(TmpVec->getType()), 3455 ConstantVector::get(ShuffleMask), 3456 "rdx.shuf"); 3457 3458 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3459 // Floating point operations had to be 'fast' to enable the reduction. 3460 TmpVec = addFastMathFlag(Builder.CreateBinOp( 3461 (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx")); 3462 else 3463 TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind, 3464 TmpVec, Shuf); 3465 } 3466 3467 // The result is in the first element of the vector. 3468 ReducedPartRdx = Builder.CreateExtractElement(TmpVec, 3469 Builder.getInt32(0)); 3470 3471 // If the reduction can be performed in a smaller type, we need to extend 3472 // the reduction to the wider type before we branch to the original loop. 3473 if (RdxPhi->getType() != RdxDesc.getRecurrenceType()) 3474 ReducedPartRdx = 3475 RdxDesc.isSigned() 3476 ? Builder.CreateSExt(ReducedPartRdx, RdxPhi->getType()) 3477 : Builder.CreateZExt(ReducedPartRdx, RdxPhi->getType()); 3478 } 3479 3480 // Create a phi node that merges control-flow from the backedge-taken check 3481 // block and the middle block. 3482 PHINode *BCBlockPhi = PHINode::Create(RdxPhi->getType(), 2, "bc.merge.rdx", 3483 LoopScalarPreHeader->getTerminator()); 3484 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 3485 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 3486 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3487 3488 // Now, we need to fix the users of the reduction variable 3489 // inside and outside of the scalar remainder loop. 3490 // We know that the loop is in LCSSA form. We need to update the 3491 // PHI nodes in the exit blocks. 3492 for (BasicBlock::iterator LEI = LoopExitBlock->begin(), 3493 LEE = LoopExitBlock->end(); LEI != LEE; ++LEI) { 3494 PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI); 3495 if (!LCSSAPhi) break; 3496 3497 // All PHINodes need to have a single entry edge, or two if 3498 // we already fixed them. 3499 assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 3500 3501 // We found our reduction value exit-PHI. Update it with the 3502 // incoming bypass edge. 3503 if (LCSSAPhi->getIncomingValue(0) == LoopExitInst) { 3504 // Add an edge coming from the bypass. 3505 LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3506 break; 3507 } 3508 }// end of the LCSSA phi scan. 3509 3510 // Fix the scalar loop reduction variable with the incoming reduction sum 3511 // from the vector body and from the backedge value. 3512 int IncomingEdgeBlockIdx = 3513 (RdxPhi)->getBasicBlockIndex(OrigLoop->getLoopLatch()); 3514 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 3515 // Pick the other block. 3516 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 3517 (RdxPhi)->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 3518 (RdxPhi)->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 3519 }// end of for each redux variable. 3520 3521 fixLCSSAPHIs(); 3522 3523 // Make sure DomTree is updated. 3524 updateAnalysis(); 3525 3526 // Predicate any stores. 
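  // (Sketch of the resulting CFG, hedged: each store recorded in
  // PredicatedStores ends up guarded as
  //    br i1 %cond, label %pred.store.if, label %pred.store.continue
  // with the scalar store alone in pred.store.if, so masked-off lanes never
  // execute it.)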
3527 for (auto KV : PredicatedStores) { 3528 BasicBlock::iterator I(KV.first); 3529 auto *BB = SplitBlock(I->getParent(), &*std::next(I), DT, LI); 3530 auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false, 3531 /*BranchWeights=*/nullptr, DT); 3532 I->moveBefore(T); 3533 I->getParent()->setName("pred.store.if"); 3534 BB->setName("pred.store.continue"); 3535 } 3536 DEBUG(DT->verifyDomTree()); 3537 // Remove redundant induction instructions. 3538 cse(LoopVectorBody); 3539 } 3540 3541 void InnerLoopVectorizer::fixLCSSAPHIs() { 3542 for (BasicBlock::iterator LEI = LoopExitBlock->begin(), 3543 LEE = LoopExitBlock->end(); LEI != LEE; ++LEI) { 3544 PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI); 3545 if (!LCSSAPhi) break; 3546 if (LCSSAPhi->getNumIncomingValues() == 1) 3547 LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()), 3548 LoopMiddleBlock); 3549 } 3550 } 3551 3552 InnerLoopVectorizer::VectorParts 3553 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) { 3554 assert(std::find(pred_begin(Dst), pred_end(Dst), Src) != pred_end(Dst) && 3555 "Invalid edge"); 3556 3557 // Look for cached value. 3558 std::pair<BasicBlock*, BasicBlock*> Edge(Src, Dst); 3559 EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge); 3560 if (ECEntryIt != MaskCache.end()) 3561 return ECEntryIt->second; 3562 3563 VectorParts SrcMask = createBlockInMask(Src); 3564 3565 // The terminator has to be a branch inst! 3566 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 3567 assert(BI && "Unexpected terminator found"); 3568 3569 if (BI->isConditional()) { 3570 VectorParts EdgeMask = getVectorValue(BI->getCondition()); 3571 3572 if (BI->getSuccessor(0) != Dst) 3573 for (unsigned part = 0; part < UF; ++part) 3574 EdgeMask[part] = Builder.CreateNot(EdgeMask[part]); 3575 3576 for (unsigned part = 0; part < UF; ++part) 3577 EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]); 3578 3579 MaskCache[Edge] = EdgeMask; 3580 return EdgeMask; 3581 } 3582 3583 MaskCache[Edge] = SrcMask; 3584 return SrcMask; 3585 } 3586 3587 InnerLoopVectorizer::VectorParts 3588 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) { 3589 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 3590 3591 // Loop incoming mask is all-one. 3592 if (OrigLoop->getHeader() == BB) { 3593 Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1); 3594 return getVectorValue(C); 3595 } 3596 3597 // This is the block mask. We OR all incoming edges, and with zero. 3598 Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0); 3599 VectorParts BlockMask = getVectorValue(Zero); 3600 3601 // For each pred: 3602 for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) { 3603 VectorParts EM = createEdgeMask(*it, BB); 3604 for (unsigned part = 0; part < UF; ++part) 3605 BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]); 3606 } 3607 3608 return BlockMask; 3609 } 3610 3611 void InnerLoopVectorizer::widenPHIInstruction( 3612 Instruction *PN, InnerLoopVectorizer::VectorParts &Entry, unsigned UF, 3613 unsigned VF, PhiVector *PV) { 3614 PHINode* P = cast<PHINode>(PN); 3615 // Handle reduction variables: 3616 if (Legal->isReductionVariable(P)) { 3617 for (unsigned part = 0; part < UF; ++part) { 3618 // This is phase one of vectorizing PHIs. 3619 Type *VecTy = (VF == 1) ? 
PN->getType() : 3620 VectorType::get(PN->getType(), VF); 3621 Entry[part] = PHINode::Create( 3622 VecTy, 2, "vec.phi", &*LoopVectorBody.back()->getFirstInsertionPt()); 3623 } 3624 PV->push_back(P); 3625 return; 3626 } 3627 3628 setDebugLocFromInst(Builder, P); 3629 // Check for PHI nodes that are lowered to vector selects. 3630 if (P->getParent() != OrigLoop->getHeader()) { 3631 // We know that all PHIs in non-header blocks are converted into 3632 // selects, so we don't have to worry about the insertion order and we 3633 // can just use the builder. 3634 // At this point we generate the predication tree. There may be 3635 // duplications since this is a simple recursive scan, but future 3636 // optimizations will clean it up. 3637 3638 unsigned NumIncoming = P->getNumIncomingValues(); 3639 3640 // Generate a sequence of selects of the form: 3641 // SELECT(Mask3, In3, 3642 // SELECT(Mask2, In2, 3643 // ( ...))) 3644 for (unsigned In = 0; In < NumIncoming; In++) { 3645 VectorParts Cond = createEdgeMask(P->getIncomingBlock(In), 3646 P->getParent()); 3647 VectorParts &In0 = getVectorValue(P->getIncomingValue(In)); 3648 3649 for (unsigned part = 0; part < UF; ++part) { 3650 // We might have single edge PHIs (blocks) - use an identity 3651 // 'select' for the first PHI operand. 3652 if (In == 0) 3653 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], 3654 In0[part]); 3655 else 3656 // Select between the current value and the previous incoming edge 3657 // based on the incoming mask. 3658 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], 3659 Entry[part], "predphi"); 3660 } 3661 } 3662 return; 3663 } 3664 3665 // This PHINode must be an induction variable. 3666 // Make sure that we know about it. 3667 assert(Legal->getInductionVars()->count(P) && 3668 "Not an induction variable"); 3669 3670 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 3671 3672 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 3673 // which can be found from the original scalar operations. 3674 switch (II.getKind()) { 3675 case InductionDescriptor::IK_NoInduction: 3676 llvm_unreachable("Unknown induction"); 3677 case InductionDescriptor::IK_IntInduction: { 3678 assert(P->getType() == II.getStartValue()->getType() && 3679 "Types must match"); 3680 // Handle other induction variables that are now based on the 3681 // canonical one. 3682 Value *V = Induction; 3683 if (P != OldInduction) { 3684 V = Builder.CreateSExtOrTrunc(Induction, P->getType()); 3685 V = II.transform(Builder, V); 3686 V->setName("offset.idx"); 3687 } 3688 Value *Broadcasted = getBroadcastInstrs(V); 3689 // After broadcasting the induction variable we need to make the vector 3690 // consecutive by adding 0, 1, 2, etc. 3691 for (unsigned part = 0; part < UF; ++part) 3692 Entry[part] = getStepVector(Broadcasted, VF * part, II.getStepValue()); 3693 return; 3694 } 3695 case InductionDescriptor::IK_PtrInduction: 3696 // Handle the pointer induction variable case. 3697 assert(P->getType()->isPointerTy() && "Unexpected type."); 3698 // This is the normalized GEP that starts counting at zero. 3699 Value *PtrInd = Induction; 3700 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStepValue()->getType()); 3701 // This is the vector of results. Notice that we don't generate 3702 // vector geps because scalar geps result in better code. 
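    // (Hedged illustration: with VF = 4, lane L of unroll part P gets the
    // scalar GEP for index %ptr.ind + P * VF + L, named next.gep below, and
    // for VF > 1 the lane results are packed into a vector with
    // insertelement.)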
3703 for (unsigned part = 0; part < UF; ++part) { 3704 if (VF == 1) { 3705 int EltIndex = part; 3706 Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex); 3707 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 3708 Value *SclrGep = II.transform(Builder, GlobalIdx); 3709 SclrGep->setName("next.gep"); 3710 Entry[part] = SclrGep; 3711 continue; 3712 } 3713 3714 Value *VecVal = UndefValue::get(VectorType::get(P->getType(), VF)); 3715 for (unsigned int i = 0; i < VF; ++i) { 3716 int EltIndex = i + part * VF; 3717 Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex); 3718 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 3719 Value *SclrGep = II.transform(Builder, GlobalIdx); 3720 SclrGep->setName("next.gep"); 3721 VecVal = Builder.CreateInsertElement(VecVal, SclrGep, 3722 Builder.getInt32(i), 3723 "insert.gep"); 3724 } 3725 Entry[part] = VecVal; 3726 } 3727 return; 3728 } 3729 } 3730 3731 void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) { 3732 // For each instruction in the old loop. 3733 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 3734 VectorParts &Entry = WidenMap.get(&*it); 3735 3736 switch (it->getOpcode()) { 3737 case Instruction::Br: 3738 // Nothing to do for PHIs and BR, since we already took care of the 3739 // loop control flow instructions. 3740 continue; 3741 case Instruction::PHI: { 3742 // Vectorize PHINodes. 3743 widenPHIInstruction(&*it, Entry, UF, VF, PV); 3744 continue; 3745 }// End of PHI. 3746 3747 case Instruction::Add: 3748 case Instruction::FAdd: 3749 case Instruction::Sub: 3750 case Instruction::FSub: 3751 case Instruction::Mul: 3752 case Instruction::FMul: 3753 case Instruction::UDiv: 3754 case Instruction::SDiv: 3755 case Instruction::FDiv: 3756 case Instruction::URem: 3757 case Instruction::SRem: 3758 case Instruction::FRem: 3759 case Instruction::Shl: 3760 case Instruction::LShr: 3761 case Instruction::AShr: 3762 case Instruction::And: 3763 case Instruction::Or: 3764 case Instruction::Xor: { 3765 // Just widen binops. 3766 BinaryOperator *BinOp = dyn_cast<BinaryOperator>(it); 3767 setDebugLocFromInst(Builder, BinOp); 3768 VectorParts &A = getVectorValue(it->getOperand(0)); 3769 VectorParts &B = getVectorValue(it->getOperand(1)); 3770 3771 // Use this vector value for all users of the original instruction. 3772 for (unsigned Part = 0; Part < UF; ++Part) { 3773 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]); 3774 3775 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V)) 3776 VecOp->copyIRFlags(BinOp); 3777 3778 Entry[Part] = V; 3779 } 3780 3781 propagateMetadata(Entry, &*it); 3782 break; 3783 } 3784 case Instruction::Select: { 3785 // Widen selects. 3786 // If the selector is loop invariant we can create a select 3787 // instruction with a scalar condition. Otherwise, use vector-select. 3788 bool InvariantCond = SE->isLoopInvariant(SE->getSCEV(it->getOperand(0)), 3789 OrigLoop); 3790 setDebugLocFromInst(Builder, &*it); 3791 3792 // The condition can be loop invariant but still defined inside the 3793 // loop. This means that we can't just use the original 'cond' value. 3794 // We have to take the 'vectorized' value and pick the first lane. 3795 // Instcombine will make this a no-op. 3796 VectorParts &Cond = getVectorValue(it->getOperand(0)); 3797 VectorParts &Op0 = getVectorValue(it->getOperand(1)); 3798 VectorParts &Op1 = getVectorValue(it->getOperand(2)); 3799 3800 Value *ScalarCond = (VF == 1) ? 
Cond[0] : 3801 Builder.CreateExtractElement(Cond[0], Builder.getInt32(0)); 3802 3803 for (unsigned Part = 0; Part < UF; ++Part) { 3804 Entry[Part] = Builder.CreateSelect( 3805 InvariantCond ? ScalarCond : Cond[Part], 3806 Op0[Part], 3807 Op1[Part]); 3808 } 3809 3810 propagateMetadata(Entry, &*it); 3811 break; 3812 } 3813 3814 case Instruction::ICmp: 3815 case Instruction::FCmp: { 3816 // Widen compares. Generate vector compares. 3817 bool FCmp = (it->getOpcode() == Instruction::FCmp); 3818 CmpInst *Cmp = dyn_cast<CmpInst>(it); 3819 setDebugLocFromInst(Builder, &*it); 3820 VectorParts &A = getVectorValue(it->getOperand(0)); 3821 VectorParts &B = getVectorValue(it->getOperand(1)); 3822 for (unsigned Part = 0; Part < UF; ++Part) { 3823 Value *C = nullptr; 3824 if (FCmp) { 3825 C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]); 3826 cast<FCmpInst>(C)->copyFastMathFlags(&*it); 3827 } else { 3828 C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]); 3829 } 3830 Entry[Part] = C; 3831 } 3832 3833 propagateMetadata(Entry, &*it); 3834 break; 3835 } 3836 3837 case Instruction::Store: 3838 case Instruction::Load: 3839 vectorizeMemoryInstruction(&*it); 3840 break; 3841 case Instruction::ZExt: 3842 case Instruction::SExt: 3843 case Instruction::FPToUI: 3844 case Instruction::FPToSI: 3845 case Instruction::FPExt: 3846 case Instruction::PtrToInt: 3847 case Instruction::IntToPtr: 3848 case Instruction::SIToFP: 3849 case Instruction::UIToFP: 3850 case Instruction::Trunc: 3851 case Instruction::FPTrunc: 3852 case Instruction::BitCast: { 3853 CastInst *CI = dyn_cast<CastInst>(it); 3854 setDebugLocFromInst(Builder, &*it); 3855 /// Optimize the special case where the source is the induction 3856 /// variable. Notice that we can only optimize the 'trunc' case 3857 /// because: a. FP conversions lose precision, b. sext/zext may wrap, 3858 /// c. other casts depend on pointer size. 3859 if (CI->getOperand(0) == OldInduction && 3860 it->getOpcode() == Instruction::Trunc) { 3861 Value *ScalarCast = Builder.CreateCast(CI->getOpcode(), Induction, 3862 CI->getType()); 3863 Value *Broadcasted = getBroadcastInstrs(ScalarCast); 3864 InductionDescriptor II = 3865 Legal->getInductionVars()->lookup(OldInduction); 3866 Constant *Step = ConstantInt::getSigned( 3867 CI->getType(), II.getStepValue()->getSExtValue()); 3868 for (unsigned Part = 0; Part < UF; ++Part) 3869 Entry[Part] = getStepVector(Broadcasted, VF * Part, Step); 3870 propagateMetadata(Entry, &*it); 3871 break; 3872 } 3873 /// Vectorize casts. 3874 Type *DestTy = (VF == 1) ? CI->getType() : 3875 VectorType::get(CI->getType(), VF); 3876 3877 VectorParts &A = getVectorValue(it->getOperand(0)); 3878 for (unsigned Part = 0; Part < UF; ++Part) 3879 Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy); 3880 propagateMetadata(Entry, &*it); 3881 break; 3882 } 3883 3884 case Instruction::Call: { 3885 // Ignore dbg intrinsics. 
3886 if (isa<DbgInfoIntrinsic>(it)) 3887 break; 3888 setDebugLocFromInst(Builder, &*it); 3889 3890 Module *M = BB->getParent()->getParent(); 3891 CallInst *CI = cast<CallInst>(it); 3892 3893 StringRef FnName = CI->getCalledFunction()->getName(); 3894 Function *F = CI->getCalledFunction(); 3895 Type *RetTy = ToVectorTy(CI->getType(), VF); 3896 SmallVector<Type *, 4> Tys; 3897 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) 3898 Tys.push_back(ToVectorTy(CI->getArgOperand(i)->getType(), VF)); 3899 3900 Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI); 3901 if (ID && 3902 (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 3903 ID == Intrinsic::lifetime_start)) { 3904 scalarizeInstruction(&*it); 3905 break; 3906 } 3907 // The flag shows whether we use Intrinsic or a usual Call for vectorized 3908 // version of the instruction. 3909 // Is it beneficial to perform intrinsic call compared to lib call? 3910 bool NeedToScalarize; 3911 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 3912 bool UseVectorIntrinsic = 3913 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 3914 if (!UseVectorIntrinsic && NeedToScalarize) { 3915 scalarizeInstruction(&*it); 3916 break; 3917 } 3918 3919 for (unsigned Part = 0; Part < UF; ++Part) { 3920 SmallVector<Value *, 4> Args; 3921 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) { 3922 Value *Arg = CI->getArgOperand(i); 3923 // Some intrinsics have a scalar argument - don't replace it with a 3924 // vector. 3925 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) { 3926 VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i)); 3927 Arg = VectorArg[Part]; 3928 } 3929 Args.push_back(Arg); 3930 } 3931 3932 Function *VectorF; 3933 if (UseVectorIntrinsic) { 3934 // Use vector version of the intrinsic. 3935 Type *TysForDecl[] = {CI->getType()}; 3936 if (VF > 1) 3937 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 3938 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 3939 } else { 3940 // Use vector version of the library call. 3941 StringRef VFnName = TLI->getVectorizedFunction(FnName, VF); 3942 assert(!VFnName.empty() && "Vector function name is empty."); 3943 VectorF = M->getFunction(VFnName); 3944 if (!VectorF) { 3945 // Generate a declaration 3946 FunctionType *FTy = FunctionType::get(RetTy, Tys, false); 3947 VectorF = 3948 Function::Create(FTy, Function::ExternalLinkage, VFnName, M); 3949 VectorF->copyAttributesFrom(F); 3950 } 3951 } 3952 assert(VectorF && "Can't create vector function."); 3953 Entry[Part] = Builder.CreateCall(VectorF, Args); 3954 } 3955 3956 propagateMetadata(Entry, &*it); 3957 break; 3958 } 3959 3960 default: 3961 // All other instructions are unsupported. Scalarize them. 3962 scalarizeInstruction(&*it); 3963 break; 3964 }// end of switch. 3965 }// end of for_each instr. 3966 } 3967 3968 void InnerLoopVectorizer::updateAnalysis() { 3969 // Forget the original basic block. 3970 SE->forgetLoop(OrigLoop); 3971 3972 // Update the dominator tree information. 3973 assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) && 3974 "Entry does not dominate exit."); 3975 3976 for (unsigned I = 1, E = LoopBypassBlocks.size(); I != E; ++I) 3977 DT->addNewBlock(LoopBypassBlocks[I], LoopBypassBlocks[I-1]); 3978 DT->addNewBlock(LoopVectorPreHeader, LoopBypassBlocks.back()); 3979 3980 // We don't predicate stores by this point, so the vector body should be a 3981 // single loop. 
3982 assert(LoopVectorBody.size() == 1 && "Expected single block loop!"); 3983 DT->addNewBlock(LoopVectorBody[0], LoopVectorPreHeader); 3984 3985 DT->addNewBlock(LoopMiddleBlock, LoopVectorBody.back()); 3986 DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]); 3987 DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader); 3988 DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]); 3989 3990 DEBUG(DT->verifyDomTree()); 3991 } 3992 3993 /// \brief Check whether it is safe to if-convert this phi node. 3994 /// 3995 /// Phi nodes with constant expressions that can trap are not safe to if 3996 /// convert. 3997 static bool canIfConvertPHINodes(BasicBlock *BB) { 3998 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) { 3999 PHINode *Phi = dyn_cast<PHINode>(I); 4000 if (!Phi) 4001 return true; 4002 for (unsigned p = 0, e = Phi->getNumIncomingValues(); p != e; ++p) 4003 if (Constant *C = dyn_cast<Constant>(Phi->getIncomingValue(p))) 4004 if (C->canTrap()) 4005 return false; 4006 } 4007 return true; 4008 } 4009 4010 bool LoopVectorizationLegality::canVectorizeWithIfConvert() { 4011 if (!EnableIfConversion) { 4012 emitAnalysis(VectorizationReport() << "if-conversion is disabled"); 4013 return false; 4014 } 4015 4016 assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable"); 4017 4018 // A list of pointers that we can safely read and write to. 4019 SmallPtrSet<Value *, 8> SafePointes; 4020 4021 // Collect safe addresses. 4022 for (Loop::block_iterator BI = TheLoop->block_begin(), 4023 BE = TheLoop->block_end(); BI != BE; ++BI) { 4024 BasicBlock *BB = *BI; 4025 4026 if (blockNeedsPredication(BB)) 4027 continue; 4028 4029 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) { 4030 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 4031 SafePointes.insert(LI->getPointerOperand()); 4032 else if (StoreInst *SI = dyn_cast<StoreInst>(I)) 4033 SafePointes.insert(SI->getPointerOperand()); 4034 } 4035 } 4036 4037 // Collect the blocks that need predication. 4038 BasicBlock *Header = TheLoop->getHeader(); 4039 for (Loop::block_iterator BI = TheLoop->block_begin(), 4040 BE = TheLoop->block_end(); BI != BE; ++BI) { 4041 BasicBlock *BB = *BI; 4042 4043 // We don't support switch statements inside loops. 4044 if (!isa<BranchInst>(BB->getTerminator())) { 4045 emitAnalysis(VectorizationReport(BB->getTerminator()) 4046 << "loop contains a switch statement"); 4047 return false; 4048 } 4049 4050 // We must be able to predicate all blocks that need to be predicated. 4051 if (blockNeedsPredication(BB)) { 4052 if (!blockCanBePredicated(BB, SafePointes)) { 4053 emitAnalysis(VectorizationReport(BB->getTerminator()) 4054 << "control flow cannot be substituted for a select"); 4055 return false; 4056 } 4057 } else if (BB != Header && !canIfConvertPHINodes(BB)) { 4058 emitAnalysis(VectorizationReport(BB->getTerminator()) 4059 << "control flow cannot be substituted for a select"); 4060 return false; 4061 } 4062 } 4063 4064 // We can if-convert this loop. 4065 return true; 4066 } 4067 4068 bool LoopVectorizationLegality::canVectorize() { 4069 // We must have a loop in canonical form. Loops with indirectbr in them cannot 4070 // be canonicalized. 4071 if (!TheLoop->getLoopPreheader()) { 4072 emitAnalysis( 4073 VectorizationReport() << 4074 "loop control flow is not understood by vectorizer"); 4075 return false; 4076 } 4077 4078 // We can only vectorize innermost loops. 
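  // Note: in this check Loop::empty() means the loop has no subloops, i.e.
  // it is innermost; it says nothing about the loop body being empty.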
4079 if (!TheLoop->empty()) { 4080 emitAnalysis(VectorizationReport() << "loop is not the innermost loop"); 4081 return false; 4082 } 4083 4084 // We must have a single backedge. 4085 if (TheLoop->getNumBackEdges() != 1) { 4086 emitAnalysis( 4087 VectorizationReport() << 4088 "loop control flow is not understood by vectorizer"); 4089 return false; 4090 } 4091 4092 // We must have a single exiting block. 4093 if (!TheLoop->getExitingBlock()) { 4094 emitAnalysis( 4095 VectorizationReport() << 4096 "loop control flow is not understood by vectorizer"); 4097 return false; 4098 } 4099 4100 // We only handle bottom-tested loops, i.e. loop in which the condition is 4101 // checked at the end of each iteration. With that we can assume that all 4102 // instructions in the loop are executed the same number of times. 4103 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 4104 emitAnalysis( 4105 VectorizationReport() << 4106 "loop control flow is not understood by vectorizer"); 4107 return false; 4108 } 4109 4110 // We need to have a loop header. 4111 DEBUG(dbgs() << "LV: Found a loop: " << 4112 TheLoop->getHeader()->getName() << '\n'); 4113 4114 // Check if we can if-convert non-single-bb loops. 4115 unsigned NumBlocks = TheLoop->getNumBlocks(); 4116 if (NumBlocks != 1 && !canVectorizeWithIfConvert()) { 4117 DEBUG(dbgs() << "LV: Can't if-convert the loop.\n"); 4118 return false; 4119 } 4120 4121 // ScalarEvolution needs to be able to find the exit count. 4122 const SCEV *ExitCount = SE->getBackedgeTakenCount(TheLoop); 4123 if (ExitCount == SE->getCouldNotCompute()) { 4124 emitAnalysis(VectorizationReport() << 4125 "could not determine number of loop iterations"); 4126 DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n"); 4127 return false; 4128 } 4129 4130 // Check if we can vectorize the instructions and CFG in this loop. 4131 if (!canVectorizeInstrs()) { 4132 DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n"); 4133 return false; 4134 } 4135 4136 // Go over each instruction and look at memory deps. 4137 if (!canVectorizeMemory()) { 4138 DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n"); 4139 return false; 4140 } 4141 4142 // Collect all of the variables that remain uniform after vectorization. 4143 collectLoopUniforms(); 4144 4145 DEBUG(dbgs() << "LV: We can vectorize this loop" 4146 << (LAI->getRuntimePointerChecking()->Need 4147 ? " (with a runtime bound check)" 4148 : "") 4149 << "!\n"); 4150 4151 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 4152 4153 // If an override option has been passed in for interleaved accesses, use it. 4154 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 4155 UseInterleaved = EnableInterleavedMemAccesses; 4156 4157 // Analyze interleaved memory accesses. 4158 if (UseInterleaved) 4159 InterleaveInfo.analyzeInterleaving(Strides); 4160 4161 unsigned SCEVThreshold = VectorizeSCEVCheckThreshold; 4162 if (Hints->getForce() == LoopVectorizeHints::FK_Enabled) 4163 SCEVThreshold = PragmaVectorizeSCEVCheckThreshold; 4164 4165 if (Preds.getComplexity() > SCEVThreshold) { 4166 emitAnalysis(VectorizationReport() 4167 << "Too many SCEV assumptions need to be made and checked " 4168 << "at runtime"); 4169 DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n"); 4170 return false; 4171 } 4172 4173 // Okay! We can vectorize. At this point we don't have any other mem analysis 4174 // which may limit our maximum vectorization factor, so just return true with 4175 // no restrictions. 
  return true;
}

static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
  if (Ty->isPointerTy())
    return DL.getIntPtrType(Ty);

  // It is possible that char or short induction variables overflow when we
  // ask for the loop's trip count; work around this by widening the type.
  if (Ty->getScalarSizeInBits() < 32)
    return Type::getInt32Ty(Ty->getContext());

  return Ty;
}

static Type* getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
  Ty0 = convertPointerToIntegerType(DL, Ty0);
  Ty1 = convertPointerToIntegerType(DL, Ty1);
  if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
    return Ty0;
  return Ty1;
}

/// \brief Check that the instruction has outside loop users and is not an
/// identified reduction variable.
static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
                               SmallPtrSetImpl<Value *> &Reductions) {
  // Reduction instructions are allowed to have exit users. All other
  // instructions must not have external users.
  if (!Reductions.count(Inst))
    // Check that all of the users of the instruction are inside the loop.
    for (User *U : Inst->users()) {
      Instruction *UI = cast<Instruction>(U);
      // This user may be a reduction exit value.
      if (!TheLoop->contains(UI)) {
        DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
        return true;
      }
    }
  return false;
}

bool LoopVectorizationLegality::canVectorizeInstrs() {
  BasicBlock *Header = TheLoop->getHeader();

  // Look for the attribute signaling the absence of NaNs.
  Function &F = *Header->getParent();
  const DataLayout &DL = F.getParent()->getDataLayout();
  if (F.hasFnAttribute("no-nans-fp-math"))
    HasFunNoNaNAttr =
        F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";

  // For each block in the loop.
  for (Loop::block_iterator bb = TheLoop->block_begin(),
       be = TheLoop->block_end(); bb != be; ++bb) {

    // Scan the instructions in the block and look for hazards.
    for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
         ++it) {

      if (PHINode *Phi = dyn_cast<PHINode>(it)) {
        Type *PhiTy = Phi->getType();
        // Check that this PHI type is allowed.
        if (!PhiTy->isIntegerTy() &&
            !PhiTy->isFloatingPointTy() &&
            !PhiTy->isPointerTy()) {
          emitAnalysis(VectorizationReport(&*it)
                       << "loop control flow is not understood by vectorizer");
          DEBUG(dbgs() << "LV: Found a non-int non-pointer PHI.\n");
          return false;
        }

        // If this PHINode is not in the header block, then we know that we
        // can convert it to select during if-conversion. No need to check if
        // the PHIs in this block are induction or reduction variables.
        if (*bb != Header) {
          // Check that this instruction has no outside users or is an
          // identified reduction value with an outside user.
          if (!hasOutsideLoopUser(TheLoop, &*it, AllowedExit))
            continue;
          emitAnalysis(VectorizationReport(&*it) <<
                       "value could not be identified as "
                       "an induction or reduction variable");
          return false;
        }

        // We only allow if-converted PHIs with exactly two incoming values.
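        // (Rationale, hedged: with two incoming values such a PHI
        // if-converts to a single select(edge-mask, in0, in1); more incoming
        // values would require a select tree that this legality check does
        // not model.)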
4263 if (Phi->getNumIncomingValues() != 2) { 4264 emitAnalysis(VectorizationReport(&*it) 4265 << "control flow not understood by vectorizer"); 4266 DEBUG(dbgs() << "LV: Found an invalid PHI.\n"); 4267 return false; 4268 } 4269 4270 InductionDescriptor ID; 4271 if (InductionDescriptor::isInductionPHI(Phi, SE, ID)) { 4272 Inductions[Phi] = ID; 4273 // Get the widest type. 4274 if (!WidestIndTy) 4275 WidestIndTy = convertPointerToIntegerType(DL, PhiTy); 4276 else 4277 WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy); 4278 4279 // Int inductions are special because we only allow one IV. 4280 if (ID.getKind() == InductionDescriptor::IK_IntInduction && 4281 ID.getStepValue()->isOne() && 4282 isa<Constant>(ID.getStartValue()) && 4283 cast<Constant>(ID.getStartValue())->isNullValue()) { 4284 // Use the phi node with the widest type as induction. Use the last 4285 // one if there are multiple (no good reason for doing this other 4286 // than it is expedient). We've checked that it begins at zero and 4287 // steps by one, so this is a canonical induction variable. 4288 if (!Induction || PhiTy == WidestIndTy) 4289 Induction = Phi; 4290 } 4291 4292 DEBUG(dbgs() << "LV: Found an induction variable.\n"); 4293 4294 // Until we explicitly handle the case of an induction variable with 4295 // an outside loop user we have to give up vectorizing this loop. 4296 if (hasOutsideLoopUser(TheLoop, &*it, AllowedExit)) { 4297 emitAnalysis(VectorizationReport(&*it) << 4298 "use of induction value outside of the " 4299 "loop is not handled by vectorizer"); 4300 return false; 4301 } 4302 4303 continue; 4304 } 4305 4306 if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, 4307 Reductions[Phi])) { 4308 if (Reductions[Phi].hasUnsafeAlgebra()) 4309 Requirements->addUnsafeAlgebraInst( 4310 Reductions[Phi].getUnsafeAlgebraInst()); 4311 AllowedExit.insert(Reductions[Phi].getLoopExitInstr()); 4312 continue; 4313 } 4314 4315 emitAnalysis(VectorizationReport(&*it) << 4316 "value that could not be identified as " 4317 "reduction is used outside the loop"); 4318 DEBUG(dbgs() << "LV: Found an unidentified PHI."<< *Phi <<"\n"); 4319 return false; 4320 }// end of PHI handling 4321 4322 // We handle calls that: 4323 // * Are debug info intrinsics. 4324 // * Have a mapping to an IR intrinsic. 4325 // * Have a vector version available. 4326 CallInst *CI = dyn_cast<CallInst>(it); 4327 if (CI && !getIntrinsicIDForCall(CI, TLI) && !isa<DbgInfoIntrinsic>(CI) && 4328 !(CI->getCalledFunction() && TLI && 4329 TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) { 4330 emitAnalysis(VectorizationReport(&*it) 4331 << "call instruction cannot be vectorized"); 4332 DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n"); 4333 return false; 4334 } 4335 4336 // Intrinsics such as powi,cttz and ctlz are legal to vectorize if the 4337 // second argument is the same (i.e. loop invariant) 4338 if (CI && 4339 hasVectorInstrinsicScalarOpd(getIntrinsicIDForCall(CI, TLI), 1)) { 4340 if (!SE->isLoopInvariant(SE->getSCEV(CI->getOperand(1)), TheLoop)) { 4341 emitAnalysis(VectorizationReport(&*it) 4342 << "intrinsic instruction cannot be vectorized"); 4343 DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n"); 4344 return false; 4345 } 4346 } 4347 4348 // Check that the instruction return type is vectorizable. 4349 // Also, we can't vectorize extractelement instructions. 
4350 if ((!VectorType::isValidElementType(it->getType()) && 4351 !it->getType()->isVoidTy()) || isa<ExtractElementInst>(it)) { 4352 emitAnalysis(VectorizationReport(&*it) 4353 << "instruction return type cannot be vectorized"); 4354 DEBUG(dbgs() << "LV: Found unvectorizable type.\n"); 4355 return false; 4356 } 4357 4358 // Check that the stored type is vectorizable. 4359 if (StoreInst *ST = dyn_cast<StoreInst>(it)) { 4360 Type *T = ST->getValueOperand()->getType(); 4361 if (!VectorType::isValidElementType(T)) { 4362 emitAnalysis(VectorizationReport(ST) << 4363 "store instruction cannot be vectorized"); 4364 return false; 4365 } 4366 if (EnableMemAccessVersioning) 4367 collectStridedAccess(ST); 4368 } 4369 4370 if (EnableMemAccessVersioning) 4371 if (LoadInst *LI = dyn_cast<LoadInst>(it)) 4372 collectStridedAccess(LI); 4373 4374 // Reduction instructions are allowed to have exit users. 4375 // All other instructions must not have external users. 4376 if (hasOutsideLoopUser(TheLoop, &*it, AllowedExit)) { 4377 emitAnalysis(VectorizationReport(&*it) << 4378 "value cannot be used outside the loop"); 4379 return false; 4380 } 4381 4382 } // next instr. 4383 4384 } 4385 4386 if (!Induction) { 4387 DEBUG(dbgs() << "LV: Did not find one integer induction var.\n"); 4388 if (Inductions.empty()) { 4389 emitAnalysis(VectorizationReport() 4390 << "loop induction variable could not be identified"); 4391 return false; 4392 } 4393 } 4394 4395 // Now we know the widest induction type, check if our found induction 4396 // is the same size. If it's not, unset it here and InnerLoopVectorizer 4397 // will create another. 4398 if (Induction && WidestIndTy != Induction->getType()) 4399 Induction = nullptr; 4400 4401 return true; 4402 } 4403 4404 void LoopVectorizationLegality::collectStridedAccess(Value *MemAccess) { 4405 Value *Ptr = nullptr; 4406 if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess)) 4407 Ptr = LI->getPointerOperand(); 4408 else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess)) 4409 Ptr = SI->getPointerOperand(); 4410 else 4411 return; 4412 4413 Value *Stride = getStrideFromPointer(Ptr, SE, TheLoop); 4414 if (!Stride) 4415 return; 4416 4417 DEBUG(dbgs() << "LV: Found a strided access that we can version"); 4418 DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n"); 4419 Strides[Ptr] = Stride; 4420 StrideSet.insert(Stride); 4421 } 4422 4423 void LoopVectorizationLegality::collectLoopUniforms() { 4424 // We now know that the loop is vectorizable! 4425 // Collect variables that will remain uniform after vectorization. 4426 std::vector<Value*> Worklist; 4427 BasicBlock *Latch = TheLoop->getLoopLatch(); 4428 4429 // Start with the conditional branch and walk up the block. 4430 Worklist.push_back(Latch->getTerminator()->getOperand(0)); 4431 4432 // Also add all consecutive pointer values; these values will be uniform 4433 // after vectorization (and subsequent cleanup) and, until revectorization is 4434 // supported, all dependencies must also be uniform. 4435 for (Loop::block_iterator B = TheLoop->block_begin(), 4436 BE = TheLoop->block_end(); B != BE; ++B) 4437 for (BasicBlock::iterator I = (*B)->begin(), IE = (*B)->end(); 4438 I != IE; ++I) 4439 if (I->getType()->isPointerTy() && isConsecutivePtr(&*I)) 4440 Worklist.insert(Worklist.end(), I->op_begin(), I->op_end()); 4441 4442 while (!Worklist.empty()) { 4443 Instruction *I = dyn_cast<Instruction>(Worklist.back()); 4444 Worklist.pop_back(); 4445 4446 // Look at instructions inside this loop. 4447 // Stop when reaching PHI nodes. 
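    // (Hedged example: for a[i] = b[i] + 1, the compare feeding the latch
    // branch and the operands of the consecutive address computations stay
    // uniform -- a single scalar copy serves all vector lanes.)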
4448 // TODO: we need to follow values all over the loop, not only in this block. 4449 if (!I || !TheLoop->contains(I) || isa<PHINode>(I)) 4450 continue; 4451 4452 // This is a known uniform. 4453 Uniforms.insert(I); 4454 4455 // Insert all operands. 4456 Worklist.insert(Worklist.end(), I->op_begin(), I->op_end()); 4457 } 4458 } 4459 4460 bool LoopVectorizationLegality::canVectorizeMemory() { 4461 LAI = &LAA->getInfo(TheLoop, Strides); 4462 auto &OptionalReport = LAI->getReport(); 4463 if (OptionalReport) 4464 emitAnalysis(VectorizationReport(*OptionalReport)); 4465 if (!LAI->canVectorizeMemory()) 4466 return false; 4467 4468 if (LAI->hasStoreToLoopInvariantAddress()) { 4469 emitAnalysis( 4470 VectorizationReport() 4471 << "write to a loop invariant address could not be vectorized"); 4472 DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n"); 4473 return false; 4474 } 4475 4476 Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks()); 4477 Preds.add(&LAI->Preds); 4478 4479 return true; 4480 } 4481 4482 bool LoopVectorizationLegality::isInductionVariable(const Value *V) { 4483 Value *In0 = const_cast<Value*>(V); 4484 PHINode *PN = dyn_cast_or_null<PHINode>(In0); 4485 if (!PN) 4486 return false; 4487 4488 return Inductions.count(PN); 4489 } 4490 4491 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) { 4492 return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT); 4493 } 4494 4495 bool LoopVectorizationLegality::blockCanBePredicated(BasicBlock *BB, 4496 SmallPtrSetImpl<Value *> &SafePtrs) { 4497 4498 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 4499 // Check that we don't have a constant expression that can trap as operand. 4500 for (Instruction::op_iterator OI = it->op_begin(), OE = it->op_end(); 4501 OI != OE; ++OI) { 4502 if (Constant *C = dyn_cast<Constant>(*OI)) 4503 if (C->canTrap()) 4504 return false; 4505 } 4506 // We might be able to hoist the load. 4507 if (it->mayReadFromMemory()) { 4508 LoadInst *LI = dyn_cast<LoadInst>(it); 4509 if (!LI) 4510 return false; 4511 if (!SafePtrs.count(LI->getPointerOperand())) { 4512 if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand())) { 4513 MaskedOp.insert(LI); 4514 continue; 4515 } 4516 return false; 4517 } 4518 } 4519 4520 // We don't predicate stores at the moment. 4521 if (it->mayWriteToMemory()) { 4522 StoreInst *SI = dyn_cast<StoreInst>(it); 4523 // We only support predication of stores in basic blocks with one 4524 // predecessor. 4525 if (!SI) 4526 return false; 4527 4528 bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0); 4529 bool isSinglePredecessor = SI->getParent()->getSinglePredecessor(); 4530 4531 if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr || 4532 !isSinglePredecessor) { 4533 // Build a masked store if it is legal for the target, otherwise 4534 // scalarize the block. 4535 bool isLegalMaskedOp = 4536 isLegalMaskedStore(SI->getValueOperand()->getType(), 4537 SI->getPointerOperand()); 4538 if (isLegalMaskedOp) { 4539 --NumPredStores; 4540 MaskedOp.insert(SI); 4541 continue; 4542 } 4543 return false; 4544 } 4545 } 4546 if (it->mayThrow()) 4547 return false; 4548 4549 // The instructions below can trap. 
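// For example, in 'for (i = 0; i < n; ++i) if (b[i] != 0) q[i] = a[i] / b[i];'
// the SDiv only executes when b[i] is known non-zero; executing it
// speculatively for the masked-off lanes could divide by zero, so a block
// containing such an instruction cannot be predicated.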
4550 switch (it->getOpcode()) {
4551 default: continue;
4552 case Instruction::UDiv:
4553 case Instruction::SDiv:
4554 case Instruction::URem:
4555 case Instruction::SRem:
4556 return false;
4557 }
4558 }
4559
4560 return true;
4561 }
4562
4563 void InterleavedAccessInfo::collectConstStridedAccesses(
4564 MapVector<Instruction *, StrideDescriptor> &StrideAccesses,
4565 const ValueToValueMap &Strides) {
4566 // Holds load/store instructions in program order.
4567 SmallVector<Instruction *, 16> AccessList;
4568
4569 for (auto *BB : TheLoop->getBlocks()) {
4570 bool IsPred = LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
4571
4572 for (auto &I : *BB) {
4573 if (!isa<LoadInst>(&I) && !isa<StoreInst>(&I))
4574 continue;
4575 // FIXME: Currently we can't handle mixed accesses and predicated accesses.
4576 if (IsPred)
4577 return;
4578
4579 AccessList.push_back(&I);
4580 }
4581 }
4582
4583 if (AccessList.empty())
4584 return;
4585
4586 auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();
4587 for (auto I : AccessList) {
4588 LoadInst *LI = dyn_cast<LoadInst>(I);
4589 StoreInst *SI = dyn_cast<StoreInst>(I);
4590
4591 Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();
4592 int Stride = isStridedPtr(SE, Ptr, TheLoop, Strides, Preds);
4593
4594 // The factor of the corresponding interleave group.
4595 unsigned Factor = std::abs(Stride);
4596
4597 // Ignore the access if the factor is too small or too large.
4598 if (Factor < 2 || Factor > MaxInterleaveGroupFactor)
4599 continue;
4600
4601 const SCEV *Scev = replaceSymbolicStrideSCEV(SE, Strides, Preds, Ptr);
4602 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
4603 unsigned Size = DL.getTypeAllocSize(PtrTy->getElementType());
4604
4605 // An alignment of 0 means target ABI alignment.
4606 unsigned Align = LI ? LI->getAlignment() : SI->getAlignment();
4607 if (!Align)
4608 Align = DL.getABITypeAlignment(PtrTy->getElementType());
4609
4610 StrideAccesses[I] = StrideDescriptor(Stride, Scev, Size, Align);
4611 }
4612 }
4613
4614 // Analyze interleaved accesses and collect them into interleave groups.
4615 //
4616 // Note that vectorizing interleaved groups reorders instructions and may
4617 // break dependences. However, the memory dependence check guarantees that
4618 // there is no overlap between two pointers of different strides, element
4619 // sizes, or underlying bases.
4620 //
4621 // For pointers sharing the same stride, element size, and underlying base,
4622 // there is no need to worry about Read-After-Write or Write-After-Read
4623 // dependences.
4624 //
4625 // E.g. The RAW dependence: A[i] = a;
4626 // b = A[i];
4627 // This cannot occur, because it would be a store-load forwarding conflict,
4628 // which has already been checked and forbidden by the dependence check.
4629 //
4630 // E.g. The WAR dependence: a = A[i]; // (1)
4631 // A[i] = b; // (2)
4632 // The store group of (2) is always inserted at or below (2), and the load group
4633 // of (1) is always inserted at or above (1). The dependence is therefore safe.
4634 void InterleavedAccessInfo::analyzeInterleaving(
4635 const ValueToValueMap &Strides) {
4636 DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
4637
4638 // Holds all the stride accesses.
4639 MapVector<Instruction *, StrideDescriptor> StrideAccesses;
4640 collectConstStridedAccesses(StrideAccesses, Strides);
4641
4642 if (StrideAccesses.empty())
4643 return;
4644
4645 // Holds all interleaved store groups temporarily.
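// As an illustration: in 'for (i = 0; i < n; ++i) { s += A[2*i]; s += A[2*i+1]; }'
// the two loads stride by two elements and form a single load group of
// factor 2. Store groups are tracked separately below because a store group
// with gaps must be released at the end of the analysis: we must not write
// to locations the original loop skips.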
4646 SmallSetVector<InterleaveGroup *, 4> StoreGroups;
4647
4648 // Search for load-load/store-store pairs (B, A) in bottom-up order and try
4649 // to insert B into the interleave group of A according to 3 rules:
4650 // 1. A and B have the same stride.
4651 // 2. A and B have the same memory object size.
4652 // 3. B belongs to the group according to the distance.
4653 //
4654 // The bottom-up order avoids breaking the Write-After-Write dependences
4655 // between two pointers of the same base.
4656 // E.g. A[i] = a; (1)
4657 // A[i] = b; (2)
4658 // A[i+1] = c; (3)
4659 // We form the group (2)+(3) first, so (1) can only form groups with accesses
4660 // above (1), which guarantees that (1) always stays above (2).
4661 for (auto I = StrideAccesses.rbegin(), E = StrideAccesses.rend(); I != E;
4662 ++I) {
4663 Instruction *A = I->first;
4664 StrideDescriptor DesA = I->second;
4665
4666 InterleaveGroup *Group = getInterleaveGroup(A);
4667 if (!Group) {
4668 DEBUG(dbgs() << "LV: Creating an interleave group with:" << *A << '\n');
4669 Group = createInterleaveGroup(A, DesA.Stride, DesA.Align);
4670 }
4671
4672 if (A->mayWriteToMemory())
4673 StoreGroups.insert(Group);
4674
4675 for (auto II = std::next(I); II != E; ++II) {
4676 Instruction *B = II->first;
4677 StrideDescriptor DesB = II->second;
4678
4679 // Ignore B if it is already in a group or is a different kind of memory operation.
4680 if (isInterleaved(B) || A->mayReadFromMemory() != B->mayReadFromMemory())
4681 continue;
4682
4683 // Check rules 1 and 2.
4684 if (DesB.Stride != DesA.Stride || DesB.Size != DesA.Size)
4685 continue;
4686
4687 // Calculate the distance and prepare for rule 3.
4688 const SCEVConstant *DistToA =
4689 dyn_cast<SCEVConstant>(SE->getMinusSCEV(DesB.Scev, DesA.Scev));
4690 if (!DistToA)
4691 continue;
4692
4693 int DistanceToA = DistToA->getValue()->getValue().getSExtValue();
4694
4695 // Skip if the distance is not a multiple of the size, as then A and B
4696 // cannot be in the same group.
4697 if (DistanceToA % static_cast<int>(DesA.Size))
4698 continue;
4699
4700 // The index of B is the index of A plus B's index relative to A.
4701 int IndexB =
4702 Group->getIndex(A) + DistanceToA / static_cast<int>(DesA.Size);
4703
4704 // Try to insert B into the group.
4705 if (Group->insertMember(B, IndexB, DesB.Align)) {
4706 DEBUG(dbgs() << "LV: Inserted:" << *B << '\n'
4707 << " into the interleave group with" << *A << '\n');
4708 InterleaveGroupMap[B] = Group;
4709
4710 // Set the first load in program order as the insert position.
4711 if (B->mayReadFromMemory())
4712 Group->setInsertPos(B);
4713 }
4714 } // Iteration on instruction B
4715 } // Iteration on instruction A
4716
4717 // Remove interleaved store groups with gaps.
4718 for (InterleaveGroup *Group : StoreGroups)
4719 if (Group->getNumMembers() != Group->getFactor())
4720 releaseGroup(Group);
4721 }
4722
4723 LoopVectorizationCostModel::VectorizationFactor
4724 LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
4725 // Width 1 means no vectorization.
4726 VectorizationFactor Factor = { 1U, 0U };
4727 if (OptForSize && Legal->getRuntimePointerChecking()->Need) {
4728 emitAnalysis(VectorizationReport() <<
4729 "runtime pointer checks needed. Enable vectorization of this "
4730 "loop with '#pragma clang loop vectorize(enable)' when "
4731 "compiling with -Os/-Oz");
4732 DEBUG(dbgs() <<
4733 "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
4734 return Factor;
4735 }
4736
4737 if (!EnableCondStoresVectorization && Legal->getNumPredStores()) {
4738 emitAnalysis(VectorizationReport() <<
4739 "store that is conditionally executed prevents vectorization");
4740 DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
4741 return Factor;
4742 }
4743
4744 // Find the trip count.
4745 unsigned TC = SE->getSmallConstantTripCount(TheLoop);
4746 DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
4747
4748 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
4749 unsigned SmallestType, WidestType;
4750 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
4751 unsigned WidestRegister = TTI.getRegisterBitWidth(true);
4752 unsigned MaxSafeDepDist = -1U;
4753 if (Legal->getMaxSafeDepDistBytes() != -1U)
4754 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
4755 WidestRegister = ((WidestRegister < MaxSafeDepDist) ?
4756 WidestRegister : MaxSafeDepDist);
4757 unsigned MaxVectorSize = WidestRegister / WidestType;
4758
4759 DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
4760 << WidestType << " bits.\n");
4761 DEBUG(dbgs() << "LV: The Widest register is: "
4762 << WidestRegister << " bits.\n");
4763
4764 if (MaxVectorSize == 0) {
4765 DEBUG(dbgs() << "LV: The target has no vector registers.\n");
4766 MaxVectorSize = 1;
4767 }
4768
4769 assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
4770 " into one vector!");
4771
4772 unsigned VF = MaxVectorSize;
4773 if (MaximizeBandwidth && !OptForSize) {
4774 // Collect all viable vectorization factors.
4775 SmallVector<unsigned, 8> VFs;
4776 unsigned NewMaxVectorSize = WidestRegister / SmallestType;
4777 for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2)
4778 VFs.push_back(VS);
4779
4780 // For each VF calculate its register usage.
4781 auto RUs = calculateRegisterUsage(VFs);
4782
4783 // Select the largest VF that doesn't require more registers than the
4784 // target provides.
4785 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
4786 for (int i = RUs.size() - 1; i >= 0; --i) {
4787 if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
4788 VF = VFs[i];
4789 break;
4790 }
4791 }
4792 }
4793
4794 // If we optimize the program for size, avoid creating the tail loop.
4795 if (OptForSize) {
4796 // If we are unable to calculate the trip count, then don't try to vectorize.
4797 if (TC < 2) {
4798 emitAnalysis
4799 (VectorizationReport() <<
4800 "unable to calculate the loop count due to complex control flow");
4801 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
4802 return Factor;
4803 }
4804
4805 // Find the maximum SIMD width that can fit within the trip count.
4806 VF = TC % MaxVectorSize;
4807
4808 if (VF == 0)
4809 VF = MaxVectorSize;
4810 else {
4811 // If the trip count that we found modulo the vectorization factor is not
4812 // zero then we require a tail.
4813 emitAnalysis(VectorizationReport() <<
4814 "cannot optimize for size and vectorize at the "
4815 "same time. Enable vectorization of this loop "
4816 "with '#pragma clang loop vectorize(enable)' "
4817 "when compiling with -Os/-Oz");
4818 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
4819 return Factor;
4820 }
4821 }
4822
4823 int UserVF = Hints->getWidth();
4824 if (UserVF != 0) {
4825 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
4826 DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
4827
4828 Factor.Width = UserVF;
4829 return Factor;
4830 }
4831
4832 float Cost = expectedCost(1);
4833 #ifndef NDEBUG
4834 const float ScalarCost = Cost;
4835 #endif /* NDEBUG */
4836 unsigned Width = 1;
4837 DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
4838
4839 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
4840 // Ignore the scalar width, because the user explicitly wants vectorization.
4841 if (ForceVectorization && VF > 1) {
4842 Width = 2;
4843 Cost = expectedCost(Width) / (float)Width;
4844 }
4845
4846 for (unsigned i = 2; i <= VF; i *= 2) {
4847 // Notice that the vector loop needs to be executed fewer times, so
4848 // we need to divide the cost of the vector loop by the width of
4849 // the vector elements.
4850 float VectorCost = expectedCost(i) / (float)i;
4851 DEBUG(dbgs() << "LV: Vector loop of width " << i << " costs: " <<
4852 (int)VectorCost << ".\n");
4853 if (VectorCost < Cost) {
4854 Cost = VectorCost;
4855 Width = i;
4856 }
4857 }
4858
4859 DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
4860 << "LV: Vectorization seems to be not beneficial, "
4861 << "but was forced by a user.\n");
4862 DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
4863 Factor.Width = Width;
4864 Factor.Cost = Width * Cost;
4865 return Factor;
4866 }
4867
4868 std::pair<unsigned, unsigned>
4869 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
4870 unsigned MinWidth = -1U;
4871 unsigned MaxWidth = 8;
4872 const DataLayout &DL = TheFunction->getParent()->getDataLayout();
4873
4874 // For each block.
4875 for (Loop::block_iterator bb = TheLoop->block_begin(),
4876 be = TheLoop->block_end(); bb != be; ++bb) {
4877 BasicBlock *BB = *bb;
4878
4879 // For each instruction in the loop.
4880 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
4881 Type *T = it->getType();
4882
4883 // Skip ignored values.
4884 if (ValuesToIgnore.count(&*it))
4885 continue;
4886
4887 // Only examine Loads, Stores and PHINodes.
4888 if (!isa<LoadInst>(it) && !isa<StoreInst>(it) && !isa<PHINode>(it))
4889 continue;
4890
4891 // Examine PHI nodes that are reduction variables. Update the type to
4892 // account for the recurrence type.
4893 if (PHINode *PN = dyn_cast<PHINode>(it)) {
4894 if (!Legal->isReductionVariable(PN))
4895 continue;
4896 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
4897 T = RdxDesc.getRecurrenceType();
4898 }
4899
4900 // Examine the stored values.
4901 if (StoreInst *ST = dyn_cast<StoreInst>(it))
4902 T = ST->getValueOperand()->getType();
4903
4904 // Ignore loaded pointer types and stored pointer types that are not
4905 // consecutive. However, we do want to take consecutive stores/loads of
4906 // pointer vectors into account.
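// For example, 'p[i] = &a[i]' stores consecutive pointers, so the pointer
// width participates in the widest-type computation; a pointer loaded only
// to feed an indirect (gather-like) access is skipped here.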
4907 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&*it))
4908 continue;
4909
4910 MinWidth = std::min(MinWidth,
4911 (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
4912 MaxWidth = std::max(MaxWidth,
4913 (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
4914 }
4915 }
4916
4917 return {MinWidth, MaxWidth};
4918 }
4919
4920 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
4921 unsigned VF,
4922 unsigned LoopCost) {
4923
4924 // -- The interleave heuristics --
4925 // We interleave the loop in order to expose ILP and reduce the loop overhead.
4926 // There are many micro-architectural considerations that we can't predict
4927 // at this level. For example, frontend pressure (on decode or fetch) due to
4928 // code size, or the number and capabilities of the execution ports.
4929 //
4930 // We use the following heuristics to select the interleave count:
4931 // 1. If the code has reductions, then we interleave to break the cross
4932 // iteration dependency.
4933 // 2. If the loop is really small, then we interleave to reduce the loop
4934 // overhead.
4935 // 3. We don't interleave if we think that we will spill registers to memory
4936 // due to the increased register pressure.
4937
4938 // When we optimize for size, we don't interleave.
4939 if (OptForSize)
4940 return 1;
4941
4942 // If a safe dependence distance already limits the width, don't interleave.
4943 if (Legal->getMaxSafeDepDistBytes() != -1U)
4944 return 1;
4945
4946 // Do not interleave loops with a relatively small trip count.
4947 unsigned TC = SE->getSmallConstantTripCount(TheLoop);
4948 if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
4949 return 1;
4950
4951 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
4952 DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters <<
4953 " registers\n");
4954
4955 if (VF == 1) {
4956 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
4957 TargetNumRegisters = ForceTargetNumScalarRegs;
4958 } else {
4959 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
4960 TargetNumRegisters = ForceTargetNumVectorRegs;
4961 }
4962
4963 RegisterUsage R = calculateRegisterUsage({VF})[0];
4964 // We divide by these values below, so make sure each is at least one to
4965 // avoid division by zero.
4966 R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
4967 R.NumInstructions = std::max(R.NumInstructions, 1U);
4968
4969 // We calculate the interleave count using the following formula.
4970 // Subtract the number of loop invariants from the number of available
4971 // registers. These registers are used by all of the interleaved instances.
4972 // Next, divide the remaining registers by the number of registers that is
4973 // required by the loop, in order to estimate how many parallel instances
4974 // fit without causing spills. All of this is rounded down if necessary to be
4975 // a power of two. We want a power-of-two interleave count to simplify any
4976 // addressing operations and alignment considerations.
4977 unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
4978 R.MaxLocalUsers);
4979
4980 // Don't count the induction variable as interleaved.
4981 if (EnableIndVarRegisterHeur)
4982 IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
4983 std::max(1U, (R.MaxLocalUsers - 1)));
4984
4985 // Clamp the interleave ranges to reasonable counts.
4986 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
4987
4988 // Check if the user has overridden the max.
4989 if (VF == 1) {
4990 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
4991 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
4992 } else {
4993 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
4994 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
4995 }
4996
4997 // If we did not calculate the cost for VF (because the user selected the VF)
4998 // then we calculate the cost of VF here.
4999 if (LoopCost == 0)
5000 LoopCost = expectedCost(VF);
5001
5002 // Clamp the calculated IC to be between 1 and the max interleave count
5003 // that the target allows.
5004 if (IC > MaxInterleaveCount)
5005 IC = MaxInterleaveCount;
5006 else if (IC < 1)
5007 IC = 1;
5008
5009 // Interleave if we vectorized this loop and there is a reduction that could
5010 // benefit from interleaving.
5011 if (VF > 1 && Legal->getReductionVars()->size()) {
5012 DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5013 return IC;
5014 }
5015
5016 // Note that if we've already vectorized the loop we will have done the
5017 // runtime check and so interleaving won't require further checks.
5018 bool InterleavingRequiresRuntimePointerCheck =
5019 (VF == 1 && Legal->getRuntimePointerChecking()->Need);
5020
5021 // We want to interleave small loops in order to reduce the loop overhead and
5022 // potentially expose ILP opportunities.
5023 DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
5024 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
5025 // We assume that the cost overhead is 1 and we use the cost model
5026 // to estimate the cost of the loop and interleave until the cost of the
5027 // loop overhead is about 5% of the cost of the loop.
5028 unsigned SmallIC =
5029 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
5030
5031 // Interleave until store/load ports (estimated by the max interleave
5032 // count) are saturated.
5033 unsigned NumStores = Legal->getNumStores();
5034 unsigned NumLoads = Legal->getNumLoads();
5035 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5036 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5037
5038 // If we have a scalar reduction (vector reductions are already dealt with
5039 // by this point), we can increase the critical path length if the loop
5040 // we're interleaving is inside another loop. Limit, by default, to 2 so
5041 // the critical path only gets increased by one reduction operation.
5042 if (Legal->getReductionVars()->size() &&
5043 TheLoop->getLoopDepth() > 1) {
5044 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5045 SmallIC = std::min(SmallIC, F);
5046 StoresIC = std::min(StoresIC, F);
5047 LoadsIC = std::min(LoadsIC, F);
5048 }
5049
5050 if (EnableLoadStoreRuntimeInterleave &&
5051 std::max(StoresIC, LoadsIC) > SmallIC) {
5052 DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5053 return std::max(StoresIC, LoadsIC);
5054 }
5055
5056 DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5057 return SmallIC;
5058 }
5059
5060 // Interleave if this is a large loop (small loops are already dealt with by
5061 // this point) that could benefit from interleaving.
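// For example, a target whose TTI reports that it benefits from aggressive
// interleaving (typically to hide long instruction latencies when a
// reduction chain is present) may still request the full IC below.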
5062 bool HasReductions = (Legal->getReductionVars()->size() > 0);
5063 if (TTI.enableAggressiveInterleaving(HasReductions)) {
5064 DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5065 return IC;
5066 }
5067
5068 DEBUG(dbgs() << "LV: Not Interleaving.\n");
5069 return 1;
5070 }
5071
5072 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5073 LoopVectorizationCostModel::calculateRegisterUsage(
5074 const SmallVector<unsigned, 8> &VFs) {
5075 // This function calculates the register usage by measuring the highest number
5076 // of values that are alive at a single location. Obviously, this is a very
5077 // rough estimate. We scan the loop in topological order and
5078 // assign a number to each instruction. We use RPO to ensure that defs are
5079 // met before their users. We assume that each instruction that has in-loop
5080 // users starts an interval. We record every time that an in-loop value is
5081 // used, so we have a list of the first and last occurrences of each
5082 // instruction. Next, we transpose this data structure into a multi-map that
5083 // holds the list of intervals that *end* at a specific location. This
5084 // multi-map allows us to process the intervals in one linear scan. We scan
5085 // the instructions linearly and record each time that a new interval starts,
5086 // by placing it in a set. If we find this value in the multi-map then we
5087 // remove it from the set. The max register usage is the maximum size of the set.
5088 // We also search for instructions that are defined outside the loop, but are
5089 // used inside the loop. We need this number separately from the max-interval
5090 // usage number because, when we unroll, loop-invariant values do not consume
5091 // additional registers.
5092 LoopBlocksDFS DFS(TheLoop);
5093 DFS.perform(LI);
5094
5095 RegisterUsage RU;
5096 RU.NumInstructions = 0;
5097
5098 // Each 'key' in the map opens a new interval. The values
5099 // of the map are the index of the 'last seen' usage of the
5100 // instruction that is the key.
5101 typedef DenseMap<Instruction*, unsigned> IntervalMap;
5102 // Maps an index to its instruction.
5103 DenseMap<unsigned, Instruction*> IdxToInstr;
5104 // Marks the end of each interval.
5105 IntervalMap EndPoint;
5106 // Saves the set of instructions that are used in the loop.
5107 SmallSet<Instruction*, 8> Ends;
5108 // Saves the list of instructions that are used in the loop but are
5109 // defined outside the loop.
5110 SmallPtrSet<Value*, 8> LoopInvariants;
5111
5112 unsigned Index = 0;
5113 for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(),
5114 be = DFS.endRPO(); bb != be; ++bb) {
5115 RU.NumInstructions += (*bb)->size();
5116 for (Instruction &I : **bb) {
5117 IdxToInstr[Index++] = &I;
5118
5119 // Save the end location of each USE.
5120 for (unsigned i = 0; i < I.getNumOperands(); ++i) {
5121 Value *U = I.getOperand(i);
5122 Instruction *Instr = dyn_cast<Instruction>(U);
5123
5124 // Ignore non-instruction values such as arguments, constants, etc.
5125 if (!Instr) continue;
5126
5127 // If this instruction is outside the loop then record it and continue.
5128 if (!TheLoop->contains(Instr)) {
5129 LoopInvariants.insert(Instr);
5130 continue;
5131 }
5132
5133 // Overwrite previous end points.
5134 EndPoint[Instr] = Index;
5135 Ends.insert(Instr);
5136 }
5137 }
5138 }
5139
5140 // Saves the list of intervals that end with the index in 'key'.
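// For example, if instructions A and B are both last used at index 3 and C
// at index 5, then EndPoint = {A: 3, B: 3, C: 5} is transposed below into
// TransposeEnds = {3: [A, B], 5: [C]}, so the linear scan can close both
// A's and B's intervals when it reaches index 3.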
5141 typedef SmallVector<Instruction*, 2> InstrList; 5142 DenseMap<unsigned, InstrList> TransposeEnds; 5143 5144 // Transpose the EndPoints to a list of values that end at each index. 5145 for (IntervalMap::iterator it = EndPoint.begin(), e = EndPoint.end(); 5146 it != e; ++it) 5147 TransposeEnds[it->second].push_back(it->first); 5148 5149 SmallSet<Instruction*, 8> OpenIntervals; 5150 5151 // Get the size of the widest register. 5152 unsigned MaxSafeDepDist = -1U; 5153 if (Legal->getMaxSafeDepDistBytes() != -1U) 5154 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 5155 unsigned WidestRegister = 5156 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist); 5157 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5158 5159 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5160 SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0); 5161 5162 DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5163 5164 // A lambda that gets the register usage for the given type and VF. 5165 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 5166 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 5167 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 5168 }; 5169 5170 for (unsigned int i = 0; i < Index; ++i) { 5171 Instruction *I = IdxToInstr[i]; 5172 // Ignore instructions that are never used within the loop. 5173 if (!Ends.count(I)) continue; 5174 5175 // Skip ignored values. 5176 if (ValuesToIgnore.count(I)) 5177 continue; 5178 5179 // Remove all of the instructions that end at this location. 5180 InstrList &List = TransposeEnds[i]; 5181 for (unsigned int j = 0, e = List.size(); j < e; ++j) 5182 OpenIntervals.erase(List[j]); 5183 5184 // For each VF find the maximum usage of registers. 5185 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 5186 if (VFs[j] == 1) { 5187 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 5188 continue; 5189 } 5190 5191 // Count the number of live intervals. 5192 unsigned RegUsage = 0; 5193 for (auto Inst : OpenIntervals) 5194 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 5195 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 5196 } 5197 5198 DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 5199 << OpenIntervals.size() << '\n'); 5200 5201 // Add the current instruction to the list of open intervals. 5202 OpenIntervals.insert(I); 5203 } 5204 5205 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 5206 unsigned Invariant = 0; 5207 if (VFs[i] == 1) 5208 Invariant = LoopInvariants.size(); 5209 else { 5210 for (auto Inst : LoopInvariants) 5211 Invariant += GetRegUsage(Inst->getType(), VFs[i]); 5212 } 5213 5214 DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); 5215 DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); 5216 DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n'); 5217 DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n'); 5218 5219 RU.LoopInvariantRegs = Invariant; 5220 RU.MaxLocalUsers = MaxUsages[i]; 5221 RUs[i] = RU; 5222 } 5223 5224 return RUs; 5225 } 5226 5227 unsigned LoopVectorizationCostModel::expectedCost(unsigned VF) { 5228 unsigned Cost = 0; 5229 5230 // For each block. 5231 for (Loop::block_iterator bb = TheLoop->block_begin(), 5232 be = TheLoop->block_end(); bb != be; ++bb) { 5233 unsigned BlockCost = 0; 5234 BasicBlock *BB = *bb; 5235 5236 // For each instruction in the old loop. 5237 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 5238 // Skip dbg intrinsics. 
5239 if (isa<DbgInfoIntrinsic>(it))
5240 continue;
5241
5242 // Skip ignored values.
5243 if (ValuesToIgnore.count(&*it))
5244 continue;
5245
5246 unsigned C = getInstructionCost(&*it, VF);
5247
5248 // Check if we should override the cost.
5249 if (ForceTargetInstructionCost.getNumOccurrences() > 0)
5250 C = ForceTargetInstructionCost;
5251
5252 BlockCost += C;
5253 DEBUG(dbgs() << "LV: Found an estimated cost of " << C << " for VF " <<
5254 VF << " For instruction: " << *it << '\n');
5255 }
5256
5257 // We assume that if-converted blocks have a 50% chance of being executed.
5258 // When the code is scalar, some of the blocks are skipped due to control
5259 // flow; when the code is vectorized, we execute all code paths.
5260 if (VF == 1 && Legal->blockNeedsPredication(*bb))
5261 BlockCost /= 2;
5262
5263 Cost += BlockCost;
5264 }
5265
5266 return Cost;
5267 }
5268
5269 /// \brief Check whether the address computation for a non-consecutive memory
5270 /// access looks like an unlikely candidate for being merged into the indexing
5271 /// mode.
5272 ///
5273 /// We look for a GEP which has one index that is an induction variable and all
5274 /// other indices are loop invariant. If the stride of this access is also
5275 /// within a small bound we decide that this address computation can likely be
5276 /// merged into the addressing mode.
5277 /// In all other cases, we identify the address computation as complex.
5278 static bool isLikelyComplexAddressComputation(Value *Ptr,
5279 LoopVectorizationLegality *Legal,
5280 ScalarEvolution *SE,
5281 const Loop *TheLoop) {
5282 GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr);
5283 if (!Gep)
5284 return true;
5285
5286 // We are looking for a gep with all loop invariant indices except for one
5287 // which should be an induction variable.
5288 unsigned NumOperands = Gep->getNumOperands();
5289 for (unsigned i = 1; i < NumOperands; ++i) {
5290 Value *Opd = Gep->getOperand(i);
5291 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
5292 !Legal->isInductionVariable(Opd))
5293 return true;
5294 }
5295
5296 // Now we know we have a GEP ptr, %inv, %ind, %inv. Make sure that the step
5297 // can likely be merged into the address computation.
5298 unsigned MaxMergeDistance = 64;
5299
5300 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Ptr));
5301 if (!AddRec)
5302 return true;
5303
5304 // Check that the step is a constant.
5305 const SCEV *Step = AddRec->getStepRecurrence(*SE);
5306 // A symbolic stride makes the address computation complex.
5307 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
5308 if (!C)
5309 return true;
5310
5311 const APInt &APStepVal = C->getValue()->getValue();
5312
5313 // Huge step value - give up.
5314 if (APStepVal.getBitWidth() > 64)
5315 return true;
5316
5317 int64_t StepVal = APStepVal.getSExtValue();
5318
5319 return StepVal > MaxMergeDistance;
5320 }
5321
5322 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
5323 return Legal->hasStride(I->getOperand(0)) ||
5324 Legal->hasStride(I->getOperand(1));
5325 }
5326
5327 unsigned
5328 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
5329 // If we know that this instruction will remain uniform, check the cost of
5330 // the scalar version.
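// For example, the latch comparison and the induction-variable update stay
// scalar after vectorization, so costing them at the full VF would
// overestimate; we therefore query the scalar (VF == 1) cost for values
// known to remain uniform.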
5331 if (Legal->isUniformAfterVectorization(I))
5332 VF = 1;
5333
5334 Type *RetTy = I->getType();
5335 if (VF > 1 && MinBWs.count(I))
5336 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
5337 Type *VectorTy = ToVectorTy(RetTy, VF);
5338
5339 // TODO: We need to estimate the cost of intrinsic calls.
5340 switch (I->getOpcode()) {
5341 case Instruction::GetElementPtr:
5342 // We mark this instruction as zero-cost because the cost of GEPs in
5343 // vectorized code depends on whether the corresponding memory instruction
5344 // is scalarized or not. Therefore, we handle GEPs with the memory
5345 // instruction cost.
5346 return 0;
5347 case Instruction::Br: {
5348 return TTI.getCFInstrCost(I->getOpcode());
5349 }
5350 case Instruction::PHI:
5351 // TODO: If-converted branches become selects.
5352 return 0;
5353 case Instruction::Add:
5354 case Instruction::FAdd:
5355 case Instruction::Sub:
5356 case Instruction::FSub:
5357 case Instruction::Mul:
5358 case Instruction::FMul:
5359 case Instruction::UDiv:
5360 case Instruction::SDiv:
5361 case Instruction::FDiv:
5362 case Instruction::URem:
5363 case Instruction::SRem:
5364 case Instruction::FRem:
5365 case Instruction::Shl:
5366 case Instruction::LShr:
5367 case Instruction::AShr:
5368 case Instruction::And:
5369 case Instruction::Or:
5370 case Instruction::Xor: {
5371 // Since we will replace the stride by 1, the multiplication should go away.
5372 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
5373 return 0;
5374 // Certain instructions can be cheaper to vectorize if they have a constant
5375 // second vector operand. One example of this is shifts on x86.
5376 TargetTransformInfo::OperandValueKind Op1VK =
5377 TargetTransformInfo::OK_AnyValue;
5378 TargetTransformInfo::OperandValueKind Op2VK =
5379 TargetTransformInfo::OK_AnyValue;
5380 TargetTransformInfo::OperandValueProperties Op1VP =
5381 TargetTransformInfo::OP_None;
5382 TargetTransformInfo::OperandValueProperties Op2VP =
5383 TargetTransformInfo::OP_None;
5384 Value *Op2 = I->getOperand(1);
5385
5386 // Check for a splat of a constant or for a non-uniform vector of constants.
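// E.g., 'shl <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>' has a uniform
// (splat) power-of-two constant as its second operand, which many targets
// can shift more cheaply than the non-uniform '<i32 1, i32 2, i32 3, i32 4>'.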
5387 if (isa<ConstantInt>(Op2)) {
5388 ConstantInt *CInt = cast<ConstantInt>(Op2);
5389 if (CInt && CInt->getValue().isPowerOf2())
5390 Op2VP = TargetTransformInfo::OP_PowerOf2;
5391 Op2VK = TargetTransformInfo::OK_UniformConstantValue;
5392 } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
5393 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
5394 Constant *SplatValue = cast<Constant>(Op2)->getSplatValue();
5395 if (SplatValue) {
5396 ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue);
5397 if (CInt && CInt->getValue().isPowerOf2())
5398 Op2VP = TargetTransformInfo::OP_PowerOf2;
5399 Op2VK = TargetTransformInfo::OK_UniformConstantValue;
5400 }
5401 }
5402
5403 return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, Op2VK,
5404 Op1VP, Op2VP);
5405 }
5406 case Instruction::Select: {
5407 SelectInst *SI = cast<SelectInst>(I);
5408 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
5409 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
5410 Type *CondTy = SI->getCondition()->getType();
5411 if (!ScalarCond)
5412 CondTy = VectorType::get(CondTy, VF);
5413
5414 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy);
5415 }
5416 case Instruction::ICmp:
5417 case Instruction::FCmp: {
5418 Type *ValTy = I->getOperand(0)->getType();
5419 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
5420 auto It = MinBWs.find(Op0AsInstruction);
5421 if (VF > 1 && It != MinBWs.end())
5422 ValTy = IntegerType::get(ValTy->getContext(), It->second);
5423 VectorTy = ToVectorTy(ValTy, VF);
5424 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy);
5425 }
5426 case Instruction::Store:
5427 case Instruction::Load: {
5428 StoreInst *SI = dyn_cast<StoreInst>(I);
5429 LoadInst *LI = dyn_cast<LoadInst>(I);
5430 Type *ValTy = (SI ? SI->getValueOperand()->getType() :
5431 LI->getType());
5432 VectorTy = ToVectorTy(ValTy, VF);
5433
5434 unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment();
5435 unsigned AS = SI ? SI->getPointerAddressSpace() :
5436 LI->getPointerAddressSpace();
5437 Value *Ptr = SI ? SI->getPointerOperand() : LI->getPointerOperand();
5438 // We add the cost of address computation here instead of with the gep
5439 // instruction because only here do we know whether the operation is
5440 // scalarized.
5441 if (VF == 1)
5442 return TTI.getAddressComputationCost(VectorTy) +
5443 TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
5444
5445 // For an interleaved access, calculate the total cost of the whole
5446 // interleave group.
5447 if (Legal->isAccessInterleaved(I)) {
5448 auto Group = Legal->getInterleavedAccessGroup(I);
5449 assert(Group && "Failed to get an interleaved access group.");
5450
5451 // Only calculate the cost once, at the insert position.
5452 if (Group->getInsertPos() != I)
5453 return 0;
5454
5455 unsigned InterleaveFactor = Group->getFactor();
5456 Type *WideVecTy =
5457 VectorType::get(VectorTy->getVectorElementType(),
5458 VectorTy->getVectorNumElements() * InterleaveFactor);
5459
5460 // Holds the indices of existing members in an interleaved load group.
5461 // An interleaved store group doesn't need this as it doesn't allow gaps.
5462 SmallVector<unsigned, 4> Indices;
5463 if (LI) {
5464 for (unsigned i = 0; i < InterleaveFactor; i++)
5465 if (Group->getMember(i))
5466 Indices.push_back(i);
5467 }
5468
5469 // Calculate the cost of the whole interleaved group.
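// As a rough illustration: a factor-2 load group over <4 x i32> members is
// costed as one wide load of <8 x i32> plus the shuffles needed to
// de-interleave the members (and, below, a reverse shuffle per member for
// reversed groups).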
5470 unsigned Cost = TTI.getInterleavedMemoryOpCost(
5471 I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
5472 Group->getAlignment(), AS);
5473
5474 if (Group->isReverse())
5475 Cost +=
5476 Group->getNumMembers() *
5477 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5478
5479 // FIXME: An interleaved load group with a huge gap could be even more
5480 // expensive than scalar operations. We could then ignore such a group
5481 // and use scalar operations instead.
5482 return Cost;
5483 }
5484
5485 // Scalarized loads/stores.
5486 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
5487 bool Reverse = ConsecutiveStride < 0;
5488 const DataLayout &DL = I->getModule()->getDataLayout();
5489 unsigned ScalarAllocatedSize = DL.getTypeAllocSize(ValTy);
5490 unsigned VectorElementSize = DL.getTypeStoreSize(VectorTy) / VF;
5491 if (!ConsecutiveStride || ScalarAllocatedSize != VectorElementSize) {
5492 bool IsComplexComputation =
5493 isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop);
5494 unsigned Cost = 0;
5495 // The cost of extracting from the value vector and pointer vector.
5496 Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
5497 for (unsigned i = 0; i < VF; ++i) {
5498 // The cost of extracting the pointer operand.
5499 Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, PtrTy, i);
5500 // In case of STORE, the cost of ExtractElement from the vector.
5501 // In case of LOAD, the cost of InsertElement into the returned
5502 // vector.
5503 Cost += TTI.getVectorInstrCost(SI ? Instruction::ExtractElement :
5504 Instruction::InsertElement,
5505 VectorTy, i);
5506 }
5507
5508 // The cost of the scalar loads/stores.
5509 Cost += VF * TTI.getAddressComputationCost(PtrTy, IsComplexComputation);
5510 Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
5511 Alignment, AS);
5512 return Cost;
5513 }
5514
5515 // Wide loads/stores.
5516 unsigned Cost = TTI.getAddressComputationCost(VectorTy);
5517 if (Legal->isMaskRequired(I))
5518 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment,
5519 AS);
5520 else
5521 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
5522
5523 if (Reverse)
5524 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse,
5525 VectorTy, 0);
5526 return Cost;
5527 }
5528 case Instruction::ZExt:
5529 case Instruction::SExt:
5530 case Instruction::FPToUI:
5531 case Instruction::FPToSI:
5532 case Instruction::FPExt:
5533 case Instruction::PtrToInt:
5534 case Instruction::IntToPtr:
5535 case Instruction::SIToFP:
5536 case Instruction::UIToFP:
5537 case Instruction::Trunc:
5538 case Instruction::FPTrunc:
5539 case Instruction::BitCast: {
5540 // We optimize the truncation of induction variables.
5541 // The cost of these is the same as the scalar operation.
5542 if (I->getOpcode() == Instruction::Trunc &&
5543 Legal->isInductionVariable(I->getOperand(0)))
5544 return TTI.getCastInstrCost(I->getOpcode(), I->getType(),
5545 I->getOperand(0)->getType());
5546
5547 Type *SrcScalarTy = I->getOperand(0)->getType();
5548 Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF);
5549 if (VF > 1 && MinBWs.count(I)) {
5550 // This cast is going to be shrunk. This may remove the cast or turn it
5551 // into a slightly different cast. For example, if MinBW == 16,
5552 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
5553 //
5554 // Calculate the modified src and dest types.
5555 Type *MinVecTy = VectorTy; 5556 if (I->getOpcode() == Instruction::Trunc) { 5557 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 5558 VectorTy = largestIntegerVectorType(ToVectorTy(I->getType(), VF), 5559 MinVecTy); 5560 } else if (I->getOpcode() == Instruction::ZExt || 5561 I->getOpcode() == Instruction::SExt) { 5562 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 5563 VectorTy = smallestIntegerVectorType(ToVectorTy(I->getType(), VF), 5564 MinVecTy); 5565 } 5566 } 5567 5568 return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy); 5569 } 5570 case Instruction::Call: { 5571 bool NeedToScalarize; 5572 CallInst *CI = cast<CallInst>(I); 5573 unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize); 5574 if (getIntrinsicIDForCall(CI, TLI)) 5575 return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI)); 5576 return CallCost; 5577 } 5578 default: { 5579 // We are scalarizing the instruction. Return the cost of the scalar 5580 // instruction, plus the cost of insert and extract into vector 5581 // elements, times the vector width. 5582 unsigned Cost = 0; 5583 5584 if (!RetTy->isVoidTy() && VF != 1) { 5585 unsigned InsCost = TTI.getVectorInstrCost(Instruction::InsertElement, 5586 VectorTy); 5587 unsigned ExtCost = TTI.getVectorInstrCost(Instruction::ExtractElement, 5588 VectorTy); 5589 5590 // The cost of inserting the results plus extracting each one of the 5591 // operands. 5592 Cost += VF * (InsCost + ExtCost * I->getNumOperands()); 5593 } 5594 5595 // The cost of executing VF copies of the scalar instruction. This opcode 5596 // is unknown. Assume that it is the same as 'mul'. 5597 Cost += VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy); 5598 return Cost; 5599 } 5600 }// end of switch. 5601 } 5602 5603 char LoopVectorize::ID = 0; 5604 static const char lv_name[] = "Loop Vectorization"; 5605 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 5606 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 5607 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 5608 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 5609 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 5610 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 5611 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 5612 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 5613 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 5614 INITIALIZE_PASS_DEPENDENCY(LCSSA) 5615 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 5616 INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 5617 INITIALIZE_PASS_DEPENDENCY(LoopAccessAnalysis) 5618 INITIALIZE_PASS_DEPENDENCY(DemandedBits) 5619 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 5620 5621 namespace llvm { 5622 Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) { 5623 return new LoopVectorize(NoUnrolling, AlwaysVectorize); 5624 } 5625 } 5626 5627 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 5628 // Check for a store. 5629 if (StoreInst *ST = dyn_cast<StoreInst>(Inst)) 5630 return Legal->isConsecutivePtr(ST->getPointerOperand()) != 0; 5631 5632 // Check for a load. 
5633 if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
5634 return Legal->isConsecutivePtr(LI->getPointerOperand()) != 0;
5635
5636 return false;
5637 }
5638
5639
5640 void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
5641 bool IfPredicateStore) {
5642 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
5643 // Holds vector parameters, or scalars in the case of uniform values.
5644 SmallVector<VectorParts, 4> Params;
5645
5646 setDebugLocFromInst(Builder, Instr);
5647
5648 // Find all of the vectorized parameters.
5649 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
5650 Value *SrcOp = Instr->getOperand(op);
5651
5652 // If we are accessing the old induction variable, use the new one.
5653 if (SrcOp == OldInduction) {
5654 Params.push_back(getVectorValue(SrcOp));
5655 continue;
5656 }
5657
5658 // Try using previously calculated values.
5659 Instruction *SrcInst = dyn_cast<Instruction>(SrcOp);
5660
5661 // If the src is an instruction that appeared earlier in the basic block,
5662 // then it should already be vectorized.
5663 if (SrcInst && OrigLoop->contains(SrcInst)) {
5664 assert(WidenMap.has(SrcInst) && "Source operand is unavailable");
5665 // The parameter is a vector value from earlier.
5666 Params.push_back(WidenMap.get(SrcInst));
5667 } else {
5668 // The parameter is a scalar from outside the loop. Maybe even a constant.
5669 VectorParts Scalars;
5670 Scalars.append(UF, SrcOp);
5671 Params.push_back(Scalars);
5672 }
5673 }
5674
5675 assert(Params.size() == Instr->getNumOperands() &&
5676 "Invalid number of operands");
5677
5678 // Does this instruction return a value?
5679 bool IsVoidRetTy = Instr->getType()->isVoidTy();
5680
5681 Value *UndefVec = IsVoidRetTy ? nullptr :
5682 UndefValue::get(Instr->getType());
5683 // Create a new entry in the WidenMap and initialize it to Undef or Null.
5684 VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);
5685
5686 VectorParts Cond;
5687 if (IfPredicateStore) {
5688 assert(Instr->getParent()->getSinglePredecessor() &&
5689 "Only support single predecessor blocks");
5690 Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(),
5691 Instr->getParent());
5692 }
5693
5694 // For each vector unroll 'part':
5695 for (unsigned Part = 0; Part < UF; ++Part) {
5696 // For each scalar that we create:
5697
5698 // Start an "if (pred) a[i] = ..." block.
5699 Value *Cmp = nullptr;
5700 if (IfPredicateStore) {
5701 if (Cond[Part]->getType()->isVectorTy())
5702 Cond[Part] =
5703 Builder.CreateExtractElement(Cond[Part], Builder.getInt32(0));
5704 Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cond[Part],
5705 ConstantInt::get(Cond[Part]->getType(), 1));
5706 }
5707
5708 Instruction *Cloned = Instr->clone();
5709 if (!IsVoidRetTy)
5710 Cloned->setName(Instr->getName() + ".cloned");
5711 // Replace the operands of the cloned instruction with extracted scalars.
5712 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
5713 Value *Op = Params[op][Part];
5714 Cloned->setOperand(op, Op);
5715 }
5716
5717 // Place the cloned scalar in the new loop.
5718 Builder.Insert(Cloned);
5719
5720 // If the original scalar returns a value we need to place it in a vector
5721 // so that future users will be able to use it.
5722 if (!IsVoidRetTy)
5723 VecResults[Part] = Cloned;
5724
5725 // End if-block.
5726 if (IfPredicateStore) 5727 PredicatedStores.push_back(std::make_pair(cast<StoreInst>(Cloned), 5728 Cmp)); 5729 } 5730 } 5731 5732 void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) { 5733 StoreInst *SI = dyn_cast<StoreInst>(Instr); 5734 bool IfPredicateStore = (SI && Legal->blockNeedsPredication(SI->getParent())); 5735 5736 return scalarizeInstruction(Instr, IfPredicateStore); 5737 } 5738 5739 Value *InnerLoopUnroller::reverseVector(Value *Vec) { 5740 return Vec; 5741 } 5742 5743 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { 5744 return V; 5745 } 5746 5747 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step) { 5748 // When unrolling and the VF is 1, we only need to add a simple scalar. 5749 Type *ITy = Val->getType(); 5750 assert(!ITy->isVectorTy() && "Val must be a scalar"); 5751 Constant *C = ConstantInt::get(ITy, StartIdx); 5752 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 5753 } 5754
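// Note on InnerLoopUnroller::getStepVector above: when unrolling by UF = 4
// with VF = 1 and a step of 1, the four unrolled parts receive
// induction + 0, + 1, + 2, and + 3 respectively, each computed with a plain
// scalar add rather than a vector step.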