//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD.
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
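//
// For illustration only (not part of the implementation): at VF = 4, a
// scalar loop such as
//   for (i = 0; i < n; ++i) A[i] = B[i] + 1;
// is conceptually rewritten into
//   for (i = 0; i < (n & ~3); i += 4) A[i:i+3] = B[i:i+3] + <1,1,1,1>;
// with a scalar epilogue loop executing the remaining n % 4 iterations.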
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <algorithm>
#include <functional>
#include <map>
#include <tuple>

using namespace llvm;
using namespace llvm::PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                   cl::desc("Enable if-conversion during vectorization."));

/// We don't vectorize loops with a known constant trip count below this number.
static cl::opt<unsigned>
TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16),
                             cl::Hidden,
                             cl::desc("Don't vectorize loops with a constant "
                                      "trip count that is smaller than this "
                                      "value."));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i+=4)
///        A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
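/// For example (illustrative), in
///   for (i = 0; i < n; ++i)
///     if (c[i]) a[i] = 0;
/// the conditional store to a[i] must be predicated and counts toward this
/// limit.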
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(false), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

namespace {

// Forward declarations.
class LoopVectorizeHints;
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;

/// \brief This modifies LoopAccessReport to initialize message with
/// loop-vectorizer-specific part.
class VectorizationReport : public LoopAccessReport {
public:
  VectorizationReport(Instruction *I = nullptr)
      : LoopAccessReport("loop not vectorized: ", I) {}

  /// \brief This allows promotion of the loop-access analysis report into the
  /// loop-vectorizer report. It modifies the message to add the
  /// loop-vectorizer-specific part of the message.
  explicit VectorizationReport(const LoopAccessReport &R)
      : LoopAccessReport(Twine("loop not vectorized: ") + R.str(),
                         R.getInstr()) {}
};

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found for a given vectorization factor.
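///
/// For example (illustrative): with VF = 4 and a trip count of 10, the
/// generated vector loop covers iterations [0, 8) in two wide iterations,
/// and the scalar epilogue loop executes iterations 8 and 9.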
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, ScalarEvolution *SE, LoopInfo *LI,
                      DominatorTree *DT, const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, unsigned VecWidth,
                      unsigned UnrollFactor)
      : OrigLoop(OrigLoop), SE(SE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        VF(VecWidth), UF(UnrollFactor), Builder(SE->getContext()),
        Induction(nullptr), OldInduction(nullptr), WidenMap(UnrollFactor),
        TripCount(nullptr), VectorTripCount(nullptr), Legal(nullptr),
        AddedSafetyChecks(false) {}

  // Perform the actual loop widening (vectorization).
  // MinimumBitWidths maps scalar integer values to the smallest bitwidth they
  // can be validly truncated to. The cost model has assumed this truncation
  // will happen when vectorizing.
  void vectorize(LoopVectorizationLegality *L,
                 DenseMap<Instruction *, uint64_t> MinimumBitWidths) {
    MinBWs = MinimumBitWidths;
    Legal = L;
    // Create a new empty loop. Unlink the old loop and connect the new one.
    createEmptyLoop();
    // Widen each instruction in the old loop to a new one in the new loop.
    // Use the Legality module to find the induction and reduction variables.
    vectorizeLoop();
  }

  // Return true if any runtime check is added.
  bool IsSafetyChecksAdded() { return AddedSafetyChecks; }

  virtual ~InnerLoopVectorizer() {}

protected:
  /// A small list of PHINodes.
  typedef SmallVector<PHINode *, 4> PhiVector;
  /// When we unroll loops we have multiple vector values for each scalar.
  /// This data structure holds the unrolled and vectorized values that
  /// originated from one scalar instruction.
  typedef SmallVector<Value *, 2> VectorParts;

  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
      EdgeMaskCache;

  /// \brief Add checks for strides that were assumed to be 1.
  ///
  /// Returns the first and the last check instruction as the pair
  /// (first, last).
  std::pair<Instruction *, Instruction *> addStrideCheck(Instruction *Loc);

  /// Create an empty loop, based on the loop ranges of the old loop.
  void createEmptyLoop();
  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);
  /// Copy and widen the instructions from the old loop.
  virtual void vectorizeLoop();

  /// \brief The Loop exit block may have single value PHI nodes where the
  /// incoming value is 'Undef'. While vectorizing we only handled real values
  /// that were defined inside the loop. Here we fix the 'undef case'.
  /// See PR14725.
  void fixLCSSAPHIs();

  /// Shrinks vector element sizes based on information in "MinBWs".
  void truncateToMinimalBitwidths();

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);
  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// A helper function to vectorize a single BB within the innermost loop.
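  /// For a conditional block such as (illustrative)
  ///   if (a[i] > 0) b[i] = a[i];
  /// the block is executed under its entry mask, and any store it contains
  /// is either predicated on that mask or scalarized behind a branch on it.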
  void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VectorParts &Entry, unsigned UF,
                           unsigned VF, PhiVector *PV);

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars. If \p IfPredicateStore is true we need to 'hide' each
  /// scalarized instruction behind an if block predicated on the control
  /// dependence of the instruction.
  virtual void scalarizeInstruction(Instruction *Instr,
                                    bool IfPredicateStore = false);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step);

  /// When we go over instructions in the basic block we rely on previous
  /// values within the current basic block or on loop invariant values.
  /// When we widen (vectorize) values we place them in the map. If the values
  /// are not within the map, they have to be loop invariant, so we simply
  /// broadcast them into a vector.
  VectorParts &getVectorValue(Value *V);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Emit a bypass check to see if the trip count would overflow, or we
  /// wouldn't have enough iterations to execute one vector loop.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if the vector trip count is nonzero.
  void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass);
  /// Emit bypass checks to check if strides we've assumed to be one really
  /// are.
  void emitStrideChecks(Loop *L, BasicBlock *Bypass);
  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// This is a helper class that holds the vectorizer state. It maps scalar
  /// instructions to vector instructions. When the code is 'unrolled' then
  /// a single scalar value is mapped to multiple vector parts. The parts
  /// are stored in the VectorPart type.
  struct ValueMap {
    /// C'tor. UnrollFactor controls the number of vectors ('parts') that
    /// are mapped.
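    /// For instance (illustrative): with UnrollFactor = 2, splat(K, V) maps
    /// K to the two parts {V, V}, and get(K)[1] names the value used by the
    /// second unrolled copy of the scalar K.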
    ValueMap(unsigned UnrollFactor) : UF(UnrollFactor) {}

    /// \return True if 'Key' is saved in the Value Map.
    bool has(Value *Key) const { return MapStorage.count(Key); }

    /// Initializes a new entry in the map. Sets all of the vector parts to the
    /// same value in 'Val'.
    /// \return A reference to a vector with splat values.
    VectorParts &splat(Value *Key, Value *Val) {
      VectorParts &Entry = MapStorage[Key];
      Entry.assign(UF, Val);
      return Entry;
    }

    /// \return A reference to the value that is stored at 'Key'.
    VectorParts &get(Value *Key) {
      VectorParts &Entry = MapStorage[Key];
      if (Entry.empty())
        Entry.resize(UF);
      assert(Entry.size() == UF);
      return Entry;
    }

  private:
    /// The unroll factor. Each entry in the map stores this number of vector
    /// elements.
    unsigned UF;

    /// Map storage. We use std::map and not DenseMap because insertions to a
    /// dense map invalidate its iterators.
    std::map<Value *, VectorParts> MapStorage;
  };

  /// The original loop.
  Loop *OrigLoop;
  /// Scev analysis to use.
  ScalarEvolution *SE;
  /// Loop Info.
  LoopInfo *LI;
  /// Dominator Tree.
  DominatorTree *DT;
  /// Alias Analysis.
  AliasAnalysis *AA;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

protected:
  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;
  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;
  /// The middle block between the vector and the scalar loops.
  BasicBlock *LoopMiddleBlock;
  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;
  /// The vector loop body.
  SmallVector<BasicBlock *, 4> LoopVectorBody;
  /// The scalar loop body.
  BasicBlock *LoopScalarBody;
  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction;
  /// The induction variable of the old basic block.
  PHINode *OldInduction;
  /// Maps scalars to widened vectors.
  ValueMap WidenMap;
  /// Store instructions that should be predicated, as a pair
  /// <StoreInst, Predicate>.
  SmallVector<std::pair<StoreInst *, Value *>, 4> PredicatedStores;
  EdgeMaskCache MaskCache;
  /// Trip count of the original loop.
  Value *TripCount;
  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount;

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be
  /// truncated to this type.
  DenseMap<Instruction *, uint64_t> MinBWs;
  LoopVectorizationLegality *Legal;

  // Record whether runtime check is added.
  bool AddedSafetyChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, ScalarEvolution *SE, LoopInfo *LI,
                    DominatorTree *DT, const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, unsigned UnrollFactor)
      : InnerLoopVectorizer(OrigLoop, SE, LI, DT, TLI, TTI, 1, UnrollFactor) {}

private:
  void scalarizeInstruction(Instruction *Instr,
                            bool IfPredicateStore = false) override;
  void vectorizeMemoryInstruction(Instruction *Instr) override;
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step) override;
  Value *reverseVector(Value *Vec) override;
};

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

/// \brief Set the debug location in the builder using the debug location in
/// the instruction.
static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr))
    B.SetCurrentDebugLocation(Inst->getDebugLoc());
  else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

/// \brief Propagate known metadata from one instruction to another.
static void propagateMetadata(Instruction *To, const Instruction *From) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  From->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto M : Metadata) {
    unsigned Kind = M.first;

    // These are safe to transfer (this is safe for TBAA, even when we
    // if-convert, because should that metadata have had a control dependency
    // on the condition, and thus actually aliased with some other
    // non-speculated memory access when the condition was false, this would be
    // caught by the runtime overlap checks).
    if (Kind != LLVMContext::MD_tbaa && Kind != LLVMContext::MD_alias_scope &&
        Kind != LLVMContext::MD_noalias && Kind != LLVMContext::MD_fpmath &&
        Kind != LLVMContext::MD_nontemporal)
      continue;

    To->setMetadata(Kind, M.second);
  }
}

/// \brief Propagate known metadata from one instruction to a vector of others.
static void propagateMetadata(SmallVectorImpl<Value *> &To,
                              const Instruction *From) {
  for (Value *V : To)
    if (Instruction *I = dyn_cast<Instruction>(V))
      propagateMetadata(I, From);
}

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is relative to the leader and may be
  /// negative if the new member becomes the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The distance between the largest and the smallest index is always
      // less than the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if the group contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

private:
  unsigned Factor; // Interleave Factor.
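  // True if the group's accesses are in reverse (negative-stride) order.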
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey;
  int LargestKey;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32         // Insert Position
  //      %add = add i32 %even     // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32           // Def of %odd
  //      store i32 %odd           // Insert Position
  Instruction *InsertPos;
};

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise the analysis is meaningless, as vectorizing interleaved
/// accesses would be unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(ScalarEvolution *SE, Loop *L, DominatorTree *DT)
      : SE(SE), TheLoop(L), DT(DT) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr doesn't belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

private:
  ScalarEvolution *SE;
  Loop *TheLoop;
  DominatorTree *DT;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor(int Stride, const SCEV *Scev, unsigned Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    StrideDescriptor() : Stride(0), Scev(nullptr), Size(0), Align(0) {}

    int Stride;       // The access's stride. It is negative for a reverse access.
    const SCEV *Scev; // The scalar expression of this access.
    unsigned Size;    // The size of the memory object.
    unsigned Align;   // The alignment of this access.
  };

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStridedAccesses(
      MapVector<Instruction *, StrideDescriptor> &StrideAccesses,
      const ValueToValueMap &Strides);
};

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for it.
class LoopVectorizeHints {
  enum HintKind {
    HK_WIDTH,
    HK_UNROLL,
    HK_FORCE
  };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;
  /// Vectorization interleave factor.
  Hint Interleave;
  /// Vectorization forced.
  Hint Force;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  LoopVectorizeHints(const Loop *L, bool DisableInterleaving)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        TheLoop(L) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the width to 1.
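  /// Afterwards the loop metadata has roughly this shape (illustrative):
  ///   !0 = distinct !{!0, !1, !2}
  ///   !1 = !{!"llvm.loop.vectorize.width", i32 1}
  ///   !2 = !{!"llvm.loop.interleave.count", i32 1}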
  void setAlreadyVectorized() {
    Width.Value = Interleave.Value = 1;
    Hint Hints[] = {Width, Interleave};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      emitOptimizationRemarkAnalysis(F->getContext(),
                                     vectorizeAnalysisPassName(), *F,
                                     L->getStartLoc(), emitRemark());
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      emitOptimizationRemarkAnalysis(F->getContext(),
                                     vectorizeAnalysisPassName(), *F,
                                     L->getStartLoc(), emitRemark());
      return false;
    }

    if (getWidth() == 1 && getInterleave() == 1) {
      // FIXME: Add a separate metadata to indicate when the loop has already
      // been vectorized instead of setting width and count to 1.
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      emitOptimizationRemarkAnalysis(
          F->getContext(), vectorizeAnalysisPassName(), *F, L->getStartLoc(),
          "loop not vectorized: vectorization and interleaving are explicitly "
          "disabled, or vectorize width and interleave count are both set to "
          "1");
      return false;
    }

    return true;
  }

  /// Dumps all the hint information.
  std::string emitRemark() const {
    VectorizationReport R;
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      R << "vectorization is explicitly disabled";
    else {
      R << "use -Rpass-analysis=loop-vectorize for more info";
      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
        R << " (Force=true";
        if (Width.Value != 0)
          R << ", Vector Width=" << Width.Value;
        if (Interleave.Value != 0)
          R << ", Interleave Count=" << Interleave.Value;
        R << ")";
      }
    }

    return R.str();
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }
  const char *vectorizeAnalysisPassName() const {
    // If hints are provided that don't disable vectorization, use the
    // AlwaysPrint pass name to force the frontend to print the diagnostic.
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return DiagnosticInfo::AlwaysPrint;
  }

  bool allowReordering() const {
    // When hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations given by the scalar loop.
    // This is not enabled by default because it can be unsafe or inefficient.
    // For example, reordering floating-point operations will change the way
    // round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

private:
  /// Find hints specified in the loop metadata and update local values.
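  /// The hint metadata is expected to look like (illustrative):
  ///   !0 = distinct !{!0, !1, !2}
  ///   !1 = !{!"llvm.loop.vectorize.width", i32 4}
  ///   !2 = !{!"llvm.loop.interleave.count", i32 2}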
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either an MDString or an MDNode whose first
      // operand is an MDString.
      if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        if (MD->getNumOperands() == 0)
          continue;
        S = dyn_cast<MDString>(MD->getOperand(0));
        for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
          Args.push_back(MD->getOperand(i));
      } else {
        S = dyn_cast<MDString>(LoopID->getOperand(i));
        assert(Args.size() == 0 && "too many arguments for MDString");
      }

      if (!S)
        continue;

      // Check if the hint starts with the loop metadata prefix.
      StringRef Name = S->getString();
      if (Args.size() == 1)
        setHint(Name, Args[0]);
    }
  }

  /// Checks a string hint with one operand and sets the value if valid.
  void setHint(StringRef Name, Metadata *Arg) {
    if (!Name.startswith(Prefix()))
      return;
    Name = Name.substr(Prefix().size(), StringRef::npos);

    const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
    if (!C)
      return;
    unsigned Val = C->getZExtValue();

    Hint *Hints[] = {&Width, &Interleave, &Force};
    for (auto H : Hints) {
      if (Name == H->Name) {
        if (H->validate(Val))
          H->Value = Val;
        else
          DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
        break;
      }
    }
  }

  /// Create a new hint from name / value pair.
  MDNode *createHintMetadata(StringRef Name, unsigned V) const {
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    Metadata *MDs[] = {MDString::get(Context, Name),
                       ConstantAsMetadata::get(
                           ConstantInt::get(Type::getInt32Ty(Context), V))};
    return MDNode::get(Context, MDs);
  }

  /// Matches metadata with hint name.
  bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
    MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
    if (!Name)
      return false;

    for (auto H : HintTypes)
      if (Name->getString().endswith(H.Name))
        return true;
    return false;
  }

  /// Sets current hints into loop metadata, keeping other values intact.
  void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
    if (HintTypes.size() == 0)
      return;

    // Reserve the first element for LoopID (see below).
    SmallVector<Metadata *, 4> MDs(1);
    // If the loop already has metadata, then ignore the existing operands.
    MDNode *LoopID = TheLoop->getLoopID();
    if (LoopID) {
      for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
        MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
        // If the node is in the update list, ignore its old value.
        if (!matchesHintMetadataName(Node, HintTypes))
          MDs.push_back(Node);
      }
    }

    // Now, add the missing hints.
    for (auto H : HintTypes)
      MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));

    // Replace current metadata node with new one.
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);

    TheLoop->setLoopID(NewLoopID);
  }

  /// The loop these hints belong to.
  const Loop *TheLoop;
};

static void emitAnalysisDiag(const Function *TheFunction, const Loop *TheLoop,
                             const LoopVectorizeHints &Hints,
                             const LoopAccessReport &Message) {
  const char *Name = Hints.vectorizeAnalysisPassName();
  LoopAccessReport::emitAnalysis(Message, TheFunction, TheLoop, Name);
}

static void emitMissedWarning(Function *F, Loop *L,
                              const LoopVectorizeHints &LH) {
  emitOptimizationRemarkMissed(F->getContext(), LV_NAME, *F, L->getStartLoc(),
                               LH.emitRemark());

  if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
    if (LH.getWidth() != 1)
      emitLoopVectorizeWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop vectorization");
    else if (LH.getInterleave() != 1)
      emitLoopInterleaveWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop interleaving");
  }
}

/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
/// to what vectorization factor.
/// This class does not look at the profitability of vectorization, only the
/// legality. This class has two main kinds of checks:
/// * Memory checks - The code in canVectorizeMemory checks if vectorization
///   will change the order of memory accesses in a way that will change the
///   correctness of the program.
/// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory
///   checks for a number of different conditions, such as the availability of
///   a single induction variable, that all types are supported and
///   vectorizable, etc. This code reflects the capabilities of
///   InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying the
/// induction variable and the different reduction variables.
class LoopVectorizationLegality {
public:
  LoopVectorizationLegality(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
                            TargetLibraryInfo *TLI, AliasAnalysis *AA,
                            Function *F, const TargetTransformInfo *TTI,
                            LoopAccessAnalysis *LAA,
                            LoopVectorizationRequirements *R,
                            const LoopVectorizeHints *H)
      : NumPredStores(0), TheLoop(L), SE(SE), TLI(TLI), TheFunction(F),
        TTI(TTI), DT(DT), LAA(LAA), LAI(nullptr), InterleaveInfo(SE, L, DT),
        Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
        Requirements(R), Hints(H) {}

  /// ReductionList contains the reduction descriptors for all
  /// of the reductions that were found in the loop.
  typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;

  /// InductionList saves induction variables and maps them to the
  /// induction descriptor.
  typedef MapVector<PHINode *, InductionDescriptor> InductionList;

  /// Returns true if it is legal to vectorize this loop.
  /// This does not mean that it is profitable to vectorize this
  /// loop, only that it is legal to do so.
  bool canVectorize();

  /// Returns the induction variable.
  PHINode *getInduction() { return Induction; }

  /// Returns the reduction variables found in the loop.
  ReductionList *getReductionVars() { return &Reductions; }

  /// Returns the induction variables found in the loop.
  InductionList *getInductionVars() { return &Inductions; }

  /// Returns the widest induction type.
  Type *getWidestInductionType() { return WidestIndTy; }

  /// Returns true if V is an induction variable in this loop.
  bool isInductionVariable(const Value *V);

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  bool blockNeedsPredication(BasicBlock *BB);

  /// Check if this pointer is consecutive when vectorizing. This happens
  /// when the last index of the GEP is the induction variable, or when the
  /// pointer itself is an induction variable.
  /// This check allows us to vectorize A[idx] into a wide load/store.
  /// Returns:
  /// 0 - Stride is unknown or non-consecutive.
  /// 1 - Address is consecutive.
  /// -1 - Address is consecutive, and decreasing.
  int isConsecutivePtr(Value *Ptr);

  /// Returns true if the value V is uniform within the loop.
  bool isUniform(Value *V);

  /// Returns true if this instruction will remain scalar after vectorization.
  bool isUniformAfterVectorization(Instruction *I) { return Uniforms.count(I); }

  /// Returns the information that we collected about runtime memory checks.
  const RuntimePointerChecking *getRuntimePointerChecking() const {
    return LAI->getRuntimePointerChecking();
  }

  const LoopAccessInfo *getLAI() const { return LAI; }

  /// \brief Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// \brief Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }

  bool hasStride(Value *V) { return StrideSet.count(V); }
  bool mustCheckStrides() { return !StrideSet.empty(); }
  SmallPtrSet<Value *, 8>::iterator strides_begin() {
    return StrideSet.begin();
  }
  SmallPtrSet<Value *, 8>::iterator strides_end() { return StrideSet.end(); }

  /// Returns true if the target machine supports a masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
  }
  /// Returns true if the target machine supports a masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
  }
  /// Returns true if the vector representation of the instruction \p I
  /// requires a mask.
  bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
  unsigned getNumStores() const { return LAI->getNumStores(); }
  unsigned getNumLoads() const { return LAI->getNumLoads(); }
  unsigned getNumPredStores() const { return NumPredStores; }

private:
  /// Check if a single basic block loop is vectorizable.
  /// At this point we know that this is a loop with a constant trip count
  /// and we only need to check individual instructions.
  bool canVectorizeInstrs();

  /// When we vectorize loops we may change the order in which
  /// we read and write from memory. This method checks if it is
  /// legal to vectorize the code, considering only memory constraints.
  /// Returns true if the loop is vectorizable.
  bool canVectorizeMemory();

  /// Return true if we can vectorize this loop using the IF-conversion
  /// transformation.
  bool canVectorizeWithIfConvert();

  /// Collect the variables that need to stay uniform after vectorization.
  void collectLoopUniforms();

  /// Return true if all of the instructions in the block can be speculatively
  /// executed. \p SafePtrs is a list of addresses that are known to be legal
  /// and we know that we can read from them without segfault.
  bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);

  /// \brief Collect memory access with loop invariant strides.
  ///
  /// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
  /// invariant.
  void collectStridedAccess(Value *LoadOrStoreInst);

  /// Report an analysis message to assist the user in diagnosing loops that
  /// are not vectorized. These are handled as LoopAccessReport rather than
  /// VectorizationReport because the << operator of VectorizationReport
  /// returns LoopAccessReport.
  void emitAnalysis(const LoopAccessReport &Message) const {
    emitAnalysisDiag(TheFunction, TheLoop, *Hints, Message);
  }

  unsigned NumPredStores;

  /// The loop that we evaluate.
  Loop *TheLoop;
  /// Scev analysis.
  ScalarEvolution *SE;
  /// Target Library Info.
  TargetLibraryInfo *TLI;
  /// Parent function.
  Function *TheFunction;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Dominator Tree.
  DominatorTree *DT;
  // LoopAccess analysis.
  LoopAccessAnalysis *LAA;
  // And the loop-accesses info corresponding to this loop. This pointer is
  // null until canVectorizeMemory sets it up.
  const LoopAccessInfo *LAI;

  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride and close to each other.
  InterleavedAccessInfo InterleaveInfo;

  // --- vectorization state --- //

  /// Holds the integer induction variable. This is the counter of the
  /// loop.
  PHINode *Induction;
  /// Holds the reduction variables.
  ReductionList Reductions;
  /// Holds all of the induction variables that we found in the loop.
  /// Notice that inductions don't need to start at zero and that induction
  /// variables can be pointers.
  InductionList Inductions;
  /// Holds the widest induction type encountered.
  Type *WidestIndTy;

  /// Allowed outside users. This holds the reduction
  /// vars which can be accessed from outside the loop.
  SmallPtrSet<Value *, 4> AllowedExit;
  /// This set holds the variables which are known to be uniform after
  /// vectorization.
  SmallPtrSet<Instruction *, 4> Uniforms;

  /// Can we assume the absence of NaNs.
  bool HasFunNoNaNAttr;

  /// Vectorization requirements that will go through late-evaluation.
  LoopVectorizationRequirements *Requirements;

  /// Used to emit an analysis of any legality issues.
  const LoopVectorizeHints *Hints;

  ValueToValueMap Strides;
  SmallPtrSet<Value *, 8> StrideSet;

  /// While vectorizing these instructions we have to generate a
  /// call to the appropriate masked intrinsic.
  SmallPtrSet<const Instruction *, 8> MaskedOp;
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(Loop *L, ScalarEvolution *SE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC, const Function *F,
                             const LoopVectorizeHints *Hints,
                             SmallPtrSetImpl<const Value *> &ValuesToIgnore)
      : TheLoop(L), SE(SE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
        TheFunction(F), Hints(Hints), ValuesToIgnore(ValuesToIgnore) {}

  /// Information about vectorization costs.
  struct VectorizationFactor {
    unsigned Width; // Vector width with the best cost.
    unsigned Cost;  // Cost of the loop with that width.
  };
  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to VF. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(bool OptForSize);

  /// \return The size (in bits) of the widest type in the code that
  /// needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  unsigned getWidestType();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
                                 unsigned LoopCost);

  /// \return The most profitable interleave count.
  /// This method finds the best interleave count based on register pressure
  /// and other parameters. VF and LoopCost are the selected vectorization
  /// factor and the cost of the selected VF.
  unsigned computeInterleaveCount(bool OptForSize, unsigned VF,
                                  unsigned LoopCost);

  /// \brief A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    unsigned LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    unsigned MaxLocalUsers;
    /// Holds the number of instructions in the loop.
    unsigned NumInstructions;
  };

  /// \return information about the register usage of the loop.
  RegisterUsage calculateRegisterUsage();

private:
  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  unsigned expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  unsigned getInstructionCost(Instruction *I, unsigned VF);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Report an analysis message to assist the user in diagnosing loops that
  /// are not vectorized. These are handled as LoopAccessReport rather than
  /// VectorizationReport because the << operator of VectorizationReport
  /// returns LoopAccessReport.
  void emitAnalysis(const LoopAccessReport &Message) const {
    emitAnalysisDiag(TheFunction, TheLoop, *Hints, Message);
  }

public:
  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be
  /// truncated to this type.
  DenseMap<Instruction *, uint64_t> MinBWs;

  /// The loop that we evaluate.
  Loop *TheLoop;
  /// Scev analysis.
  ScalarEvolution *SE;
  /// Loop Info analysis.
  LoopInfo *LI;
  /// Vectorization legality.
  LoopVectorizationLegality *Legal;
  /// Vector target information.
  const TargetTransformInfo &TTI;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Demanded bits analysis.
  DemandedBits *DB;
  const Function *TheFunction;
  // Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;
  // Values to ignore in the cost model.
  const SmallPtrSetImpl<const Value *> &ValuesToIgnore;
};

/// \brief This holds vectorization requirements that must be verified late in
/// the process. The requirements are set by the legality analysis and the
/// cost model. Once vectorization has been determined to be possible and
/// profitable the requirements can be verified by looking for metadata or
/// compiler options.
/// For example, some loops require FP commutativity which is only allowed if
/// vectorization is explicitly specified or if the fast-math compiler option
/// has been provided.
/// Late evaluation of these requirements allows helpful diagnostics to be
/// composed that tell the user what needs to be done to vectorize the loop.
/// For example, by specifying #pragma clang loop vectorize or -ffast-math.
/// Late evaluation should be used only when diagnostics can be generated that
/// can be followed by a non-expert user.
class LoopVectorizationRequirements {
public:
  LoopVectorizationRequirements()
      : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr) {}

  void addUnsafeAlgebraInst(Instruction *I) {
    // First unsafe algebra instruction.
1482 if (!UnsafeAlgebraInst) 1483 UnsafeAlgebraInst = I; 1484 } 1485 1486 void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; } 1487 1488 bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) { 1489 const char *Name = Hints.vectorizeAnalysisPassName(); 1490 bool Failed = false; 1491 if (UnsafeAlgebraInst && !Hints.allowReordering()) { 1492 emitOptimizationRemarkAnalysisFPCommute( 1493 F->getContext(), Name, *F, UnsafeAlgebraInst->getDebugLoc(), 1494 VectorizationReport() << "cannot prove it is safe to reorder " 1495 "floating-point operations"); 1496 Failed = true; 1497 } 1498 1499 // Test if runtime memcheck thresholds are exceeded. 1500 bool PragmaThresholdReached = 1501 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 1502 bool ThresholdReached = 1503 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 1504 if ((ThresholdReached && !Hints.allowReordering()) || 1505 PragmaThresholdReached) { 1506 emitOptimizationRemarkAnalysisAliasing( 1507 F->getContext(), Name, *F, L->getStartLoc(), 1508 VectorizationReport() 1509 << "cannot prove it is safe to reorder memory operations"); 1510 DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 1511 Failed = true; 1512 } 1513 1514 return Failed; 1515 } 1516 1517 private: 1518 unsigned NumRuntimePointerChecks; 1519 Instruction *UnsafeAlgebraInst; 1520 }; 1521 1522 static void addInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) { 1523 if (L.empty()) 1524 return V.push_back(&L); 1525 1526 for (Loop *InnerL : L) 1527 addInnerLoop(*InnerL, V); 1528 } 1529 1530 /// The LoopVectorize Pass. 1531 struct LoopVectorize : public FunctionPass { 1532 /// Pass identification, replacement for typeid 1533 static char ID; 1534 1535 explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true) 1536 : FunctionPass(ID), 1537 DisableUnrolling(NoUnrolling), 1538 AlwaysVectorize(AlwaysVectorize) { 1539 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1540 } 1541 1542 ScalarEvolution *SE; 1543 LoopInfo *LI; 1544 TargetTransformInfo *TTI; 1545 DominatorTree *DT; 1546 BlockFrequencyInfo *BFI; 1547 TargetLibraryInfo *TLI; 1548 DemandedBits *DB; 1549 AliasAnalysis *AA; 1550 AssumptionCache *AC; 1551 LoopAccessAnalysis *LAA; 1552 bool DisableUnrolling; 1553 bool AlwaysVectorize; 1554 1555 BlockFrequency ColdEntryFreq; 1556 1557 bool runOnFunction(Function &F) override { 1558 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1559 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1560 TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1561 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1562 BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1563 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1564 TLI = TLIP ? &TLIP->getTLI() : nullptr; 1565 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1566 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1567 LAA = &getAnalysis<LoopAccessAnalysis>(); 1568 DB = &getAnalysis<DemandedBits>(); 1569 1570 // Compute some weights outside of the loop over the loops. Compute this 1571 // using a BranchProbability to re-use its scaling math. 1572 const BranchProbability ColdProb(1, 5); // 20% 1573 ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb; 1574 1575 // Don't attempt if 1576 // 1. the target claims to have no vector registers, and 1577 // 2. interleaving won't help ILP. 
1578 // 1579 // The second condition is necessary because, even if the target has no 1580 // vector registers, loop vectorization may still enable scalar 1581 // interleaving. 1582 if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2) 1583 return false; 1584 1585 // Build up a worklist of inner-loops to vectorize. This is necessary as 1586 // the act of vectorizing or partially unrolling a loop creates new loops 1587 // and can invalidate iterators across the loops. 1588 SmallVector<Loop *, 8> Worklist; 1589 1590 for (Loop *L : *LI) 1591 addInnerLoop(*L, Worklist); 1592 1593 LoopsAnalyzed += Worklist.size(); 1594 1595 // Now walk the identified inner loops. 1596 bool Changed = false; 1597 while (!Worklist.empty()) 1598 Changed |= processLoop(Worklist.pop_back_val()); 1599 1600 // Process each loop nest in the function. 1601 return Changed; 1602 } 1603 1604 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 1605 SmallVector<Metadata *, 4> MDs; 1606 // Reserve first location for self reference to the LoopID metadata node. 1607 MDs.push_back(nullptr); 1608 bool IsUnrollMetadata = false; 1609 MDNode *LoopID = L->getLoopID(); 1610 if (LoopID) { 1611 // First find existing loop unrolling disable metadata. 1612 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 1613 MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 1614 if (MD) { 1615 const MDString *S = dyn_cast<MDString>(MD->getOperand(0)); 1616 IsUnrollMetadata = 1617 S && S->getString().startswith("llvm.loop.unroll.disable"); 1618 } 1619 MDs.push_back(LoopID->getOperand(i)); 1620 } 1621 } 1622 1623 if (!IsUnrollMetadata) { 1624 // Add runtime unroll disable metadata. 1625 LLVMContext &Context = L->getHeader()->getContext(); 1626 SmallVector<Metadata *, 1> DisableOperands; 1627 DisableOperands.push_back( 1628 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 1629 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 1630 MDs.push_back(DisableNode); 1631 MDNode *NewLoopID = MDNode::get(Context, MDs); 1632 // Set operand 0 to refer to the loop id itself. 1633 NewLoopID->replaceOperandWith(0, NewLoopID); 1634 L->setLoopID(NewLoopID); 1635 } 1636 } 1637 1638 bool processLoop(Loop *L) { 1639 assert(L->empty() && "Only process inner loops."); 1640 1641 #ifndef NDEBUG 1642 const std::string DebugLocStr = getDebugLocString(L); 1643 #endif /* NDEBUG */ 1644 1645 DEBUG(dbgs() << "\nLV: Checking a loop in \"" 1646 << L->getHeader()->getParent()->getName() << "\" from " 1647 << DebugLocStr << "\n"); 1648 1649 LoopVectorizeHints Hints(L, DisableUnrolling); 1650 1651 DEBUG(dbgs() << "LV: Loop hints:" 1652 << " force=" 1653 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 1654 ? "disabled" 1655 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 1656 ? "enabled" 1657 : "?")) << " width=" << Hints.getWidth() 1658 << " unroll=" << Hints.getInterleave() << "\n"); 1659 1660 // Function containing loop 1661 Function *F = L->getHeader()->getParent(); 1662 1663 // Looking at the diagnostic output is the only way to determine if a loop 1664 // was vectorized (other than looking at the IR or machine code), so it 1665 // is important to generate an optimization remark for each loop. Most of 1666 // these messages are generated by emitOptimizationRemarkAnalysis. Remarks 1667 // generated by emitOptimizationRemark and emitOptimizationRemarkMissed are 1668 // less verbose reporting vectorized loops and unvectorized loops that may 1669 // benefit from vectorization, respectively. 
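// E.g. with -Rpass=loop-vectorize a successfully vectorized loop produces a
// remark roughly like "vectorized loop (vectorization width: 4, interleaved
// count: 1)", while -Rpass-analysis=loop-vectorize explains why a loop was
// not vectorized.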
1670
1671 if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
1672 DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
1673 return false;
1674 }
1675
1676 // Check the loop for a trip count threshold:
1677 // do not vectorize loops with a tiny trip count.
1678 const unsigned TC = SE->getSmallConstantTripCount(L);
1679 if (TC > 0u && TC < TinyTripCountVectorThreshold) {
1680 DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
1681 << "This loop is not worth vectorizing.");
1682 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
1683 DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
1684 else {
1685 DEBUG(dbgs() << "\n");
1686 emitAnalysisDiag(F, L, Hints, VectorizationReport()
1687 << "vectorization is not beneficial "
1688 "and is not explicitly forced");
1689 return false;
1690 }
1691 }
1692
1693 // Check if it is legal to vectorize the loop.
1694 LoopVectorizationRequirements Requirements;
1695 LoopVectorizationLegality LVL(L, SE, DT, TLI, AA, F, TTI, LAA,
1696 &Requirements, &Hints);
1697 if (!LVL.canVectorize()) {
1698 DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
1699 emitMissedWarning(F, L, Hints);
1700 return false;
1701 }
1702
1703 // Collect values we want to ignore in the cost model. This includes
1704 // type-promoting instructions we identified during reduction detection.
1705 SmallPtrSet<const Value *, 32> ValuesToIgnore;
1706 CodeMetrics::collectEphemeralValues(L, AC, ValuesToIgnore);
1707 for (auto &Reduction : *LVL.getReductionVars()) {
1708 RecurrenceDescriptor &RedDes = Reduction.second;
1709 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
1710 ValuesToIgnore.insert(Casts.begin(), Casts.end());
1711 }
1712
1713 // Use the cost model.
1714 LoopVectorizationCostModel CM(L, SE, LI, &LVL, *TTI, TLI, DB, AC, F, &Hints,
1715 ValuesToIgnore);
1716
1717 // Check the function attributes to find out if this function should be
1718 // optimized for size.
1719 bool OptForSize = Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
1720 F->optForSize();
1721
1722 // Compute the weighted frequency of this loop being executed and see if it
1723 // is less than 20% of the function entry baseline frequency. Note that we
1724 // always have a canonical loop here because we think we *can* vectorize.
1725 // FIXME: This is hidden behind a flag due to pervasive problems with
1726 // exactly what block frequency models.
1727 if (LoopVectorizeWithBlockFrequency) {
1728 BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader());
1729 if (Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
1730 LoopEntryFreq < ColdEntryFreq)
1731 OptForSize = true;
1732 }
1733
1734 // Check the function attributes to see if implicit floats are allowed.
1735 // FIXME: This check doesn't seem possibly correct -- what if the loop is
1736 // an integer loop and the vector instructions selected are purely integer
1737 // vector instructions?
1738 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
1739 DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
1740 " attribute is used.\n");
1741 emitAnalysisDiag(
1742 F, L, Hints,
1743 VectorizationReport()
1744 << "loop not vectorized due to NoImplicitFloat attribute");
1745 emitMissedWarning(F, L, Hints);
1746 return false;
1747 }
1748
1749 // Select the optimal vectorization factor.
1750 const LoopVectorizationCostModel::VectorizationFactor VF =
1751 CM.selectVectorizationFactor(OptForSize);
1752
1753 // Select the interleave count.
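// E.g. with VF.Width = 4 and IC = 2, each iteration of the vector body will
// process 4 * 2 = 8 elements of the original loop.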
1754 unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);
1755
1756 // Get user interleave count.
1757 unsigned UserIC = Hints.getInterleave();
1758
1759 // Identify the diagnostic messages that should be produced.
1760 std::string VecDiagMsg, IntDiagMsg;
1761 bool VectorizeLoop = true, InterleaveLoop = true;
1762
1763 if (Requirements.doesNotMeet(F, L, Hints)) {
1764 DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
1765 "requirements.\n");
1766 emitMissedWarning(F, L, Hints);
1767 return false;
1768 }
1769
1770 if (VF.Width == 1) {
1771 DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
1772 VecDiagMsg =
1773 "the cost-model indicates that vectorization is not beneficial";
1774 VectorizeLoop = false;
1775 }
1776
1777 if (IC == 1 && UserIC <= 1) {
1778 // Tell the user interleaving is not beneficial.
1779 DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
1780 IntDiagMsg =
1781 "the cost-model indicates that interleaving is not beneficial";
1782 InterleaveLoop = false;
1783 if (UserIC == 1)
1784 IntDiagMsg +=
1785 " and is explicitly disabled or interleave count is set to 1";
1786 } else if (IC > 1 && UserIC == 1) {
1787 // Tell the user interleaving is beneficial, but it is explicitly disabled.
1788 DEBUG(dbgs()
1789 << "LV: Interleaving is beneficial but is explicitly disabled.\n");
1790 IntDiagMsg = "the cost-model indicates that interleaving is beneficial "
1791 "but is explicitly disabled or interleave count is set to 1";
1792 InterleaveLoop = false;
1793 }
1794
1795 // Override IC if user provided an interleave count.
1796 IC = UserIC > 0 ? UserIC : IC;
1797
1798 // Emit diagnostic messages, if any.
1799 const char *VAPassName = Hints.vectorizeAnalysisPassName();
1800 if (!VectorizeLoop && !InterleaveLoop) {
1801 // Do not vectorize or interleave the loop.
1802 emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
1803 L->getStartLoc(), VecDiagMsg);
1804 emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
1805 L->getStartLoc(), IntDiagMsg);
1806 return false;
1807 } else if (!VectorizeLoop && InterleaveLoop) {
1808 DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
1809 emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
1810 L->getStartLoc(), VecDiagMsg);
1811 } else if (VectorizeLoop && !InterleaveLoop) {
1812 DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
1813 << DebugLocStr << '\n');
1814 emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
1815 L->getStartLoc(), IntDiagMsg);
1816 } else if (VectorizeLoop && InterleaveLoop) {
1817 DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
1818 << DebugLocStr << '\n');
1819 DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
1820 }
1821
1822 if (!VectorizeLoop) {
1823 assert(IC > 1 && "interleave count should not be 1 or 0");
1824 // If we decided that it is not profitable to vectorize the loop, then
1825 // interleave it.
1826 InnerLoopUnroller Unroller(L, SE, LI, DT, TLI, TTI, IC);
1827 Unroller.vectorize(&LVL, CM.MinBWs);
1828
1829 emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
1830 Twine("interleaved loop (interleaved count: ") +
1831 Twine(IC) + ")");
1832 } else {
1833 // If we decided that it is both legal and profitable to vectorize the loop, then do it.
1834 InnerLoopVectorizer LB(L, SE, LI, DT, TLI, TTI, VF.Width, IC);
1835 LB.vectorize(&LVL, CM.MinBWs);
1836 ++LoopsVectorized;
1837
1838 // Add metadata to disable runtime unrolling of the scalar loop when there
1839 // are no runtime checks for strides and memory: in that situation the
1840 // scalar loop is rarely executed and is not worth unrolling.
1841 if (!LB.IsSafetyChecksAdded())
1842 AddRuntimeUnrollDisableMetaData(L);
1843
1844 // Report the vectorization decision.
1845 emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
1846 Twine("vectorized loop (vectorization width: ") +
1847 Twine(VF.Width) + ", interleaved count: " +
1848 Twine(IC) + ")");
1849 }
1850
1851 // Mark the loop as already vectorized to avoid vectorizing again.
1852 Hints.setAlreadyVectorized();
1853
1854 DEBUG(verifyFunction(*L->getHeader()->getParent()));
1855 return true;
1856 }
1857
1858 void getAnalysisUsage(AnalysisUsage &AU) const override {
1859 AU.addRequired<AssumptionCacheTracker>();
1860 AU.addRequiredID(LoopSimplifyID);
1861 AU.addRequiredID(LCSSAID);
1862 AU.addRequired<BlockFrequencyInfoWrapperPass>();
1863 AU.addRequired<DominatorTreeWrapperPass>();
1864 AU.addRequired<LoopInfoWrapperPass>();
1865 AU.addRequired<ScalarEvolutionWrapperPass>();
1866 AU.addRequired<TargetTransformInfoWrapperPass>();
1867 AU.addRequired<AAResultsWrapperPass>();
1868 AU.addRequired<LoopAccessAnalysis>();
1869 AU.addRequired<DemandedBits>();
1870 AU.addPreserved<LoopInfoWrapperPass>();
1871 AU.addPreserved<DominatorTreeWrapperPass>();
1872 AU.addPreserved<BasicAAWrapperPass>();
1873 AU.addPreserved<AAResultsWrapperPass>();
1874 AU.addPreserved<GlobalsAAWrapperPass>();
1875 }
1876
1877 };
1878
1879 } // end anonymous namespace
1880
1881 //===----------------------------------------------------------------------===//
1882 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
1883 // LoopVectorizationCostModel.
1884 //===----------------------------------------------------------------------===//
1885
1886 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
1887 // We need to place the broadcast of invariant variables outside the loop.
1888 Instruction *Instr = dyn_cast<Instruction>(V);
1889 bool NewInstr =
1890 (Instr && std::find(LoopVectorBody.begin(), LoopVectorBody.end(),
1891 Instr->getParent()) != LoopVectorBody.end());
1892 bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;
1893
1894 // Place the code for broadcasting invariant variables in the new preheader.
1895 IRBuilder<>::InsertPointGuard Guard(Builder);
1896 if (Invariant)
1897 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1898
1899 // Broadcast the scalar into all locations in the vector.
1900 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
1901
1902 return Shuf;
1903 }
1904
1905 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx,
1906 Value *Step) {
1907 assert(Val->getType()->isVectorTy() && "Must be a vector");
1908 assert(Val->getType()->getScalarType()->isIntegerTy() &&
1909 "Elem must be an integer");
1910 assert(Step->getType() == Val->getType()->getScalarType() &&
1911 "Step has wrong type");
1912 // Create the types.
1913 Type *ITy = Val->getType()->getScalarType();
1914 VectorType *Ty = cast<VectorType>(Val->getType());
1915 int VLen = Ty->getNumElements();
1916 SmallVector<Constant*, 8> Indices;
1917
1918 // Create a vector of consecutive numbers from zero to VF.
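// E.g. for VF = 4 and StartIdx = 0 this builds <0, 1, 2, 3>; below it is
// multiplied by the splatted Step and added to Val.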
1919 for (int i = 0; i < VLen; ++i)
1920 Indices.push_back(ConstantInt::get(ITy, StartIdx + i));
1921
1922 // Add the consecutive indices to the vector value.
1923 Constant *Cv = ConstantVector::get(Indices);
1924 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
1925 Step = Builder.CreateVectorSplat(VLen, Step);
1926 assert(Step->getType() == Val->getType() && "Invalid step vec");
1927 // FIXME: The newly created binary instructions should contain nsw/nuw flags,
1928 // which can be found from the original scalar operations.
1929 Step = Builder.CreateMul(Cv, Step);
1930 return Builder.CreateAdd(Val, Step, "induction");
1931 }
1932
1933 int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
1934 assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr");
1935 // Make sure that the pointer does not point to structs.
1936 if (Ptr->getType()->getPointerElementType()->isAggregateType())
1937 return 0;
1938
1939 // If this value is a pointer induction variable we know it is consecutive.
1940 PHINode *Phi = dyn_cast_or_null<PHINode>(Ptr);
1941 if (Phi && Inductions.count(Phi)) {
1942 InductionDescriptor II = Inductions[Phi];
1943 return II.getConsecutiveDirection();
1944 }
1945
1946 GetElementPtrInst *Gep = dyn_cast_or_null<GetElementPtrInst>(Ptr);
1947 if (!Gep)
1948 return 0;
1949
1950 unsigned NumOperands = Gep->getNumOperands();
1951 Value *GpPtr = Gep->getPointerOperand();
1952 // If this GEP value is a consecutive pointer induction variable and all of
1953 // the indices are loop invariant, then we know it is consecutive.
1954 Phi = dyn_cast<PHINode>(GpPtr);
1955 if (Phi && Inductions.count(Phi)) {
1956
1957 // Make sure that the pointer does not point to structs.
1958 PointerType *GepPtrType = cast<PointerType>(GpPtr->getType());
1959 if (GepPtrType->getElementType()->isAggregateType())
1960 return 0;
1961
1962 // Make sure that all of the index operands are loop invariant.
1963 for (unsigned i = 1; i < NumOperands; ++i)
1964 if (!SE->isLoopInvariant(SE->getSCEV(Gep->getOperand(i)), TheLoop))
1965 return 0;
1966
1967 InductionDescriptor II = Inductions[Phi];
1968 return II.getConsecutiveDirection();
1969 }
1970
1971 unsigned InductionOperand = getGEPInductionOperand(Gep);
1972
1973 // Check that all of the gep indices are uniform except for our induction
1974 // operand.
1975 for (unsigned i = 0; i != NumOperands; ++i)
1976 if (i != InductionOperand &&
1977 !SE->isLoopInvariant(SE->getSCEV(Gep->getOperand(i)), TheLoop))
1978 return 0;
1979
1980 // We can emit wide loads/stores only if the last non-zero index is the
1981 // induction variable.
1982 const SCEV *Last = nullptr;
1983 if (!Strides.count(Gep))
1984 Last = SE->getSCEV(Gep->getOperand(InductionOperand));
1985 else {
1986 // Because of the multiplication by a stride we can have a s/zext cast.
1987 // We are going to replace this stride by 1 so the cast is safe to ignore.
1988 //
1989 // %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
1990 // %0 = trunc i64 %indvars.iv to i32
1991 // %mul = mul i32 %0, %Stride1
1992 // %idxprom = zext i32 %mul to i64 << Safe cast.
1993 // %arrayidx = getelementptr inbounds i32* %B, i64 %idxprom
1994 //
1995 Last = replaceSymbolicStrideSCEV(SE, Strides,
1996 Gep->getOperand(InductionOperand), Gep);
1997 if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(Last))
1998 Last =
1999 (C->getSCEVType() == scSignExtend || C->getSCEVType() == scZeroExtend)
2000 ?
C->getOperand()
2001 : Last;
2002 }
2003 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Last)) {
2004 const SCEV *Step = AR->getStepRecurrence(*SE);
2005
2006 // The memory is consecutive because the last index is consecutive
2007 // and all other indices are loop invariant.
2008 if (Step->isOne())
2009 return 1;
2010 if (Step->isAllOnesValue())
2011 return -1;
2012 }
2013
2014 return 0;
2015 }
2016
2017 bool LoopVectorizationLegality::isUniform(Value *V) {
2018 return LAI->isUniform(V);
2019 }
2020
2021 InnerLoopVectorizer::VectorParts&
2022 InnerLoopVectorizer::getVectorValue(Value *V) {
2023 assert(V != Induction && "The new induction variable should not be used.");
2024 assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2025
2026 // If we have a stride that is replaced by one, do it here.
2027 if (Legal->hasStride(V))
2028 V = ConstantInt::get(V->getType(), 1);
2029
2030 // If we have this scalar in the map, return it.
2031 if (WidenMap.has(V))
2032 return WidenMap.get(V);
2033
2034 // If this scalar is unknown, assume that it is a constant or that it is
2035 // loop invariant. Broadcast V and save the value for future uses.
2036 Value *B = getBroadcastInstrs(V);
2037
2038 return WidenMap.splat(V, B);
2039 }
2040
2041 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2042 assert(Vec->getType()->isVectorTy() && "Invalid type");
2043 SmallVector<Constant*, 8> ShuffleMask;
2044 for (unsigned i = 0; i < VF; ++i)
2045 ShuffleMask.push_back(Builder.getInt32(VF - i - 1));
2046
2047 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
2048 ConstantVector::get(ShuffleMask),
2049 "reverse");
2050 }
2051
2052 // Get a mask to interleave \p NumVec vectors into a wide vector.
2053 // I.e. <0, VF, VF*2, ..., VF*(NumVec-1), 1, VF+1, VF*2+1, ...>
2054 // E.g. For 2 interleaved vectors, if VF is 4, the mask is:
2055 // <0, 4, 1, 5, 2, 6, 3, 7>
2056 static Constant *getInterleavedMask(IRBuilder<> &Builder, unsigned VF,
2057 unsigned NumVec) {
2058 SmallVector<Constant *, 16> Mask;
2059 for (unsigned i = 0; i < VF; i++)
2060 for (unsigned j = 0; j < NumVec; j++)
2061 Mask.push_back(Builder.getInt32(j * VF + i));
2062
2063 return ConstantVector::get(Mask);
2064 }
2065
2066 // Get the strided mask starting from index \p Start.
2067 // I.e. <Start, Start + Stride, ..., Start + Stride*(VF-1)>
2068 static Constant *getStridedMask(IRBuilder<> &Builder, unsigned Start,
2069 unsigned Stride, unsigned VF) {
2070 SmallVector<Constant *, 16> Mask;
2071 for (unsigned i = 0; i < VF; i++)
2072 Mask.push_back(Builder.getInt32(Start + i * Stride));
2073
2074 return ConstantVector::get(Mask);
2075 }
2076
2077 // Get a mask of two parts: the first part consists of sequential integers
2078 // starting from 0, the second part consists of UNDEFs.
2079 // I.e. <0, 1, 2, ..., NumInt - 1, undef, ..., undef>
2080 static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned NumInt,
2081 unsigned NumUndef) {
2082 SmallVector<Constant *, 16> Mask;
2083 for (unsigned i = 0; i < NumInt; i++)
2084 Mask.push_back(Builder.getInt32(i));
2085
2086 Constant *Undef = UndefValue::get(Builder.getInt32Ty());
2087 for (unsigned i = 0; i < NumUndef; i++)
2088 Mask.push_back(Undef);
2089
2090 return ConstantVector::get(Mask);
2091 }
2092
2093 // Concatenate two vectors with the same element type. The 2nd vector should
2094 // not have more elements than the 1st vector. If the 2nd vector has fewer
2095 // elements, extend it with UNDEFs.
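// E.g. for V1 = <4 x i32> and V2 = <2 x i32>: V2 is first widened to
// <4 x i32> with two undef lanes, then the final shuffle selects lanes
// <0, 1, 2, 3, 4, 5>, yielding a <6 x i32> result.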
2096 static Value *ConcatenateTwoVectors(IRBuilder<> &Builder, Value *V1,
2097 Value *V2) {
2098 VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
2099 VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
2100 assert(VecTy1 && VecTy2 &&
2101 VecTy1->getScalarType() == VecTy2->getScalarType() &&
2102 "Expect two vectors with the same element type");
2103
2104 unsigned NumElts1 = VecTy1->getNumElements();
2105 unsigned NumElts2 = VecTy2->getNumElements();
2106 assert(NumElts1 >= NumElts2 && "The first vector must not have fewer elements");
2107
2108 if (NumElts1 > NumElts2) {
2109 // Extend with UNDEFs.
2110 Constant *ExtMask =
2111 getSequentialMask(Builder, NumElts2, NumElts1 - NumElts2);
2112 V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask);
2113 }
2114
2115 Constant *Mask = getSequentialMask(Builder, NumElts1 + NumElts2, 0);
2116 return Builder.CreateShuffleVector(V1, V2, Mask);
2117 }
2118
2119 // Concatenate vectors in the given list. All vectors have the same type.
2120 static Value *ConcatenateVectors(IRBuilder<> &Builder,
2121 ArrayRef<Value *> InputList) {
2122 unsigned NumVec = InputList.size();
2123 assert(NumVec > 1 && "Should be at least two vectors");
2124
2125 SmallVector<Value *, 8> ResList;
2126 ResList.append(InputList.begin(), InputList.end());
2127 do {
2128 SmallVector<Value *, 8> TmpList;
2129 for (unsigned i = 0; i < NumVec - 1; i += 2) {
2130 Value *V0 = ResList[i], *V1 = ResList[i + 1];
2131 assert((V0->getType() == V1->getType() || i == NumVec - 2) &&
2132 "Only the last vector may have a different type");
2133
2134 TmpList.push_back(ConcatenateTwoVectors(Builder, V0, V1));
2135 }
2136
2137 // Push the last vector if the total number of vectors is odd.
2138 if (NumVec % 2 != 0)
2139 TmpList.push_back(ResList[NumVec - 1]);
2140
2141 ResList = TmpList;
2142 NumVec = ResList.size();
2143 } while (NumVec > 1);
2144
2145 return ResList[0];
2146 }
2147
2148 // Try to vectorize the interleave group that \p Instr belongs to.
2149 //
2150 // E.g. Translate the following interleaved load group (factor = 3):
2151 // for (i = 0; i < N; i+=3) {
2152 // R = Pic[i]; // Member of index 0
2153 // G = Pic[i+1]; // Member of index 1
2154 // B = Pic[i+2]; // Member of index 2
2155 // ... // do something to R, G, B
2156 // }
2157 // To:
2158 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B
2159 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements
2160 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements
2161 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements
2162 //
2163 // Or translate the following interleaved store group (factor = 3):
2164 // for (i = 0; i < N; i+=3) {
2165 // ... do something to R, G, B
2166 // Pic[i] = R; // Member of index 0
2167 // Pic[i+1] = G; // Member of index 1
2168 // Pic[i+2] = B; // Member of index 2
2169 // }
2170 // To:
2171 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2172 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2173 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2174 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
2175 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
2176 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
2177 const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr);
2178 assert(Group && "Failed to get an interleaved access group.");
2179
2180 // Skip if the current instruction is not the insert position.
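// The wide load/store for the whole group is emitted only once, when the
// member at the insert position is visited; all other members are handled
// at that point and can be skipped here.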
2181 if (Instr != Group->getInsertPos())
2182 return;
2183
2184 LoadInst *LI = dyn_cast<LoadInst>(Instr);
2185 StoreInst *SI = dyn_cast<StoreInst>(Instr);
2186 Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();
2187
2188 // Prepare the vector type of the interleaved load/store.
2189 Type *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
2190 unsigned InterleaveFactor = Group->getFactor();
2191 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
2192 Type *PtrTy = VecTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());
2193
2194 // Prepare the new pointers.
2195 setDebugLocFromInst(Builder, Ptr);
2196 VectorParts &PtrParts = getVectorValue(Ptr);
2197 SmallVector<Value *, 2> NewPtrs;
2198 unsigned Index = Group->getIndex(Instr);
2199 for (unsigned Part = 0; Part < UF; Part++) {
2200 // Extract the pointer for the current instruction from the pointer vector. A
2201 // reverse access uses the pointer in the last lane.
2202 Value *NewPtr = Builder.CreateExtractElement(
2203 PtrParts[Part],
2204 Group->isReverse() ? Builder.getInt32(VF - 1) : Builder.getInt32(0));
2205
2206 // Note that the current instruction could be at any index. We need to
2207 // adjust the address to the member of index 0.
2208 //
2209 // E.g. a = A[i+1]; // Member of index 1 (Current instruction)
2210 // b = A[i]; // Member of index 0
2211 // The current pointer points to A[i+1]; adjust it to A[i].
2212 //
2213 // E.g. A[i+1] = a; // Member of index 1
2214 // A[i] = b; // Member of index 0
2215 // A[i+2] = c; // Member of index 2 (Current instruction)
2216 // The current pointer points to A[i+2]; adjust it to A[i].
2217 NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
2218
2219 // Cast to the vector pointer type.
2220 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
2221 }
2222
2223 setDebugLocFromInst(Builder, Instr);
2224 Value *UndefVec = UndefValue::get(VecTy);
2225
2226 // Vectorize the interleaved load group.
2227 if (LI) {
2228 for (unsigned Part = 0; Part < UF; Part++) {
2229 Instruction *NewLoadInstr = Builder.CreateAlignedLoad(
2230 NewPtrs[Part], Group->getAlignment(), "wide.vec");
2231
2232 for (unsigned i = 0; i < InterleaveFactor; i++) {
2233 Instruction *Member = Group->getMember(i);
2234
2235 // Skip the gaps in the group.
2236 if (!Member)
2237 continue;
2238
2239 Constant *StrideMask = getStridedMask(Builder, i, InterleaveFactor, VF);
2240 Value *StridedVec = Builder.CreateShuffleVector(
2241 NewLoadInstr, UndefVec, StrideMask, "strided.vec");
2242
2243 // If this member has a different type, cast the result to that type.
2244 if (Member->getType() != ScalarTy) {
2245 VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2246 StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy);
2247 }
2248
2249 VectorParts &Entry = WidenMap.get(Member);
2250 Entry[Part] =
2251 Group->isReverse() ? reverseVector(StridedVec) : StridedVec;
2252 }
2253
2254 propagateMetadata(NewLoadInstr, Instr);
2255 }
2256 return;
2257 }
2258
2259 // The subvector type for the current instruction.
2260 VectorType *SubVT = VectorType::get(ScalarTy, VF);
2261
2262 // Vectorize the interleaved store group.
2263 for (unsigned Part = 0; Part < UF; Part++) {
2264 // Collect the stored vector from each member.
2265 SmallVector<Value *, 4> StoredVecs;
2266 for (unsigned i = 0; i < InterleaveFactor; i++) {
2267 // An interleaved store group doesn't allow gaps, so each index has a member.
2268 Instruction *Member = Group->getMember(i);
2269 assert(Member && "Failed to get a member from an interleaved store group");
2270
2271 Value *StoredVec =
2272 getVectorValue(dyn_cast<StoreInst>(Member)->getValueOperand())[Part];
2273 if (Group->isReverse())
2274 StoredVec = reverseVector(StoredVec);
2275
2276 // If this member has a different type, cast it to a unified type.
2277 if (StoredVec->getType() != SubVT)
2278 StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT);
2279
2280 StoredVecs.push_back(StoredVec);
2281 }
2282
2283 // Concatenate all vectors into a wide vector.
2284 Value *WideVec = ConcatenateVectors(Builder, StoredVecs);
2285
2286 // Interleave the elements in the wide vector.
2287 Constant *IMask = getInterleavedMask(Builder, VF, InterleaveFactor);
2288 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
2289 "interleaved.vec");
2290
2291 Instruction *NewStoreInstr =
2292 Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
2293 propagateMetadata(NewStoreInstr, Instr);
2294 }
2295 }
2296
2297 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
2298 // Attempt to issue a wide load.
2299 LoadInst *LI = dyn_cast<LoadInst>(Instr);
2300 StoreInst *SI = dyn_cast<StoreInst>(Instr);
2301
2302 assert((LI || SI) && "Invalid Load/Store instruction");
2303
2304 // Try to vectorize the interleave group if this access is interleaved.
2305 if (Legal->isAccessInterleaved(Instr))
2306 return vectorizeInterleaveGroup(Instr);
2307
2308 Type *ScalarDataTy = LI ? LI->getType() : SI->getValueOperand()->getType();
2309 Type *DataTy = VectorType::get(ScalarDataTy, VF);
2310 Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();
2311 unsigned Alignment = LI ? LI->getAlignment() : SI->getAlignment();
2312 // An alignment of 0 means target ABI alignment. We need to use the scalar's
2313 // target ABI alignment in such a case.
2314 const DataLayout &DL = Instr->getModule()->getDataLayout();
2315 if (!Alignment)
2316 Alignment = DL.getABITypeAlignment(ScalarDataTy);
2317 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2318 unsigned ScalarAllocatedSize = DL.getTypeAllocSize(ScalarDataTy);
2319 unsigned VectorElementSize = DL.getTypeStoreSize(DataTy) / VF;
2320
2321 if (SI && Legal->blockNeedsPredication(SI->getParent()) &&
2322 !Legal->isMaskRequired(SI))
2323 return scalarizeInstruction(Instr, true);
2324
2325 if (ScalarAllocatedSize != VectorElementSize)
2326 return scalarizeInstruction(Instr);
2327
2328 // If the pointer is loop invariant or if it is non-consecutive,
2329 // scalarize the load.
2330 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
2331 bool Reverse = ConsecutiveStride < 0;
2332 bool UniformLoad = LI && Legal->isUniform(Ptr);
2333 if (!ConsecutiveStride || UniformLoad)
2334 return scalarizeInstruction(Instr);
2335
2336 Constant *Zero = Builder.getInt32(0);
2337 VectorParts &Entry = WidenMap.get(Instr);
2338
2339 // Handle consecutive loads/stores.
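// E.g. A[i] is consecutive with stride +1; A[N - i] is consecutive but
// reversed (isConsecutivePtr returns -1), so the lanes must be reversed
// when widening.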
2340 GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr); 2341 if (Gep && Legal->isInductionVariable(Gep->getPointerOperand())) { 2342 setDebugLocFromInst(Builder, Gep); 2343 Value *PtrOperand = Gep->getPointerOperand(); 2344 Value *FirstBasePtr = getVectorValue(PtrOperand)[0]; 2345 FirstBasePtr = Builder.CreateExtractElement(FirstBasePtr, Zero); 2346 2347 // Create the new GEP with the new induction variable. 2348 GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone()); 2349 Gep2->setOperand(0, FirstBasePtr); 2350 Gep2->setName("gep.indvar.base"); 2351 Ptr = Builder.Insert(Gep2); 2352 } else if (Gep) { 2353 setDebugLocFromInst(Builder, Gep); 2354 assert(SE->isLoopInvariant(SE->getSCEV(Gep->getPointerOperand()), 2355 OrigLoop) && "Base ptr must be invariant"); 2356 2357 // The last index does not have to be the induction. It can be 2358 // consecutive and be a function of the index. For example A[I+1]; 2359 unsigned NumOperands = Gep->getNumOperands(); 2360 unsigned InductionOperand = getGEPInductionOperand(Gep); 2361 // Create the new GEP with the new induction variable. 2362 GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone()); 2363 2364 for (unsigned i = 0; i < NumOperands; ++i) { 2365 Value *GepOperand = Gep->getOperand(i); 2366 Instruction *GepOperandInst = dyn_cast<Instruction>(GepOperand); 2367 2368 // Update last index or loop invariant instruction anchored in loop. 2369 if (i == InductionOperand || 2370 (GepOperandInst && OrigLoop->contains(GepOperandInst))) { 2371 assert((i == InductionOperand || 2372 SE->isLoopInvariant(SE->getSCEV(GepOperandInst), OrigLoop)) && 2373 "Must be last index or loop invariant"); 2374 2375 VectorParts &GEPParts = getVectorValue(GepOperand); 2376 Value *Index = GEPParts[0]; 2377 Index = Builder.CreateExtractElement(Index, Zero); 2378 Gep2->setOperand(i, Index); 2379 Gep2->setName("gep.indvar.idx"); 2380 } 2381 } 2382 Ptr = Builder.Insert(Gep2); 2383 } else { 2384 // Use the induction element ptr. 2385 assert(isa<PHINode>(Ptr) && "Invalid induction ptr"); 2386 setDebugLocFromInst(Builder, Ptr); 2387 VectorParts &PtrVal = getVectorValue(Ptr); 2388 Ptr = Builder.CreateExtractElement(PtrVal[0], Zero); 2389 } 2390 2391 VectorParts Mask = createBlockInMask(Instr->getParent()); 2392 // Handle Stores: 2393 if (SI) { 2394 assert(!Legal->isUniform(SI->getPointerOperand()) && 2395 "We do not allow storing to uniform addresses"); 2396 setDebugLocFromInst(Builder, SI); 2397 // We don't want to update the value in the map as it might be used in 2398 // another expression. So don't use a reference type for "StoredVal". 2399 VectorParts StoredVal = getVectorValue(SI->getValueOperand()); 2400 2401 for (unsigned Part = 0; Part < UF; ++Part) { 2402 // Calculate the pointer for the specific unroll-part. 2403 Value *PartPtr = 2404 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 2405 2406 if (Reverse) { 2407 // If we store to reverse consecutive memory locations, then we need 2408 // to reverse the order of elements in the stored value. 2409 StoredVal[Part] = reverseVector(StoredVal[Part]); 2410 // If the address is consecutive but reversed, then the 2411 // wide store needs to start at the last vector element. 
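// E.g. for VF = 4 and Part = 0, the two GEPs below compute Ptr - 3, so the
// wide store covers the four elements ending at Ptr.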
2412 PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 2413 PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 2414 Mask[Part] = reverseVector(Mask[Part]); 2415 } 2416 2417 Value *VecPtr = Builder.CreateBitCast(PartPtr, 2418 DataTy->getPointerTo(AddressSpace)); 2419 2420 Instruction *NewSI; 2421 if (Legal->isMaskRequired(SI)) 2422 NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment, 2423 Mask[Part]); 2424 else 2425 NewSI = Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment); 2426 propagateMetadata(NewSI, SI); 2427 } 2428 return; 2429 } 2430 2431 // Handle loads. 2432 assert(LI && "Must have a load instruction"); 2433 setDebugLocFromInst(Builder, LI); 2434 for (unsigned Part = 0; Part < UF; ++Part) { 2435 // Calculate the pointer for the specific unroll-part. 2436 Value *PartPtr = 2437 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 2438 2439 if (Reverse) { 2440 // If the address is consecutive but reversed, then the 2441 // wide load needs to start at the last vector element. 2442 PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 2443 PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 2444 Mask[Part] = reverseVector(Mask[Part]); 2445 } 2446 2447 Instruction* NewLI; 2448 Value *VecPtr = Builder.CreateBitCast(PartPtr, 2449 DataTy->getPointerTo(AddressSpace)); 2450 if (Legal->isMaskRequired(LI)) 2451 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 2452 UndefValue::get(DataTy), 2453 "wide.masked.load"); 2454 else 2455 NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load"); 2456 propagateMetadata(NewLI, LI); 2457 Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI; 2458 } 2459 } 2460 2461 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, bool IfPredicateStore) { 2462 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2463 // Holds vector parameters or scalars, in case of uniform vals. 2464 SmallVector<VectorParts, 4> Params; 2465 2466 setDebugLocFromInst(Builder, Instr); 2467 2468 // Find all of the vectorized parameters. 2469 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 2470 Value *SrcOp = Instr->getOperand(op); 2471 2472 // If we are accessing the old induction variable, use the new one. 2473 if (SrcOp == OldInduction) { 2474 Params.push_back(getVectorValue(SrcOp)); 2475 continue; 2476 } 2477 2478 // Try using previously calculated values. 2479 Instruction *SrcInst = dyn_cast<Instruction>(SrcOp); 2480 2481 // If the src is an instruction that appeared earlier in the basic block, 2482 // then it should already be vectorized. 2483 if (SrcInst && OrigLoop->contains(SrcInst)) { 2484 assert(WidenMap.has(SrcInst) && "Source operand is unavailable"); 2485 // The parameter is a vector value from earlier. 2486 Params.push_back(WidenMap.get(SrcInst)); 2487 } else { 2488 // The parameter is a scalar from outside the loop. Maybe even a constant. 2489 VectorParts Scalars; 2490 Scalars.append(UF, SrcOp); 2491 Params.push_back(Scalars); 2492 } 2493 } 2494 2495 assert(Params.size() == Instr->getNumOperands() && 2496 "Invalid number of operands"); 2497 2498 // Does this instruction return a value ? 2499 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2500 2501 Value *UndefVec = IsVoidRetTy ? nullptr : 2502 UndefValue::get(VectorType::get(Instr->getType(), VF)); 2503 // Create a new entry in the WidenMap and initialize it to Undef or Null. 
2504 VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);
2505
2506 VectorParts Cond;
2507 if (IfPredicateStore) {
2508 assert(Instr->getParent()->getSinglePredecessor() &&
2509 "Only support single predecessor blocks");
2510 Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(),
2511 Instr->getParent());
2512 }
2513
2514 // For each vector unroll 'part':
2515 for (unsigned Part = 0; Part < UF; ++Part) {
2516 // For each scalar that we create:
2517 for (unsigned Width = 0; Width < VF; ++Width) {
2518
2519 // Start if-block.
2520 Value *Cmp = nullptr;
2521 if (IfPredicateStore) {
2522 Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Width));
2523 Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp, ConstantInt::get(Cmp->getType(), 1));
2524 }
2525
2526 Instruction *Cloned = Instr->clone();
2527 if (!IsVoidRetTy)
2528 Cloned->setName(Instr->getName() + ".cloned");
2529 // Replace the operands of the cloned instruction with extracted scalars.
2530 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
2531 Value *Op = Params[op][Part];
2532 // Param is a vector. Need to extract the right lane.
2533 if (Op->getType()->isVectorTy())
2534 Op = Builder.CreateExtractElement(Op, Builder.getInt32(Width));
2535 Cloned->setOperand(op, Op);
2536 }
2537
2538 // Place the cloned scalar in the new loop.
2539 Builder.Insert(Cloned);
2540
2541 // If the original scalar returns a value we need to place it in a vector
2542 // so that future users will be able to use it.
2543 if (!IsVoidRetTy)
2544 VecResults[Part] = Builder.CreateInsertElement(VecResults[Part], Cloned,
2545 Builder.getInt32(Width));
2546 // End if-block.
2547 if (IfPredicateStore)
2548 PredicatedStores.push_back(std::make_pair(cast<StoreInst>(Cloned),
2549 Cmp));
2550 }
2551 }
2552 }
2553
2554 static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
2555 Instruction *Loc) {
2556 if (FirstInst)
2557 return FirstInst;
2558 if (Instruction *I = dyn_cast<Instruction>(V))
2559 return I->getParent() == Loc->getParent() ? I : nullptr;
2560 return nullptr;
2561 }
2562
2563 std::pair<Instruction *, Instruction *>
2564 InnerLoopVectorizer::addStrideCheck(Instruction *Loc) {
2565 Instruction *tnullptr = nullptr;
2566 if (!Legal->mustCheckStrides())
2567 return std::pair<Instruction *, Instruction *>(tnullptr, tnullptr);
2568
2569 IRBuilder<> ChkBuilder(Loc);
2570
2571 // Emit checks.
2572 Value *Check = nullptr;
2573 Instruction *FirstInst = nullptr;
2574 for (SmallPtrSet<Value *, 8>::iterator SI = Legal->strides_begin(),
2575 SE = Legal->strides_end();
2576 SI != SE; ++SI) {
2577 Value *Ptr = stripIntegerCast(*SI);
2578 Value *C = ChkBuilder.CreateICmpNE(Ptr, ConstantInt::get(Ptr->getType(), 1),
2579 "stride.chk");
2580 // Store the first instruction we create.
2581 FirstInst = getFirstInst(FirstInst, C, Loc);
2582 if (Check)
2583 Check = ChkBuilder.CreateOr(Check, C);
2584 else
2585 Check = C;
2586 }
2587
2588 // We have to do this trickery because the IRBuilder might fold the check to a
2589 // constant expression in which case there is no Instruction anchored in
2590 // the block.
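// E.g. if a stride is already the constant 1, CreateICmpNE above folds to
// the constant 'false' rather than an instruction; the 'and' with 'true'
// below guarantees that we have a real Instruction to return.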
2591 LLVMContext &Ctx = Loc->getContext();
2592 Instruction *TheCheck =
2593 BinaryOperator::CreateAnd(Check, ConstantInt::getTrue(Ctx));
2594 ChkBuilder.Insert(TheCheck, "stride.not.one");
2595 FirstInst = getFirstInst(FirstInst, TheCheck, Loc);
2596
2597 return std::make_pair(FirstInst, TheCheck);
2598 }
2599
2600 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L,
2601 Value *Start,
2602 Value *End,
2603 Value *Step,
2604 Instruction *DL) {
2605 BasicBlock *Header = L->getHeader();
2606 BasicBlock *Latch = L->getLoopLatch();
2607 // As we're just creating this loop, it's possible no latch exists
2608 // yet. If so, use the header as this will be a single block loop.
2609 if (!Latch)
2610 Latch = Header;
2611
2612 IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2613 setDebugLocFromInst(Builder, getDebugLocFromInstOrOperands(OldInduction));
2614 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2615
2616 Builder.SetInsertPoint(Latch->getTerminator());
2617
2618 // Create i+1 and fill the PHINode.
2619 Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2620 Induction->addIncoming(Start, L->getLoopPreheader());
2621 Induction->addIncoming(Next, Latch);
2622 // Create the compare.
2623 Value *ICmp = Builder.CreateICmpEQ(Next, End);
2624 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
2625
2626 // Now we have two terminators. Remove the old one from the block.
2627 Latch->getTerminator()->eraseFromParent();
2628
2629 return Induction;
2630 }
2631
2632 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2633 if (TripCount)
2634 return TripCount;
2635
2636 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2637 // Find the loop boundaries.
2638 const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(OrigLoop);
2639 assert(BackedgeTakenCount != SE->getCouldNotCompute() && "Invalid loop count");
2640
2641 Type *IdxTy = Legal->getWidestInductionType();
2642
2643 // The exit count might have the type of i64 while the phi is i32. This can
2644 // happen if we have an induction variable that is sign-extended before the
2645 // compare. The only way we can get a backedge-taken count of this wider type
2646 // is if the induction variable was signed, and as such it will not overflow.
2647 // In that case the truncation is legal.
2648 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
2649 IdxTy->getPrimitiveSizeInBits())
2650 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2651 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2652
2653 // Get the total trip count from the count by adding 1.
2654 const SCEV *ExitCount = SE->getAddExpr(
2655 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2656
2657 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2658
2659 // Expand the trip count and place the new instructions in the preheader.
2660 // Notice that the pre-header does not change, only the loop body.
2661 SCEVExpander Exp(*SE, DL, "induction");
2662
2663 // Count holds the overall loop count (N).
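// E.g. for 'for (i = 0; i < n; ++i)' the backedge-taken count is n - 1 and
// the trip count expanded here is n.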
2664 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2665 L->getLoopPreheader()->getTerminator());
2666
2667 if (TripCount->getType()->isPointerTy())
2668 TripCount =
2669 CastInst::CreatePointerCast(TripCount, IdxTy,
2670 "exitcount.ptrcnt.to.int",
2671 L->getLoopPreheader()->getTerminator());
2672
2673 return TripCount;
2674 }
2675
2676 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
2677 if (VectorTripCount)
2678 return VectorTripCount;
2679
2680 Value *TC = getOrCreateTripCount(L);
2681 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2682
2683 // Now we need to generate the expression for N - (N % (VF * UF)), which is
2684 // the part that the vectorized body will execute.
2685 // The loop step is equal to the vectorization factor (num of SIMD elements)
2686 // times the unroll factor (num of SIMD instructions).
2687 Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
2688 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2689 VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2690
2691 return VectorTripCount;
2692 }
2693
2694 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2695 BasicBlock *Bypass) {
2696 Value *Count = getOrCreateTripCount(L);
2697 BasicBlock *BB = L->getLoopPreheader();
2698 IRBuilder<> Builder(BB->getTerminator());
2699
2700 // Generate code to check that the loop's trip count, which we computed by
2701 // adding one to the backedge-taken count, did not overflow and covers at least one full vector step (VF * UF).
2702 Value *CheckMinIters =
2703 Builder.CreateICmpULT(Count,
2704 ConstantInt::get(Count->getType(), VF * UF),
2705 "min.iters.check");
2706
2707 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(),
2708 "min.iters.checked");
2709 if (L->getParentLoop())
2710 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2711 ReplaceInstWithInst(BB->getTerminator(),
2712 BranchInst::Create(Bypass, NewBB, CheckMinIters));
2713 LoopBypassBlocks.push_back(BB);
2714 }
2715
2716 void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L,
2717 BasicBlock *Bypass) {
2718 Value *TC = getOrCreateVectorTripCount(L);
2719 BasicBlock *BB = L->getLoopPreheader();
2720 IRBuilder<> Builder(BB->getTerminator());
2721
2722 // Now, compare the new count to zero. If it is zero skip the vector loop and
2723 // jump to the scalar loop.
2724 Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
2725 "cmp.zero");
2726
2727 // Split the block to create the "vector.ph" preheader; branch to the
2728 // scalar loop (the bypass) when the vector trip count is zero.
2729 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(),
2730 "vector.ph");
2731 if (L->getParentLoop())
2732 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2733 ReplaceInstWithInst(BB->getTerminator(),
2734 BranchInst::Create(Bypass, NewBB, Cmp));
2735 LoopBypassBlocks.push_back(BB);
2736 }
2737
2738 void InnerLoopVectorizer::emitStrideChecks(Loop *L,
2739 BasicBlock *Bypass) {
2740 BasicBlock *BB = L->getLoopPreheader();
2741
2742 // Generate the code to check that the strides we assumed to be one are really
2743 // one. We want the new basic block to start at the first instruction in a
2744 // sequence of instructions that form a check.
2745 Instruction *StrideCheck;
2746 Instruction *FirstCheckInst;
2747 std::tie(FirstCheckInst, StrideCheck) = addStrideCheck(BB->getTerminator());
2748 if (!StrideCheck)
2749 return;
2750
2751 // Create a new block containing the stride check.
2752 BB->setName("vector.stridecheck"); 2753 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2754 if (L->getParentLoop()) 2755 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2756 ReplaceInstWithInst(BB->getTerminator(), 2757 BranchInst::Create(Bypass, NewBB, StrideCheck)); 2758 LoopBypassBlocks.push_back(BB); 2759 AddedSafetyChecks = true; 2760 } 2761 2762 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, 2763 BasicBlock *Bypass) { 2764 BasicBlock *BB = L->getLoopPreheader(); 2765 2766 // Generate the code that checks in runtime if arrays overlap. We put the 2767 // checks into a separate block to make the more common case of few elements 2768 // faster. 2769 Instruction *FirstCheckInst; 2770 Instruction *MemRuntimeCheck; 2771 std::tie(FirstCheckInst, MemRuntimeCheck) = 2772 Legal->getLAI()->addRuntimeChecks(BB->getTerminator()); 2773 if (!MemRuntimeCheck) 2774 return; 2775 2776 // Create a new block containing the memory check. 2777 BB->setName("vector.memcheck"); 2778 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2779 if (L->getParentLoop()) 2780 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2781 ReplaceInstWithInst(BB->getTerminator(), 2782 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck)); 2783 LoopBypassBlocks.push_back(BB); 2784 AddedSafetyChecks = true; 2785 } 2786 2787 2788 void InnerLoopVectorizer::createEmptyLoop() { 2789 /* 2790 In this function we generate a new loop. The new loop will contain 2791 the vectorized instructions while the old loop will continue to run the 2792 scalar remainder. 2793 2794 [ ] <-- loop iteration number check. 2795 / | 2796 / v 2797 | [ ] <-- vector loop bypass (may consist of multiple blocks). 2798 | / | 2799 | / v 2800 || [ ] <-- vector pre header. 2801 |/ | 2802 | v 2803 | [ ] \ 2804 | [ ]_| <-- vector loop. 2805 | | 2806 | v 2807 | -[ ] <--- middle-block. 2808 | / | 2809 | / v 2810 -|- >[ ] <--- new preheader. 2811 | | 2812 | v 2813 | [ ] \ 2814 | [ ]_| <-- old scalar loop to handle remainder. 2815 \ | 2816 \ v 2817 >[ ] <-- exit block. 2818 ... 2819 */ 2820 2821 BasicBlock *OldBasicBlock = OrigLoop->getHeader(); 2822 BasicBlock *VectorPH = OrigLoop->getLoopPreheader(); 2823 BasicBlock *ExitBlock = OrigLoop->getExitBlock(); 2824 assert(VectorPH && "Invalid loop structure"); 2825 assert(ExitBlock && "Must have an exit block"); 2826 2827 // Some loops have a single integer induction variable, while other loops 2828 // don't. One example is c++ iterators that often have multiple pointer 2829 // induction variables. In the code below we also support a case where we 2830 // don't have a single induction variable. 2831 // 2832 // We try to obtain an induction variable from the original loop as hard 2833 // as possible. However if we don't find one that: 2834 // - is an integer 2835 // - counts from zero, stepping by one 2836 // - is the size of the widest induction variable type 2837 // then we create a new one. 2838 OldInduction = Legal->getInduction(); 2839 Type *IdxTy = Legal->getWidestInductionType(); 2840 2841 // Split the single block loop into the two loop structure described above. 2842 BasicBlock *VecBody = 2843 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body"); 2844 BasicBlock *MiddleBlock = 2845 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block"); 2846 BasicBlock *ScalarPH = 2847 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph"); 2848 2849 // Create and register the new vector loop. 
2850 Loop* Lp = new Loop(); 2851 Loop *ParentLoop = OrigLoop->getParentLoop(); 2852 2853 // Insert the new loop into the loop nest and register the new basic blocks 2854 // before calling any utilities such as SCEV that require valid LoopInfo. 2855 if (ParentLoop) { 2856 ParentLoop->addChildLoop(Lp); 2857 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI); 2858 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI); 2859 } else { 2860 LI->addTopLevelLoop(Lp); 2861 } 2862 Lp->addBasicBlockToLoop(VecBody, *LI); 2863 2864 // Find the loop boundaries. 2865 Value *Count = getOrCreateTripCount(Lp); 2866 2867 Value *StartIdx = ConstantInt::get(IdxTy, 0); 2868 2869 // We need to test whether the backedge-taken count is uint##_max. Adding one 2870 // to it will cause overflow and an incorrect loop trip count in the vector 2871 // body. In case of overflow we want to directly jump to the scalar remainder 2872 // loop. 2873 emitMinimumIterationCountCheck(Lp, ScalarPH); 2874 // Now, compare the new count to zero. If it is zero skip the vector loop and 2875 // jump to the scalar loop. 2876 emitVectorLoopEnteredCheck(Lp, ScalarPH); 2877 // Generate the code to check that the strides we assumed to be one are really 2878 // one. We want the new basic block to start at the first instruction in a 2879 // sequence of instructions that form a check. 2880 emitStrideChecks(Lp, ScalarPH); 2881 // Generate the code that checks in runtime if arrays overlap. We put the 2882 // checks into a separate block to make the more common case of few elements 2883 // faster. 2884 emitMemRuntimeChecks(Lp, ScalarPH); 2885 2886 // Generate the induction variable. 2887 // The loop step is equal to the vectorization factor (num of SIMD elements) 2888 // times the unroll factor (num of SIMD instructions). 2889 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 2890 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 2891 Induction = 2892 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 2893 getDebugLocFromInstOrOperands(OldInduction)); 2894 2895 // We are going to resume the execution of the scalar loop. 2896 // Go over all of the induction variables that we found and fix the 2897 // PHIs that are left in the scalar version of the loop. 2898 // The starting values of PHI nodes depend on the counter of the last 2899 // iteration in the vectorized loop. 2900 // If we come from a bypass edge then we need to start from the original 2901 // start value. 2902 2903 // This variable saves the new starting index for the scalar loop. It is used 2904 // to test if there are any tail iterations left once the vector loop has 2905 // completed. 2906 LoopVectorizationLegality::InductionList::iterator I, E; 2907 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars(); 2908 for (I = List->begin(), E = List->end(); I != E; ++I) { 2909 PHINode *OrigPhi = I->first; 2910 InductionDescriptor II = I->second; 2911 2912 // Create phi nodes to merge from the backedge-taken check block. 2913 PHINode *BCResumeVal = PHINode::Create(OrigPhi->getType(), 3, 2914 "bc.resume.val", 2915 ScalarPH->getTerminator()); 2916 Value *EndValue; 2917 if (OrigPhi == OldInduction) { 2918 // We know what the end value is. 
2919       EndValue = CountRoundDown;
2920     } else {
2921       IRBuilder<> B(LoopBypassBlocks.back()->getTerminator());
2922       Value *CRD = B.CreateSExtOrTrunc(CountRoundDown,
2923                                        II.getStepValue()->getType(),
2924                                        "cast.crd");
2925       EndValue = II.transform(B, CRD);
2926       EndValue->setName("ind.end");
2927     }
2928
2929     // The new PHI merges the original incoming value, in case of a bypass,
2930     // or the value at the end of the vectorized loop.
2931     BCResumeVal->addIncoming(EndValue, MiddleBlock);
2932
2933     // Fix the scalar body counter (PHI node).
2934     unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
2935
2936     // When coming in over a bypass edge, the PHI must resume from the
2937     // original start value.
2938     for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
2939       BCResumeVal->addIncoming(II.getStartValue(), LoopBypassBlocks[I]);
2940     OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
2941   }
2942
2943   // Add a check in the middle block to see if we have completed
2944   // all of the iterations in the first vector loop.
2945   // If (N - N%VF) == N, then we *don't* need to run the remainder.
2946   Value *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
2947                                 CountRoundDown, "cmp.n",
2948                                 MiddleBlock->getTerminator());
2949   ReplaceInstWithInst(MiddleBlock->getTerminator(),
2950                       BranchInst::Create(ExitBlock, ScalarPH, CmpN));
2951
2952   // Get ready to start creating new instructions into the vectorized body.
2953   Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());
2954
2955   // Save the state.
2956   LoopVectorPreHeader = Lp->getLoopPreheader();
2957   LoopScalarPreHeader = ScalarPH;
2958   LoopMiddleBlock = MiddleBlock;
2959   LoopExitBlock = ExitBlock;
2960   LoopVectorBody.push_back(VecBody);
2961   LoopScalarBody = OldBasicBlock;
2962
2963   LoopVectorizeHints Hints(Lp, true);
2964   Hints.setAlreadyVectorized();
2965 }
2966
2967 namespace {
2968 struct CSEDenseMapInfo {
2969   static bool canHandle(Instruction *I) {
2970     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
2971            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
2972   }
2973   static inline Instruction *getEmptyKey() {
2974     return DenseMapInfo<Instruction *>::getEmptyKey();
2975   }
2976   static inline Instruction *getTombstoneKey() {
2977     return DenseMapInfo<Instruction *>::getTombstoneKey();
2978   }
2979   static unsigned getHashValue(Instruction *I) {
2980     assert(canHandle(I) && "Unknown instruction!");
2981     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
2982                                                            I->value_op_end()));
2983   }
2984   static bool isEqual(Instruction *LHS, Instruction *RHS) {
2985     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
2986         LHS == getTombstoneKey() || RHS == getTombstoneKey())
2987       return LHS == RHS;
2988     return LHS->isIdenticalTo(RHS);
2989   }
2990 };
2991 }
2992
2993 /// \brief Check whether this block is a predicated block.
2994 /// Due to if-predication of stores we might create a sequence of "if(pred)
2995 /// a[i] = ...;" blocks. We start with one vectorized basic block. For every
2996 /// conditional block we split this vectorized block. Therefore, every second
2997 /// block will be a predicated one.
2998 static bool isPredicatedBlock(unsigned BlockNum) {
2999   return BlockNum % 2;
3000 }
3001
3002 /// \brief Perform CSE of induction variable instructions.
3003 static void cse(SmallVector<BasicBlock *, 4> &BBs) {
3004   // Perform simple CSE.
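  // For example (illustrative IR): two syntactically identical instructions
  //   %e0 = extractelement <4 x i32> %vec, i32 0
  //   ...
  //   %e1 = extractelement <4 x i32> %vec, i32 0
  // hash to the same map entry, so %e1 is replaced by %e0 and erased.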
3005 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3006 for (unsigned i = 0, e = BBs.size(); i != e; ++i) { 3007 BasicBlock *BB = BBs[i]; 3008 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3009 Instruction *In = &*I++; 3010 3011 if (!CSEDenseMapInfo::canHandle(In)) 3012 continue; 3013 3014 // Check if we can replace this instruction with any of the 3015 // visited instructions. 3016 if (Instruction *V = CSEMap.lookup(In)) { 3017 In->replaceAllUsesWith(V); 3018 In->eraseFromParent(); 3019 continue; 3020 } 3021 // Ignore instructions in conditional blocks. We create "if (pred) a[i] = 3022 // ...;" blocks for predicated stores. Every second block is a predicated 3023 // block. 3024 if (isPredicatedBlock(i)) 3025 continue; 3026 3027 CSEMap[In] = In; 3028 } 3029 } 3030 } 3031 3032 /// \brief Adds a 'fast' flag to floating point operations. 3033 static Value *addFastMathFlag(Value *V) { 3034 if (isa<FPMathOperator>(V)){ 3035 FastMathFlags Flags; 3036 Flags.setUnsafeAlgebra(); 3037 cast<Instruction>(V)->setFastMathFlags(Flags); 3038 } 3039 return V; 3040 } 3041 3042 /// Estimate the overhead of scalarizing a value. Insert and Extract are set if 3043 /// the result needs to be inserted and/or extracted from vectors. 3044 static unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract, 3045 const TargetTransformInfo &TTI) { 3046 if (Ty->isVoidTy()) 3047 return 0; 3048 3049 assert(Ty->isVectorTy() && "Can only scalarize vectors"); 3050 unsigned Cost = 0; 3051 3052 for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) { 3053 if (Insert) 3054 Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, i); 3055 if (Extract) 3056 Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, i); 3057 } 3058 3059 return Cost; 3060 } 3061 3062 // Estimate cost of a call instruction CI if it were vectorized with factor VF. 3063 // Return the cost of the instruction, including scalarization overhead if it's 3064 // needed. The flag NeedToScalarize shows if the call needs to be scalarized - 3065 // i.e. either vector version isn't available, or is too expensive. 3066 static unsigned getVectorCallCost(CallInst *CI, unsigned VF, 3067 const TargetTransformInfo &TTI, 3068 const TargetLibraryInfo *TLI, 3069 bool &NeedToScalarize) { 3070 Function *F = CI->getCalledFunction(); 3071 StringRef FnName = CI->getCalledFunction()->getName(); 3072 Type *ScalarRetTy = CI->getType(); 3073 SmallVector<Type *, 4> Tys, ScalarTys; 3074 for (auto &ArgOp : CI->arg_operands()) 3075 ScalarTys.push_back(ArgOp->getType()); 3076 3077 // Estimate cost of scalarized vector call. The source operands are assumed 3078 // to be vectors, so we need to extract individual elements from there, 3079 // execute VF scalar calls, and then gather the result into the vector return 3080 // value. 3081 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3082 if (VF == 1) 3083 return ScalarCallCost; 3084 3085 // Compute corresponding vector type for return value and arguments. 3086 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3087 for (unsigned i = 0, ie = ScalarTys.size(); i != ie; ++i) 3088 Tys.push_back(ToVectorTy(ScalarTys[i], VF)); 3089 3090 // Compute costs of unpacking argument values for the scalar calls and 3091 // packing the return values to a vector. 
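  // Hedged example of this estimate (actual numbers are target-dependent):
  // for a "float foo(float)" call at VF = 4, the scalarized cost is roughly
  //   4 * Cost(scalar call) + 4 inserts (return vector)
  //                         + 4 extracts (argument vector).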
3092 unsigned ScalarizationCost = 3093 getScalarizationOverhead(RetTy, true, false, TTI); 3094 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) 3095 ScalarizationCost += getScalarizationOverhead(Tys[i], false, true, TTI); 3096 3097 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3098 3099 // If we can't emit a vector call for this function, then the currently found 3100 // cost is the cost we need to return. 3101 NeedToScalarize = true; 3102 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3103 return Cost; 3104 3105 // If the corresponding vector cost is cheaper, return its cost. 3106 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3107 if (VectorCallCost < Cost) { 3108 NeedToScalarize = false; 3109 return VectorCallCost; 3110 } 3111 return Cost; 3112 } 3113 3114 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3115 // factor VF. Return the cost of the instruction, including scalarization 3116 // overhead if it's needed. 3117 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3118 const TargetTransformInfo &TTI, 3119 const TargetLibraryInfo *TLI) { 3120 Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI); 3121 assert(ID && "Expected intrinsic call!"); 3122 3123 Type *RetTy = ToVectorTy(CI->getType(), VF); 3124 SmallVector<Type *, 4> Tys; 3125 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) 3126 Tys.push_back(ToVectorTy(CI->getArgOperand(i)->getType(), VF)); 3127 3128 return TTI.getIntrinsicInstrCost(ID, RetTy, Tys); 3129 } 3130 3131 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3132 IntegerType *I1 = cast<IntegerType>(T1->getVectorElementType()); 3133 IntegerType *I2 = cast<IntegerType>(T2->getVectorElementType()); 3134 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3135 } 3136 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3137 IntegerType *I1 = cast<IntegerType>(T1->getVectorElementType()); 3138 IntegerType *I2 = cast<IntegerType>(T2->getVectorElementType()); 3139 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3140 } 3141 3142 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3143 // For every instruction `I` in MinBWs, truncate the operands, create a 3144 // truncated version of `I` and reextend its result. InstCombine runs 3145 // later and will remove any ext/trunc pairs. 3146 // 3147 for (auto &KV : MinBWs) { 3148 VectorParts &Parts = WidenMap.get(KV.first); 3149 for (Value *&I : Parts) { 3150 if (I->use_empty()) 3151 continue; 3152 Type *OriginalTy = I->getType(); 3153 Type *ScalarTruncatedTy = IntegerType::get(OriginalTy->getContext(), 3154 KV.second); 3155 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3156 OriginalTy->getVectorNumElements()); 3157 if (TruncatedTy == OriginalTy) 3158 continue; 3159 3160 IRBuilder<> B(cast<Instruction>(I)); 3161 auto ShrinkOperand = [&](Value *V) -> Value* { 3162 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3163 if (ZI->getSrcTy() == TruncatedTy) 3164 return ZI->getOperand(0); 3165 return B.CreateZExtOrTrunc(V, TruncatedTy); 3166 }; 3167 3168 // The actual instruction modification depends on the instruction type, 3169 // unfortunately. 
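      // For instance (sketch): if MinBWs records that an i32 add only needs
      // 8 bits, then
      //   %a = add <4 x i32> %x, %y
      // becomes
      //   %xt = trunc <4 x i32> %x to <4 x i8>
      //   %yt = trunc <4 x i32> %y to <4 x i8>
      //   %at = add <4 x i8> %xt, %yt
      //   %a.ext = zext <4 x i8> %at to <4 x i32>
      // and InstCombine later removes any redundant ext/trunc pairs.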
3170       Value *NewI = nullptr;
3171       if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
3172         NewI = B.CreateBinOp(BO->getOpcode(),
3173                              ShrinkOperand(BO->getOperand(0)),
3174                              ShrinkOperand(BO->getOperand(1)));
3175         cast<BinaryOperator>(NewI)->copyIRFlags(I);
3176       } else if (ICmpInst *CI = dyn_cast<ICmpInst>(I)) {
3177         NewI = B.CreateICmp(CI->getPredicate(),
3178                             ShrinkOperand(CI->getOperand(0)),
3179                             ShrinkOperand(CI->getOperand(1)));
3180       } else if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
3181         NewI = B.CreateSelect(SI->getCondition(),
3182                               ShrinkOperand(SI->getTrueValue()),
3183                               ShrinkOperand(SI->getFalseValue()));
3184       } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
3185         switch (CI->getOpcode()) {
3186         default: llvm_unreachable("Unhandled cast!");
3187         case Instruction::Trunc:
3188           NewI = ShrinkOperand(CI->getOperand(0));
3189           break;
3190         case Instruction::SExt:
3191           NewI = B.CreateSExtOrTrunc(CI->getOperand(0),
3192                                      smallestIntegerVectorType(OriginalTy,
3193                                                                TruncatedTy));
3194           break;
3195         case Instruction::ZExt:
3196           NewI = B.CreateZExtOrTrunc(CI->getOperand(0),
3197                                      smallestIntegerVectorType(OriginalTy,
3198                                                                TruncatedTy));
3199           break;
3200         }
3201       } else if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(I)) {
3202         auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
3203         auto *O0 =
3204             B.CreateZExtOrTrunc(SI->getOperand(0),
3205                                 VectorType::get(ScalarTruncatedTy, Elements0));
3206         auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
3207         auto *O1 =
3208             B.CreateZExtOrTrunc(SI->getOperand(1),
3209                                 VectorType::get(ScalarTruncatedTy, Elements1));
3210
3211         NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
3212       } else if (isa<LoadInst>(I)) {
3213         // Don't do anything with the operands, just extend the result.
3214         continue;
3215       } else {
3216         llvm_unreachable("Unhandled instruction type!");
3217       }
3218
3219       // Lastly, extend the result.
3220       NewI->takeName(cast<Instruction>(I));
3221       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3222       I->replaceAllUsesWith(Res);
3223       cast<Instruction>(I)->eraseFromParent();
3224       I = Res;
3225     }
3226   }
3227
3228   // We'll have created a bunch of ZExts that may now be unused. Clean up.
3229   for (auto &KV : MinBWs) {
3230     VectorParts &Parts = WidenMap.get(KV.first);
3231     for (Value *&I : Parts) {
3232       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3233       if (Inst && Inst->use_empty()) {
3234         Value *NewI = Inst->getOperand(0);
3235         Inst->eraseFromParent();
3236         I = NewI;
3237       }
3238     }
3239   }
3240 }
3241
3242 void InnerLoopVectorizer::vectorizeLoop() {
3243   //===------------------------------------------------===//
3244   //
3245   // Notice: any optimization or new instruction that goes
3246   // into the code below should also be implemented in
3247   // the cost-model.
3248   //
3249   //===------------------------------------------------===//
3250   Constant *Zero = Builder.getInt32(0);
3251
3252   // In order to support reduction variables we need to be able to vectorize
3253   // Phi nodes. Phi nodes have cycles, so we need to vectorize them in two
3254   // stages. First, we create a new vector PHI node with no incoming edges.
3255   // We use this value when we vectorize all of the instructions that use the
3256   // PHI. Next, after all of the instructions in the block are complete we
3257   // add the new incoming edges to the PHI. At this point all of the
3258   // instructions in the basic block are vectorized, so we can use them to
3259   // construct the PHI.
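  // E.g. (illustrative) for a sum reduction
  //   %sum = phi i32 [ 0, %preheader ], [ %sum.next, %latch ]
  // we first create "%vec.phi = phi <4 x i32>" with no operands, widen the
  // body using %vec.phi, and only afterwards add the incoming values (the
  // start vector from the preheader, the widened %sum.next from the latch).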
3260   PhiVector RdxPHIsToFix;
3261
3262   // Scan the loop in a topological order to ensure that defs are vectorized
3263   // before users.
3264   LoopBlocksDFS DFS(OrigLoop);
3265   DFS.perform(LI);
3266
3267   // Vectorize all of the blocks in the original loop.
3268   for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(),
3269        be = DFS.endRPO(); bb != be; ++bb)
3270     vectorizeBlockInLoop(*bb, &RdxPHIsToFix);
3271
3272   // Insert truncates and extends for any truncated instructions as hints to
3273   // InstCombine.
3274   if (VF > 1)
3275     truncateToMinimalBitwidths();
3276
3277   // At this point every instruction in the original loop is widened to
3278   // a vector form. We are almost done. Now, we need to fix the PHI nodes
3279   // that we vectorized. The PHI nodes are currently empty because we did
3280   // not want to introduce cycles. Notice that the remaining PHI nodes
3281   // that we need to fix are reduction variables.
3282
3283   // Create the 'reduced' values for each of the reduction variables.
3284   // The reduced values are the vector values that we scalarize and combine
3285   // after the loop is finished.
3286   for (PhiVector::iterator it = RdxPHIsToFix.begin(), e = RdxPHIsToFix.end();
3287        it != e; ++it) {
3288     PHINode *RdxPhi = *it;
3289     assert(RdxPhi && "Unable to recover vectorized PHI");
3290
3291     // Find the reduction variable descriptor.
3292     assert(Legal->getReductionVars()->count(RdxPhi) &&
3293            "Unable to find the reduction variable");
3294     RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[RdxPhi];
3295
3296     RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3297     TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3298     Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3299     RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3300         RdxDesc.getMinMaxRecurrenceKind();
3301     setDebugLocFromInst(Builder, ReductionStartValue);
3302
3303     // We need to generate a reduction vector from the incoming scalar.
3304     // To do so, we need to generate the 'identity' vector and override
3305     // one of the elements with the incoming scalar reduction. We need
3306     // to do it in the vector-loop preheader.
3307     Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());
3308
3309     // This is the vector-clone of the value that leaves the loop.
3310     VectorParts &VectorExit = getVectorValue(LoopExitInst);
3311     Type *VecTy = VectorExit[0]->getType();
3312
3313     // Find the reduction identity variable. Zero for addition, or and xor;
3314     // one for multiplication; -1 for and.
3315     Value *Identity;
3316     Value *VectorStart;
3317     if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3318         RK == RecurrenceDescriptor::RK_FloatMinMax) {
3319       // MinMax reductions have the start value as their identity.
3320       if (VF == 1) {
3321         VectorStart = Identity = ReductionStartValue;
3322       } else {
3323         VectorStart = Identity =
3324             Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3325       }
3326     } else {
3327       // Handle other reduction kinds:
3328       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3329           RK, VecTy->getScalarType());
3330       if (VF == 1) {
3331         Identity = Iden;
3332         // This vector is the Identity vector where the first element is the
3333         // incoming scalar reduction.
3334         VectorStart = ReductionStartValue;
3335       } else {
3336         Identity = ConstantVector::getSplat(VF, Iden);
3337
3338         // This vector is the Identity vector where the first element is the
3339         // incoming scalar reduction.
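        // E.g. for an integer add reduction with start value %s and VF = 4,
        // Identity is <0, 0, 0, 0> and VectorStart becomes <%s, 0, 0, 0>, so
        // the start value enters the final reduction exactly once.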
3340         VectorStart =
3341             Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3342       }
3343     }
3344
3345     // Fix the vector-loop phi.
3346
3347     // Reductions do not have to start at zero. They can start with
3348     // any loop-invariant value.
3349     VectorParts &VecRdxPhi = WidenMap.get(RdxPhi);
3350     BasicBlock *Latch = OrigLoop->getLoopLatch();
3351     Value *LoopVal = RdxPhi->getIncomingValueForBlock(Latch);
3352     VectorParts &Val = getVectorValue(LoopVal);
3353     for (unsigned part = 0; part < UF; ++part) {
3354       // Make sure to add the reduction start value only to the
3355       // first unroll part.
3356       Value *StartVal = (part == 0) ? VectorStart : Identity;
3357       cast<PHINode>(VecRdxPhi[part])->addIncoming(StartVal,
3358                                                   LoopVectorPreHeader);
3359       cast<PHINode>(VecRdxPhi[part])->addIncoming(Val[part],
3360                                                   LoopVectorBody.back());
3361     }
3362
3363     // Before each round, move the insertion point right between
3364     // the PHIs and the values we are going to write.
3365     // This allows us to write both PHINodes and the extractelement
3366     // instructions.
3367     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3368
3369     VectorParts RdxParts = getVectorValue(LoopExitInst);
3370     setDebugLocFromInst(Builder, LoopExitInst);
3371
3372     // If the vector reduction can be performed in a smaller type, we truncate
3373     // then extend the loop exit value to enable InstCombine to evaluate the
3374     // entire expression in the smaller type.
3375     if (VF > 1 && RdxPhi->getType() != RdxDesc.getRecurrenceType()) {
3376       Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3377       Builder.SetInsertPoint(LoopVectorBody.back()->getTerminator());
3378       for (unsigned part = 0; part < UF; ++part) {
3379         Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
3380         Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3381                                           : Builder.CreateZExt(Trunc, VecTy);
3382         for (Value::user_iterator UI = RdxParts[part]->user_begin();
3383              UI != RdxParts[part]->user_end();)
3384           if (*UI != Trunc) {
3385             (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd);
3386             RdxParts[part] = Extnd;
3387           } else {
3388             ++UI;
3389           }
3390       }
3391       Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3392       for (unsigned part = 0; part < UF; ++part)
3393         RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
3394     }
3395
3396     // Reduce all of the unrolled parts into a single vector.
3397     Value *ReducedPartRdx = RdxParts[0];
3398     unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
3399     setDebugLocFromInst(Builder, ReducedPartRdx);
3400     for (unsigned part = 1; part < UF; ++part) {
3401       if (Op != Instruction::ICmp && Op != Instruction::FCmp)
3402         // Floating point operations had to be 'fast' to enable the reduction.
3403         ReducedPartRdx = addFastMathFlag(
3404             Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part],
3405                                 ReducedPartRdx, "bin.rdx"));
3406       else
3407         ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp(
3408             Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]);
3409     }
3410
3411     if (VF > 1) {
3412       // VF is a power of 2 so we can emit the reduction using log2(VF)
3413       // shuffles and vector ops, reducing the set of values being computed
3414       // by half each round.
3415       assert(isPowerOf2_32(VF) &&
3416              "Reduction emission only supported for pow2 vectors!");
3417       Value *TmpVec = ReducedPartRdx;
3418       SmallVector<Constant*, 32> ShuffleMask(VF, nullptr);
3419       for (unsigned i = VF; i != 1; i >>= 1) {
3420         // Move the upper half of the vector to the lower half.
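        // E.g. with VF = 4 and an add reduction: round one shuffles with
        // mask <2, 3, undef, undef> and adds; round two uses
        // <1, undef, undef, undef> and adds again; after log2(4) = 2 rounds
        // the scalar result sits in lane 0.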
3421 for (unsigned j = 0; j != i/2; ++j) 3422 ShuffleMask[j] = Builder.getInt32(i/2 + j); 3423 3424 // Fill the rest of the mask with undef. 3425 std::fill(&ShuffleMask[i/2], ShuffleMask.end(), 3426 UndefValue::get(Builder.getInt32Ty())); 3427 3428 Value *Shuf = 3429 Builder.CreateShuffleVector(TmpVec, 3430 UndefValue::get(TmpVec->getType()), 3431 ConstantVector::get(ShuffleMask), 3432 "rdx.shuf"); 3433 3434 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3435 // Floating point operations had to be 'fast' to enable the reduction. 3436 TmpVec = addFastMathFlag(Builder.CreateBinOp( 3437 (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx")); 3438 else 3439 TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind, 3440 TmpVec, Shuf); 3441 } 3442 3443 // The result is in the first element of the vector. 3444 ReducedPartRdx = Builder.CreateExtractElement(TmpVec, 3445 Builder.getInt32(0)); 3446 3447 // If the reduction can be performed in a smaller type, we need to extend 3448 // the reduction to the wider type before we branch to the original loop. 3449 if (RdxPhi->getType() != RdxDesc.getRecurrenceType()) 3450 ReducedPartRdx = 3451 RdxDesc.isSigned() 3452 ? Builder.CreateSExt(ReducedPartRdx, RdxPhi->getType()) 3453 : Builder.CreateZExt(ReducedPartRdx, RdxPhi->getType()); 3454 } 3455 3456 // Create a phi node that merges control-flow from the backedge-taken check 3457 // block and the middle block. 3458 PHINode *BCBlockPhi = PHINode::Create(RdxPhi->getType(), 2, "bc.merge.rdx", 3459 LoopScalarPreHeader->getTerminator()); 3460 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 3461 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 3462 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3463 3464 // Now, we need to fix the users of the reduction variable 3465 // inside and outside of the scalar remainder loop. 3466 // We know that the loop is in LCSSA form. We need to update the 3467 // PHI nodes in the exit blocks. 3468 for (BasicBlock::iterator LEI = LoopExitBlock->begin(), 3469 LEE = LoopExitBlock->end(); LEI != LEE; ++LEI) { 3470 PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI); 3471 if (!LCSSAPhi) break; 3472 3473 // All PHINodes need to have a single entry edge, or two if 3474 // we already fixed them. 3475 assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 3476 3477 // We found our reduction value exit-PHI. Update it with the 3478 // incoming bypass edge. 3479 if (LCSSAPhi->getIncomingValue(0) == LoopExitInst) { 3480 // Add an edge coming from the bypass. 3481 LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3482 break; 3483 } 3484 }// end of the LCSSA phi scan. 3485 3486 // Fix the scalar loop reduction variable with the incoming reduction sum 3487 // from the vector body and from the backedge value. 3488 int IncomingEdgeBlockIdx = 3489 (RdxPhi)->getBasicBlockIndex(OrigLoop->getLoopLatch()); 3490 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 3491 // Pick the other block. 3492 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 3493 (RdxPhi)->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 3494 (RdxPhi)->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 3495 }// end of for each redux variable. 3496 3497 fixLCSSAPHIs(); 3498 3499 // Make sure DomTree is updated. 3500 updateAnalysis(); 3501 3502 // Predicate any stores. 
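  // Sketch of the structure produced for each store predicated on %cond:
  //   pred.store.if:                     ; executed only when %cond is true
  //     store i32 %val, i32* %addr
  //     br label %pred.store.continue
  //   pred.store.continue:
  // i.e. "if (cond) a[i] = val;" becomes an explicit conditional block.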
3503 for (auto KV : PredicatedStores) { 3504 BasicBlock::iterator I(KV.first); 3505 auto *BB = SplitBlock(I->getParent(), &*std::next(I), DT, LI); 3506 auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false, 3507 /*BranchWeights=*/nullptr, DT); 3508 I->moveBefore(T); 3509 I->getParent()->setName("pred.store.if"); 3510 BB->setName("pred.store.continue"); 3511 } 3512 DEBUG(DT->verifyDomTree()); 3513 // Remove redundant induction instructions. 3514 cse(LoopVectorBody); 3515 } 3516 3517 void InnerLoopVectorizer::fixLCSSAPHIs() { 3518 for (BasicBlock::iterator LEI = LoopExitBlock->begin(), 3519 LEE = LoopExitBlock->end(); LEI != LEE; ++LEI) { 3520 PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI); 3521 if (!LCSSAPhi) break; 3522 if (LCSSAPhi->getNumIncomingValues() == 1) 3523 LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()), 3524 LoopMiddleBlock); 3525 } 3526 } 3527 3528 InnerLoopVectorizer::VectorParts 3529 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) { 3530 assert(std::find(pred_begin(Dst), pred_end(Dst), Src) != pred_end(Dst) && 3531 "Invalid edge"); 3532 3533 // Look for cached value. 3534 std::pair<BasicBlock*, BasicBlock*> Edge(Src, Dst); 3535 EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge); 3536 if (ECEntryIt != MaskCache.end()) 3537 return ECEntryIt->second; 3538 3539 VectorParts SrcMask = createBlockInMask(Src); 3540 3541 // The terminator has to be a branch inst! 3542 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 3543 assert(BI && "Unexpected terminator found"); 3544 3545 if (BI->isConditional()) { 3546 VectorParts EdgeMask = getVectorValue(BI->getCondition()); 3547 3548 if (BI->getSuccessor(0) != Dst) 3549 for (unsigned part = 0; part < UF; ++part) 3550 EdgeMask[part] = Builder.CreateNot(EdgeMask[part]); 3551 3552 for (unsigned part = 0; part < UF; ++part) 3553 EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]); 3554 3555 MaskCache[Edge] = EdgeMask; 3556 return EdgeMask; 3557 } 3558 3559 MaskCache[Edge] = SrcMask; 3560 return SrcMask; 3561 } 3562 3563 InnerLoopVectorizer::VectorParts 3564 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) { 3565 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 3566 3567 // Loop incoming mask is all-one. 3568 if (OrigLoop->getHeader() == BB) { 3569 Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1); 3570 return getVectorValue(C); 3571 } 3572 3573 // This is the block mask. We OR all incoming edges, and with zero. 3574 Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0); 3575 VectorParts BlockMask = getVectorValue(Zero); 3576 3577 // For each pred: 3578 for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) { 3579 VectorParts EM = createEdgeMask(*it, BB); 3580 for (unsigned part = 0; part < UF; ++part) 3581 BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]); 3582 } 3583 3584 return BlockMask; 3585 } 3586 3587 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 3588 InnerLoopVectorizer::VectorParts &Entry, 3589 unsigned UF, unsigned VF, PhiVector *PV) { 3590 PHINode* P = cast<PHINode>(PN); 3591 // Handle reduction variables: 3592 if (Legal->getReductionVars()->count(P)) { 3593 for (unsigned part = 0; part < UF; ++part) { 3594 // This is phase one of vectorizing PHIs. 3595 Type *VecTy = (VF == 1) ? 
PN->getType() : 3596 VectorType::get(PN->getType(), VF); 3597 Entry[part] = PHINode::Create( 3598 VecTy, 2, "vec.phi", &*LoopVectorBody.back()->getFirstInsertionPt()); 3599 } 3600 PV->push_back(P); 3601 return; 3602 } 3603 3604 setDebugLocFromInst(Builder, P); 3605 // Check for PHI nodes that are lowered to vector selects. 3606 if (P->getParent() != OrigLoop->getHeader()) { 3607 // We know that all PHIs in non-header blocks are converted into 3608 // selects, so we don't have to worry about the insertion order and we 3609 // can just use the builder. 3610 // At this point we generate the predication tree. There may be 3611 // duplications since this is a simple recursive scan, but future 3612 // optimizations will clean it up. 3613 3614 unsigned NumIncoming = P->getNumIncomingValues(); 3615 3616 // Generate a sequence of selects of the form: 3617 // SELECT(Mask3, In3, 3618 // SELECT(Mask2, In2, 3619 // ( ...))) 3620 for (unsigned In = 0; In < NumIncoming; In++) { 3621 VectorParts Cond = createEdgeMask(P->getIncomingBlock(In), 3622 P->getParent()); 3623 VectorParts &In0 = getVectorValue(P->getIncomingValue(In)); 3624 3625 for (unsigned part = 0; part < UF; ++part) { 3626 // We might have single edge PHIs (blocks) - use an identity 3627 // 'select' for the first PHI operand. 3628 if (In == 0) 3629 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], 3630 In0[part]); 3631 else 3632 // Select between the current value and the previous incoming edge 3633 // based on the incoming mask. 3634 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], 3635 Entry[part], "predphi"); 3636 } 3637 } 3638 return; 3639 } 3640 3641 // This PHINode must be an induction variable. 3642 // Make sure that we know about it. 3643 assert(Legal->getInductionVars()->count(P) && 3644 "Not an induction variable"); 3645 3646 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 3647 3648 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 3649 // which can be found from the original scalar operations. 3650 switch (II.getKind()) { 3651 case InductionDescriptor::IK_NoInduction: 3652 llvm_unreachable("Unknown induction"); 3653 case InductionDescriptor::IK_IntInduction: { 3654 assert(P->getType() == II.getStartValue()->getType() && "Types must match"); 3655 // Handle other induction variables that are now based on the 3656 // canonical one. 3657 Value *V = Induction; 3658 if (P != OldInduction) { 3659 V = Builder.CreateSExtOrTrunc(Induction, P->getType()); 3660 V = II.transform(Builder, V); 3661 V->setName("offset.idx"); 3662 } 3663 Value *Broadcasted = getBroadcastInstrs(V); 3664 // After broadcasting the induction variable we need to make the vector 3665 // consecutive by adding 0, 1, 2, etc. 3666 for (unsigned part = 0; part < UF; ++part) 3667 Entry[part] = getStepVector(Broadcasted, VF * part, II.getStepValue()); 3668 return; 3669 } 3670 case InductionDescriptor::IK_PtrInduction: 3671 // Handle the pointer induction variable case. 3672 assert(P->getType()->isPointerTy() && "Unexpected type."); 3673 // This is the normalized GEP that starts counting at zero. 3674 Value *PtrInd = Induction; 3675 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStepValue()->getType()); 3676 // This is the vector of results. Notice that we don't generate 3677 // vector geps because scalar geps result in better code. 
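    // E.g. (VF = 4, UF = 1, illustrative): rather than one vector GEP we
    // emit four scalar GEPs for indices {0, 1, 2, 3} ("next.gep") and
    // assemble them into a vector of pointers with insertelement.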
3678 for (unsigned part = 0; part < UF; ++part) { 3679 if (VF == 1) { 3680 int EltIndex = part; 3681 Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex); 3682 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 3683 Value *SclrGep = II.transform(Builder, GlobalIdx); 3684 SclrGep->setName("next.gep"); 3685 Entry[part] = SclrGep; 3686 continue; 3687 } 3688 3689 Value *VecVal = UndefValue::get(VectorType::get(P->getType(), VF)); 3690 for (unsigned int i = 0; i < VF; ++i) { 3691 int EltIndex = i + part * VF; 3692 Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex); 3693 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 3694 Value *SclrGep = II.transform(Builder, GlobalIdx); 3695 SclrGep->setName("next.gep"); 3696 VecVal = Builder.CreateInsertElement(VecVal, SclrGep, 3697 Builder.getInt32(i), 3698 "insert.gep"); 3699 } 3700 Entry[part] = VecVal; 3701 } 3702 return; 3703 } 3704 } 3705 3706 void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) { 3707 // For each instruction in the old loop. 3708 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 3709 VectorParts &Entry = WidenMap.get(&*it); 3710 3711 switch (it->getOpcode()) { 3712 case Instruction::Br: 3713 // Nothing to do for PHIs and BR, since we already took care of the 3714 // loop control flow instructions. 3715 continue; 3716 case Instruction::PHI: { 3717 // Vectorize PHINodes. 3718 widenPHIInstruction(&*it, Entry, UF, VF, PV); 3719 continue; 3720 }// End of PHI. 3721 3722 case Instruction::Add: 3723 case Instruction::FAdd: 3724 case Instruction::Sub: 3725 case Instruction::FSub: 3726 case Instruction::Mul: 3727 case Instruction::FMul: 3728 case Instruction::UDiv: 3729 case Instruction::SDiv: 3730 case Instruction::FDiv: 3731 case Instruction::URem: 3732 case Instruction::SRem: 3733 case Instruction::FRem: 3734 case Instruction::Shl: 3735 case Instruction::LShr: 3736 case Instruction::AShr: 3737 case Instruction::And: 3738 case Instruction::Or: 3739 case Instruction::Xor: { 3740 // Just widen binops. 3741 BinaryOperator *BinOp = dyn_cast<BinaryOperator>(it); 3742 setDebugLocFromInst(Builder, BinOp); 3743 VectorParts &A = getVectorValue(it->getOperand(0)); 3744 VectorParts &B = getVectorValue(it->getOperand(1)); 3745 3746 // Use this vector value for all users of the original instruction. 3747 for (unsigned Part = 0; Part < UF; ++Part) { 3748 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]); 3749 3750 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V)) 3751 VecOp->copyIRFlags(BinOp); 3752 3753 Entry[Part] = V; 3754 } 3755 3756 propagateMetadata(Entry, &*it); 3757 break; 3758 } 3759 case Instruction::Select: { 3760 // Widen selects. 3761 // If the selector is loop invariant we can create a select 3762 // instruction with a scalar condition. Otherwise, use vector-select. 3763 bool InvariantCond = SE->isLoopInvariant(SE->getSCEV(it->getOperand(0)), 3764 OrigLoop); 3765 setDebugLocFromInst(Builder, &*it); 3766 3767 // The condition can be loop invariant but still defined inside the 3768 // loop. This means that we can't just use the original 'cond' value. 3769 // We have to take the 'vectorized' value and pick the first lane. 3770 // Instcombine will make this a no-op. 3771 VectorParts &Cond = getVectorValue(it->getOperand(0)); 3772 VectorParts &Op0 = getVectorValue(it->getOperand(1)); 3773 VectorParts &Op1 = getVectorValue(it->getOperand(2)); 3774 3775 Value *ScalarCond = (VF == 1) ? 
Cond[0] : 3776 Builder.CreateExtractElement(Cond[0], Builder.getInt32(0)); 3777 3778 for (unsigned Part = 0; Part < UF; ++Part) { 3779 Entry[Part] = Builder.CreateSelect( 3780 InvariantCond ? ScalarCond : Cond[Part], 3781 Op0[Part], 3782 Op1[Part]); 3783 } 3784 3785 propagateMetadata(Entry, &*it); 3786 break; 3787 } 3788 3789 case Instruction::ICmp: 3790 case Instruction::FCmp: { 3791 // Widen compares. Generate vector compares. 3792 bool FCmp = (it->getOpcode() == Instruction::FCmp); 3793 CmpInst *Cmp = dyn_cast<CmpInst>(it); 3794 setDebugLocFromInst(Builder, &*it); 3795 VectorParts &A = getVectorValue(it->getOperand(0)); 3796 VectorParts &B = getVectorValue(it->getOperand(1)); 3797 for (unsigned Part = 0; Part < UF; ++Part) { 3798 Value *C = nullptr; 3799 if (FCmp) { 3800 C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]); 3801 cast<FCmpInst>(C)->copyFastMathFlags(&*it); 3802 } else { 3803 C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]); 3804 } 3805 Entry[Part] = C; 3806 } 3807 3808 propagateMetadata(Entry, &*it); 3809 break; 3810 } 3811 3812 case Instruction::Store: 3813 case Instruction::Load: 3814 vectorizeMemoryInstruction(&*it); 3815 break; 3816 case Instruction::ZExt: 3817 case Instruction::SExt: 3818 case Instruction::FPToUI: 3819 case Instruction::FPToSI: 3820 case Instruction::FPExt: 3821 case Instruction::PtrToInt: 3822 case Instruction::IntToPtr: 3823 case Instruction::SIToFP: 3824 case Instruction::UIToFP: 3825 case Instruction::Trunc: 3826 case Instruction::FPTrunc: 3827 case Instruction::BitCast: { 3828 CastInst *CI = dyn_cast<CastInst>(it); 3829 setDebugLocFromInst(Builder, &*it); 3830 /// Optimize the special case where the source is the induction 3831 /// variable. Notice that we can only optimize the 'trunc' case 3832 /// because: a. FP conversions lose precision, b. sext/zext may wrap, 3833 /// c. other casts depend on pointer size. 3834 if (CI->getOperand(0) == OldInduction && 3835 it->getOpcode() == Instruction::Trunc) { 3836 Value *ScalarCast = Builder.CreateCast(CI->getOpcode(), Induction, 3837 CI->getType()); 3838 Value *Broadcasted = getBroadcastInstrs(ScalarCast); 3839 InductionDescriptor II = Legal->getInductionVars()->lookup(OldInduction); 3840 Constant *Step = 3841 ConstantInt::getSigned(CI->getType(), II.getStepValue()->getSExtValue()); 3842 for (unsigned Part = 0; Part < UF; ++Part) 3843 Entry[Part] = getStepVector(Broadcasted, VF * Part, Step); 3844 propagateMetadata(Entry, &*it); 3845 break; 3846 } 3847 /// Vectorize casts. 3848 Type *DestTy = (VF == 1) ? CI->getType() : 3849 VectorType::get(CI->getType(), VF); 3850 3851 VectorParts &A = getVectorValue(it->getOperand(0)); 3852 for (unsigned Part = 0; Part < UF; ++Part) 3853 Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy); 3854 propagateMetadata(Entry, &*it); 3855 break; 3856 } 3857 3858 case Instruction::Call: { 3859 // Ignore dbg intrinsics. 
3860 if (isa<DbgInfoIntrinsic>(it)) 3861 break; 3862 setDebugLocFromInst(Builder, &*it); 3863 3864 Module *M = BB->getParent()->getParent(); 3865 CallInst *CI = cast<CallInst>(it); 3866 3867 StringRef FnName = CI->getCalledFunction()->getName(); 3868 Function *F = CI->getCalledFunction(); 3869 Type *RetTy = ToVectorTy(CI->getType(), VF); 3870 SmallVector<Type *, 4> Tys; 3871 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) 3872 Tys.push_back(ToVectorTy(CI->getArgOperand(i)->getType(), VF)); 3873 3874 Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI); 3875 if (ID && 3876 (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 3877 ID == Intrinsic::lifetime_start)) { 3878 scalarizeInstruction(&*it); 3879 break; 3880 } 3881 // The flag shows whether we use Intrinsic or a usual Call for vectorized 3882 // version of the instruction. 3883 // Is it beneficial to perform intrinsic call compared to lib call? 3884 bool NeedToScalarize; 3885 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 3886 bool UseVectorIntrinsic = 3887 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 3888 if (!UseVectorIntrinsic && NeedToScalarize) { 3889 scalarizeInstruction(&*it); 3890 break; 3891 } 3892 3893 for (unsigned Part = 0; Part < UF; ++Part) { 3894 SmallVector<Value *, 4> Args; 3895 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) { 3896 Value *Arg = CI->getArgOperand(i); 3897 // Some intrinsics have a scalar argument - don't replace it with a 3898 // vector. 3899 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) { 3900 VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i)); 3901 Arg = VectorArg[Part]; 3902 } 3903 Args.push_back(Arg); 3904 } 3905 3906 Function *VectorF; 3907 if (UseVectorIntrinsic) { 3908 // Use vector version of the intrinsic. 3909 Type *TysForDecl[] = {CI->getType()}; 3910 if (VF > 1) 3911 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 3912 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 3913 } else { 3914 // Use vector version of the library call. 3915 StringRef VFnName = TLI->getVectorizedFunction(FnName, VF); 3916 assert(!VFnName.empty() && "Vector function name is empty."); 3917 VectorF = M->getFunction(VFnName); 3918 if (!VectorF) { 3919 // Generate a declaration 3920 FunctionType *FTy = FunctionType::get(RetTy, Tys, false); 3921 VectorF = 3922 Function::Create(FTy, Function::ExternalLinkage, VFnName, M); 3923 VectorF->copyAttributesFrom(F); 3924 } 3925 } 3926 assert(VectorF && "Can't create vector function."); 3927 Entry[Part] = Builder.CreateCall(VectorF, Args); 3928 } 3929 3930 propagateMetadata(Entry, &*it); 3931 break; 3932 } 3933 3934 default: 3935 // All other instructions are unsupported. Scalarize them. 3936 scalarizeInstruction(&*it); 3937 break; 3938 }// end of switch. 3939 }// end of for_each instr. 3940 } 3941 3942 void InnerLoopVectorizer::updateAnalysis() { 3943 // Forget the original basic block. 3944 SE->forgetLoop(OrigLoop); 3945 3946 // Update the dominator tree information. 3947 assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) && 3948 "Entry does not dominate exit."); 3949 3950 for (unsigned I = 1, E = LoopBypassBlocks.size(); I != E; ++I) 3951 DT->addNewBlock(LoopBypassBlocks[I], LoopBypassBlocks[I-1]); 3952 DT->addNewBlock(LoopVectorPreHeader, LoopBypassBlocks.back()); 3953 3954 // We don't predicate stores by this point, so the vector body should be a 3955 // single loop. 
3956 assert(LoopVectorBody.size() == 1 && "Expected single block loop!"); 3957 DT->addNewBlock(LoopVectorBody[0], LoopVectorPreHeader); 3958 3959 DT->addNewBlock(LoopMiddleBlock, LoopVectorBody.back()); 3960 DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]); 3961 DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader); 3962 DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]); 3963 3964 DEBUG(DT->verifyDomTree()); 3965 } 3966 3967 /// \brief Check whether it is safe to if-convert this phi node. 3968 /// 3969 /// Phi nodes with constant expressions that can trap are not safe to if 3970 /// convert. 3971 static bool canIfConvertPHINodes(BasicBlock *BB) { 3972 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) { 3973 PHINode *Phi = dyn_cast<PHINode>(I); 3974 if (!Phi) 3975 return true; 3976 for (unsigned p = 0, e = Phi->getNumIncomingValues(); p != e; ++p) 3977 if (Constant *C = dyn_cast<Constant>(Phi->getIncomingValue(p))) 3978 if (C->canTrap()) 3979 return false; 3980 } 3981 return true; 3982 } 3983 3984 bool LoopVectorizationLegality::canVectorizeWithIfConvert() { 3985 if (!EnableIfConversion) { 3986 emitAnalysis(VectorizationReport() << "if-conversion is disabled"); 3987 return false; 3988 } 3989 3990 assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable"); 3991 3992 // A list of pointers that we can safely read and write to. 3993 SmallPtrSet<Value *, 8> SafePointes; 3994 3995 // Collect safe addresses. 3996 for (Loop::block_iterator BI = TheLoop->block_begin(), 3997 BE = TheLoop->block_end(); BI != BE; ++BI) { 3998 BasicBlock *BB = *BI; 3999 4000 if (blockNeedsPredication(BB)) 4001 continue; 4002 4003 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) { 4004 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 4005 SafePointes.insert(LI->getPointerOperand()); 4006 else if (StoreInst *SI = dyn_cast<StoreInst>(I)) 4007 SafePointes.insert(SI->getPointerOperand()); 4008 } 4009 } 4010 4011 // Collect the blocks that need predication. 4012 BasicBlock *Header = TheLoop->getHeader(); 4013 for (Loop::block_iterator BI = TheLoop->block_begin(), 4014 BE = TheLoop->block_end(); BI != BE; ++BI) { 4015 BasicBlock *BB = *BI; 4016 4017 // We don't support switch statements inside loops. 4018 if (!isa<BranchInst>(BB->getTerminator())) { 4019 emitAnalysis(VectorizationReport(BB->getTerminator()) 4020 << "loop contains a switch statement"); 4021 return false; 4022 } 4023 4024 // We must be able to predicate all blocks that need to be predicated. 4025 if (blockNeedsPredication(BB)) { 4026 if (!blockCanBePredicated(BB, SafePointes)) { 4027 emitAnalysis(VectorizationReport(BB->getTerminator()) 4028 << "control flow cannot be substituted for a select"); 4029 return false; 4030 } 4031 } else if (BB != Header && !canIfConvertPHINodes(BB)) { 4032 emitAnalysis(VectorizationReport(BB->getTerminator()) 4033 << "control flow cannot be substituted for a select"); 4034 return false; 4035 } 4036 } 4037 4038 // We can if-convert this loop. 4039 return true; 4040 } 4041 4042 bool LoopVectorizationLegality::canVectorize() { 4043 // We must have a loop in canonical form. Loops with indirectbr in them cannot 4044 // be canonicalized. 4045 if (!TheLoop->getLoopPreheader()) { 4046 emitAnalysis( 4047 VectorizationReport() << 4048 "loop control flow is not understood by vectorizer"); 4049 return false; 4050 } 4051 4052 // We can only vectorize innermost loops. 
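  // (Note: Loop::empty() means "has no subloops", so a non-empty loop here
  // is an outer loop and is rejected.)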
4053   if (!TheLoop->empty()) {
4054     emitAnalysis(VectorizationReport() << "loop is not the innermost loop");
4055     return false;
4056   }
4057
4058   // We must have a single backedge.
4059   if (TheLoop->getNumBackEdges() != 1) {
4060     emitAnalysis(
4061         VectorizationReport() <<
4062         "loop control flow is not understood by vectorizer");
4063     return false;
4064   }
4065
4066   // We must have a single exiting block.
4067   if (!TheLoop->getExitingBlock()) {
4068     emitAnalysis(
4069         VectorizationReport() <<
4070         "loop control flow is not understood by vectorizer");
4071     return false;
4072   }
4073
4074   // We only handle bottom-tested loops, i.e. loops in which the condition is
4075   // checked at the end of each iteration. With that we can assume that all
4076   // instructions in the loop are executed the same number of times.
4077   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
4078     emitAnalysis(
4079         VectorizationReport() <<
4080         "loop control flow is not understood by vectorizer");
4081     return false;
4082   }
4083
4084   // We need to have a loop header.
4085   DEBUG(dbgs() << "LV: Found a loop: " <<
4086         TheLoop->getHeader()->getName() << '\n');
4087
4088   // Check if we can if-convert non-single-bb loops.
4089   unsigned NumBlocks = TheLoop->getNumBlocks();
4090   if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
4091     DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
4092     return false;
4093   }
4094
4095   // ScalarEvolution needs to be able to find the exit count.
4096   const SCEV *ExitCount = SE->getBackedgeTakenCount(TheLoop);
4097   if (ExitCount == SE->getCouldNotCompute()) {
4098     emitAnalysis(VectorizationReport() <<
4099                  "could not determine number of loop iterations");
4100     DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
4101     return false;
4102   }
4103
4104   // Check if we can vectorize the instructions and CFG in this loop.
4105   if (!canVectorizeInstrs()) {
4106     DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
4107     return false;
4108   }
4109
4110   // Go over each instruction and look at memory deps.
4111   if (!canVectorizeMemory()) {
4112     DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
4113     return false;
4114   }
4115
4116   // Collect all of the variables that remain uniform after vectorization.
4117   collectLoopUniforms();
4118
4119   DEBUG(dbgs() << "LV: We can vectorize this loop"
4120         << (LAI->getRuntimePointerChecking()->Need
4121             ? " (with a runtime bound check)"
4122             : "")
4123         << "!\n");
4124
4125   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
4126
4127   // If an override option has been passed in for interleaved accesses, use it.
4128   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
4129     UseInterleaved = EnableInterleavedMemAccesses;
4130
4131   // Analyze interleaved memory accesses.
4132   if (UseInterleaved)
4133     InterleaveInfo.analyzeInterleaving(Strides);
4134
4135   // Okay! We can vectorize. At this point we don't have any other mem analysis
4136   // which may limit our maximum vectorization factor, so just return true with
4137   // no restrictions.
4138   return true;
4139 }
4140
4141 static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
4142   if (Ty->isPointerTy())
4143     return DL.getIntPtrType(Ty);
4144
4145   // It is possible that chars or shorts overflow when we ask for the loop's
4146   // trip count; work around this by widening the type.
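  // E.g. an i8 induction variable in a loop with 200 iterations would wrap
  // around; computing the trip count in i32 avoids that.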
4147   if (Ty->getScalarSizeInBits() < 32)
4148     return Type::getInt32Ty(Ty->getContext());
4149
4150   return Ty;
4151 }
4152
4153 static Type* getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
4154   Ty0 = convertPointerToIntegerType(DL, Ty0);
4155   Ty1 = convertPointerToIntegerType(DL, Ty1);
4156   if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
4157     return Ty0;
4158   return Ty1;
4159 }
4160
4161 /// \brief Check that the instruction has outside loop users and is not an
4162 /// identified reduction variable.
4163 static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
4164                                SmallPtrSetImpl<Value *> &Reductions) {
4165   // Reduction instructions are allowed to have exit users. All other
4166   // instructions must not have external users.
4167   if (!Reductions.count(Inst))
4168     // Check that all of the users of the instruction are inside the loop.
4169     for (User *U : Inst->users()) {
4170       Instruction *UI = cast<Instruction>(U);
4171       // This user may be a reduction exit value.
4172       if (!TheLoop->contains(UI)) {
4173         DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
4174         return true;
4175       }
4176     }
4177   return false;
4178 }
4179
4180 bool LoopVectorizationLegality::canVectorizeInstrs() {
4181   BasicBlock *Header = TheLoop->getHeader();
4182
4183   // Look for the attribute signaling the absence of NaNs.
4184   Function &F = *Header->getParent();
4185   const DataLayout &DL = F.getParent()->getDataLayout();
4186   if (F.hasFnAttribute("no-nans-fp-math"))
4187     HasFunNoNaNAttr =
4188         F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
4189
4190   // For each block in the loop.
4191   for (Loop::block_iterator bb = TheLoop->block_begin(),
4192        be = TheLoop->block_end(); bb != be; ++bb) {
4193
4194     // Scan the instructions in the block and look for hazards.
4195     for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
4196          ++it) {
4197
4198       if (PHINode *Phi = dyn_cast<PHINode>(it)) {
4199         Type *PhiTy = Phi->getType();
4200         // Check that this PHI type is allowed.
4201         if (!PhiTy->isIntegerTy() &&
4202             !PhiTy->isFloatingPointTy() &&
4203             !PhiTy->isPointerTy()) {
4204           emitAnalysis(VectorizationReport(&*it)
4205                        << "loop control flow is not understood by vectorizer");
4206           DEBUG(dbgs() << "LV: Found a non-int non-pointer PHI.\n");
4207           return false;
4208         }
4209
4210         // If this PHINode is not in the header block, then we know that we
4211         // can convert it to select during if-conversion. No need to check if
4212         // the PHIs in this block are induction or reduction variables.
4213         if (*bb != Header) {
4214           // Check that this instruction has no outside users or is an
4215           // identified reduction value with an outside user.
4216           if (!hasOutsideLoopUser(TheLoop, &*it, AllowedExit))
4217             continue;
4218           emitAnalysis(VectorizationReport(&*it) <<
4219                        "value could not be identified as "
4220                        "an induction or reduction variable");
4221           return false;
4222         }
4223
4224         // We only allow if-converted PHIs with exactly two incoming values.
4225         if (Phi->getNumIncomingValues() != 2) {
4226           emitAnalysis(VectorizationReport(&*it)
4227                        << "control flow not understood by vectorizer");
4228           DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
4229           return false;
4230         }
4231
4232         InductionDescriptor ID;
4233         if (InductionDescriptor::isInductionPHI(Phi, SE, ID)) {
4234           Inductions[Phi] = ID;
4235           // Get the widest type.
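          // E.g. with both an i32 and an i64 induction variable in the loop,
          // WidestIndTy ends up as i64; narrower IVs are later rebuilt from
          // the canonical wide induction.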
4236 if (!WidestIndTy) 4237 WidestIndTy = convertPointerToIntegerType(DL, PhiTy); 4238 else 4239 WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy); 4240 4241 // Int inductions are special because we only allow one IV. 4242 if (ID.getKind() == InductionDescriptor::IK_IntInduction && 4243 ID.getStepValue()->isOne() && 4244 isa<Constant>(ID.getStartValue()) && 4245 cast<Constant>(ID.getStartValue())->isNullValue()) { 4246 // Use the phi node with the widest type as induction. Use the last 4247 // one if there are multiple (no good reason for doing this other 4248 // than it is expedient). We've checked that it begins at zero and 4249 // steps by one, so this is a canonical induction variable. 4250 if (!Induction || PhiTy == WidestIndTy) 4251 Induction = Phi; 4252 } 4253 4254 DEBUG(dbgs() << "LV: Found an induction variable.\n"); 4255 4256 // Until we explicitly handle the case of an induction variable with 4257 // an outside loop user we have to give up vectorizing this loop. 4258 if (hasOutsideLoopUser(TheLoop, &*it, AllowedExit)) { 4259 emitAnalysis(VectorizationReport(&*it) << 4260 "use of induction value outside of the " 4261 "loop is not handled by vectorizer"); 4262 return false; 4263 } 4264 4265 continue; 4266 } 4267 4268 if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, 4269 Reductions[Phi])) { 4270 if (Reductions[Phi].hasUnsafeAlgebra()) 4271 Requirements->addUnsafeAlgebraInst( 4272 Reductions[Phi].getUnsafeAlgebraInst()); 4273 AllowedExit.insert(Reductions[Phi].getLoopExitInstr()); 4274 continue; 4275 } 4276 4277 emitAnalysis(VectorizationReport(&*it) << 4278 "value that could not be identified as " 4279 "reduction is used outside the loop"); 4280 DEBUG(dbgs() << "LV: Found an unidentified PHI."<< *Phi <<"\n"); 4281 return false; 4282 }// end of PHI handling 4283 4284 // We handle calls that: 4285 // * Are debug info intrinsics. 4286 // * Have a mapping to an IR intrinsic. 4287 // * Have a vector version available. 4288 CallInst *CI = dyn_cast<CallInst>(it); 4289 if (CI && !getIntrinsicIDForCall(CI, TLI) && !isa<DbgInfoIntrinsic>(CI) && 4290 !(CI->getCalledFunction() && TLI && 4291 TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) { 4292 emitAnalysis(VectorizationReport(&*it) 4293 << "call instruction cannot be vectorized"); 4294 DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n"); 4295 return false; 4296 } 4297 4298 // Intrinsics such as powi,cttz and ctlz are legal to vectorize if the 4299 // second argument is the same (i.e. loop invariant) 4300 if (CI && 4301 hasVectorInstrinsicScalarOpd(getIntrinsicIDForCall(CI, TLI), 1)) { 4302 if (!SE->isLoopInvariant(SE->getSCEV(CI->getOperand(1)), TheLoop)) { 4303 emitAnalysis(VectorizationReport(&*it) 4304 << "intrinsic instruction cannot be vectorized"); 4305 DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n"); 4306 return false; 4307 } 4308 } 4309 4310 // Check that the instruction return type is vectorizable. 4311 // Also, we can't vectorize extractelement instructions. 4312 if ((!VectorType::isValidElementType(it->getType()) && 4313 !it->getType()->isVoidTy()) || isa<ExtractElementInst>(it)) { 4314 emitAnalysis(VectorizationReport(&*it) 4315 << "instruction return type cannot be vectorized"); 4316 DEBUG(dbgs() << "LV: Found unvectorizable type.\n"); 4317 return false; 4318 } 4319 4320 // Check that the stored type is vectorizable. 
4321 if (StoreInst *ST = dyn_cast<StoreInst>(it)) { 4322 Type *T = ST->getValueOperand()->getType(); 4323 if (!VectorType::isValidElementType(T)) { 4324 emitAnalysis(VectorizationReport(ST) << 4325 "store instruction cannot be vectorized"); 4326 return false; 4327 } 4328 if (EnableMemAccessVersioning) 4329 collectStridedAccess(ST); 4330 } 4331 4332 if (EnableMemAccessVersioning) 4333 if (LoadInst *LI = dyn_cast<LoadInst>(it)) 4334 collectStridedAccess(LI); 4335 4336 // Reduction instructions are allowed to have exit users. 4337 // All other instructions must not have external users. 4338 if (hasOutsideLoopUser(TheLoop, &*it, AllowedExit)) { 4339 emitAnalysis(VectorizationReport(&*it) << 4340 "value cannot be used outside the loop"); 4341 return false; 4342 } 4343 4344 } // next instr. 4345 4346 } 4347 4348 if (!Induction) { 4349 DEBUG(dbgs() << "LV: Did not find one integer induction var.\n"); 4350 if (Inductions.empty()) { 4351 emitAnalysis(VectorizationReport() 4352 << "loop induction variable could not be identified"); 4353 return false; 4354 } 4355 } 4356 4357 // Now we know the widest induction type, check if our found induction 4358 // is the same size. If it's not, unset it here and InnerLoopVectorizer 4359 // will create another. 4360 if (Induction && WidestIndTy != Induction->getType()) 4361 Induction = nullptr; 4362 4363 return true; 4364 } 4365 4366 void LoopVectorizationLegality::collectStridedAccess(Value *MemAccess) { 4367 Value *Ptr = nullptr; 4368 if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess)) 4369 Ptr = LI->getPointerOperand(); 4370 else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess)) 4371 Ptr = SI->getPointerOperand(); 4372 else 4373 return; 4374 4375 Value *Stride = getStrideFromPointer(Ptr, SE, TheLoop); 4376 if (!Stride) 4377 return; 4378 4379 DEBUG(dbgs() << "LV: Found a strided access that we can version"); 4380 DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n"); 4381 Strides[Ptr] = Stride; 4382 StrideSet.insert(Stride); 4383 } 4384 4385 void LoopVectorizationLegality::collectLoopUniforms() { 4386 // We now know that the loop is vectorizable! 4387 // Collect variables that will remain uniform after vectorization. 4388 std::vector<Value*> Worklist; 4389 BasicBlock *Latch = TheLoop->getLoopLatch(); 4390 4391 // Start with the conditional branch and walk up the block. 4392 Worklist.push_back(Latch->getTerminator()->getOperand(0)); 4393 4394 // Also add all consecutive pointer values; these values will be uniform 4395 // after vectorization (and subsequent cleanup) and, until revectorization is 4396 // supported, all dependencies must also be uniform. 4397 for (Loop::block_iterator B = TheLoop->block_begin(), 4398 BE = TheLoop->block_end(); B != BE; ++B) 4399 for (BasicBlock::iterator I = (*B)->begin(), IE = (*B)->end(); 4400 I != IE; ++I) 4401 if (I->getType()->isPointerTy() && isConsecutivePtr(&*I)) 4402 Worklist.insert(Worklist.end(), I->op_begin(), I->op_end()); 4403 4404 while (!Worklist.empty()) { 4405 Instruction *I = dyn_cast<Instruction>(Worklist.back()); 4406 Worklist.pop_back(); 4407 4408 // Look at instructions inside this loop. 4409 // Stop when reaching PHI nodes. 4410 // TODO: we need to follow values all over the loop, not only in this block. 4411 if (!I || !TheLoop->contains(I) || isa<PHINode>(I)) 4412 continue; 4413 4414 // This is a known uniform. 4415 Uniforms.insert(I); 4416 4417 // Insert all operands. 
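    // E.g. the scalar index computation feeding a consecutive GEP typically
    // ends up in Uniforms here: every lane derives its address from the same
    // scalar base, so these values need no widening.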
    Worklist.insert(Worklist.end(), I->op_begin(), I->op_end());
  }
}

bool LoopVectorizationLegality::canVectorizeMemory() {
  LAI = &LAA->getInfo(TheLoop, Strides);
  auto &OptionalReport = LAI->getReport();
  if (OptionalReport)
    emitAnalysis(VectorizationReport(*OptionalReport));
  if (!LAI->canVectorizeMemory())
    return false;

  if (LAI->hasStoreToLoopInvariantAddress()) {
    emitAnalysis(
        VectorizationReport()
        << "write to a loop invariant address could not be vectorized");
    DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
    return false;
  }

  Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());

  return true;
}

bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
  Value *In0 = const_cast<Value*>(V);
  PHINode *PN = dyn_cast_or_null<PHINode>(In0);
  if (!PN)
    return false;

  return Inductions.count(PN);
}

bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
  return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
}

bool LoopVectorizationLegality::blockCanBePredicated(BasicBlock *BB,
                                            SmallPtrSetImpl<Value *> &SafePtrs) {
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // Check that we don't have a constant expression that can trap as operand.
    for (Instruction::op_iterator OI = it->op_begin(), OE = it->op_end();
         OI != OE; ++OI) {
      if (Constant *C = dyn_cast<Constant>(*OI))
        if (C->canTrap())
          return false;
    }
    // We might be able to hoist the load.
    if (it->mayReadFromMemory()) {
      LoadInst *LI = dyn_cast<LoadInst>(it);
      if (!LI)
        return false;
      if (!SafePtrs.count(LI->getPointerOperand())) {
        if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand())) {
          MaskedOp.insert(LI);
          continue;
        }
        return false;
      }
    }

    // We don't predicate stores at the moment.
    if (it->mayWriteToMemory()) {
      StoreInst *SI = dyn_cast<StoreInst>(it);
      // We only support predication of stores in basic blocks with one
      // predecessor.
      if (!SI)
        return false;

      bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0);
      bool isSinglePredecessor = SI->getParent()->getSinglePredecessor();

      if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr ||
          !isSinglePredecessor) {
        // Build a masked store if it is legal for the target, otherwise
        // scalarize the block.
        bool isLegalMaskedOp =
            isLegalMaskedStore(SI->getValueOperand()->getType(),
                               SI->getPointerOperand());
        if (isLegalMaskedOp) {
          --NumPredStores;
          MaskedOp.insert(SI);
          continue;
        }
        return false;
      }
    }
    if (it->mayThrow())
      return false;

    // The instructions below can trap.
    switch (it->getOpcode()) {
    default: continue;
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
      return false;
    }
  }

  return true;
}

void InterleavedAccessInfo::collectConstStridedAccesses(
    MapVector<Instruction *, StrideDescriptor> &StrideAccesses,
    const ValueToValueMap &Strides) {
  // Holds load/store instructions in program order.
  SmallVector<Instruction *, 16> AccessList;

  for (auto *BB : TheLoop->getBlocks()) {
    bool IsPred = LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);

    for (auto &I : *BB) {
      if (!isa<LoadInst>(&I) && !isa<StoreInst>(&I))
        continue;
      // FIXME: Currently we can't handle mixed accesses and predicated
      // accesses.
      if (IsPred)
        return;

      AccessList.push_back(&I);
    }
  }

  if (AccessList.empty())
    return;

  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();
  for (auto I : AccessList) {
    LoadInst *LI = dyn_cast<LoadInst>(I);
    StoreInst *SI = dyn_cast<StoreInst>(I);

    Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();
    int Stride = isStridedPtr(SE, Ptr, TheLoop, Strides);

    // The factor of the corresponding interleave group.
    unsigned Factor = std::abs(Stride);

    // Ignore the access if the factor is too small or too large.
    if (Factor < 2 || Factor > MaxInterleaveGroupFactor)
      continue;

    const SCEV *Scev = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
    PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
    unsigned Size = DL.getTypeAllocSize(PtrTy->getElementType());

    // An alignment of 0 means target ABI alignment.
    unsigned Align = LI ? LI->getAlignment() : SI->getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(PtrTy->getElementType());

    StrideAccesses[I] = StrideDescriptor(Stride, Scev, Size, Align);
  }
}

// Analyze interleaved accesses and collect them into interleave groups.
//
// Notice that the vectorization of interleaved groups will change the
// instruction order and may break dependences. But the memory dependence check
// guarantees that there is no overlap between two pointers of different
// strides, element sizes or underlying bases.
//
// For pointers sharing the same stride, element size and underlying base,
// there is no need to worry about Read-After-Write or Write-After-Read
// dependences.
//
// E.g. The RAW dependence:  A[i] = a;
//                           b = A[i];
// This won't exist as it is a store-load forwarding conflict, which has
// already been checked and forbidden in the dependence check.
//
// E.g. The WAR dependence:  a = A[i];  // (1)
//                           A[i] = b;  // (2)
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). The dependence is safe.
void InterleavedAccessInfo::analyzeInterleaving(
    const ValueToValueMap &Strides) {
  DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");

  // Holds all the stride accesses.
  MapVector<Instruction *, StrideDescriptor> StrideAccesses;
  collectConstStridedAccesses(StrideAccesses, Strides);

  if (StrideAccesses.empty())
    return;

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> StoreGroups;

  // Search the load-load/store-store pair B-A in bottom-up order and try to
  // insert B into the interleave group of A according to 3 rules:
  //   1. A and B have the same stride.
  //   2. A and B have the same memory object size.
  //   3. B belongs to the group according to the distance.
  //
  // The bottom-up order can avoid breaking the Write-After-Write dependences
  // between two pointers of the same base.
  // E.g.
  //   A[i]   = a;  // (1)
  //   A[i]   = b;  // (2)
  //   A[i+1] = c;  // (3)
  // We form the group (2)+(3) in front, so (1) has to form groups with
  // accesses above (1), which guarantees that (1) is always above (2).
  for (auto I = StrideAccesses.rbegin(), E = StrideAccesses.rend(); I != E;
       ++I) {
    Instruction *A = I->first;
    StrideDescriptor DesA = I->second;

    InterleaveGroup *Group = getInterleaveGroup(A);
    if (!Group) {
      DEBUG(dbgs() << "LV: Creating an interleave group with:" << *A << '\n');
      Group = createInterleaveGroup(A, DesA.Stride, DesA.Align);
    }

    if (A->mayWriteToMemory())
      StoreGroups.insert(Group);

    for (auto II = std::next(I); II != E; ++II) {
      Instruction *B = II->first;
      StrideDescriptor DesB = II->second;

      // Ignore if B is already in a group or B is a different kind of memory
      // operation.
      if (isInterleaved(B) || A->mayReadFromMemory() != B->mayReadFromMemory())
        continue;

      // Check rules 1 and 2.
      if (DesB.Stride != DesA.Stride || DesB.Size != DesA.Size)
        continue;

      // Calculate the distance and prepare for rule 3.
      const SCEVConstant *DistToA =
          dyn_cast<SCEVConstant>(SE->getMinusSCEV(DesB.Scev, DesA.Scev));
      if (!DistToA)
        continue;

      int DistanceToA = DistToA->getValue()->getValue().getSExtValue();

      // Skip if the distance is not a multiple of the size, as B and A are
      // then not members of the same group.
      if (DistanceToA % static_cast<int>(DesA.Size))
        continue;

      // The index of B is the index of A plus the distance to A in units of
      // the element size.
      int IndexB =
          Group->getIndex(A) + DistanceToA / static_cast<int>(DesA.Size);

      // Try to insert B into the group.
      if (Group->insertMember(B, IndexB, DesB.Align)) {
        DEBUG(dbgs() << "LV: Inserted:" << *B << '\n'
                     << " into the interleave group with" << *A << '\n');
        InterleaveGroupMap[B] = Group;

        // Set the first load in program order as the insert position.
        if (B->mayReadFromMemory())
          Group->setInsertPos(B);
      }
    } // Iteration over instruction B.
  } // Iteration over instruction A.

  // Remove interleaved store groups with gaps.
  for (InterleaveGroup *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor())
      releaseGroup(Group);
}

LoopVectorizationCostModel::VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
  // Width 1 means no vectorization.
  VectorizationFactor Factor = { 1U, 0U };
  if (OptForSize && Legal->getRuntimePointerChecking()->Need) {
    emitAnalysis(VectorizationReport() <<
                 "runtime pointer checks needed. Enable vectorization of this "
                 "loop with '#pragma clang loop vectorize(enable)' when "
                 "compiling with -Os/-Oz");
    DEBUG(dbgs() <<
          "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
    return Factor;
  }

  if (!EnableCondStoresVectorization && Legal->getNumPredStores()) {
    emitAnalysis(VectorizationReport() <<
                 "store that is conditionally executed prevents vectorization");
    DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
    return Factor;
  }

  // Find the trip count.
  unsigned TC = SE->getSmallConstantTripCount(TheLoop);
  DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');

  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned WidestType = getWidestType();
  unsigned WidestRegister = TTI.getRegisterBitWidth(true);
  unsigned MaxSafeDepDist = -1U;
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
  WidestRegister = ((WidestRegister < MaxSafeDepDist) ?
                    WidestRegister : MaxSafeDepDist);
  unsigned MaxVectorSize = WidestRegister / WidestType;
  DEBUG(dbgs() << "LV: The Widest type: " << WidestType << " bits.\n");
  DEBUG(dbgs() << "LV: The Widest register is: "
               << WidestRegister << " bits.\n");

  if (MaxVectorSize == 0) {
    DEBUG(dbgs() << "LV: The target has no vector registers.\n");
    MaxVectorSize = 1;
  }

  assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
         " into one vector!");

  unsigned VF = MaxVectorSize;

  // If we optimize the program for size, avoid creating the tail loop.
  if (OptForSize) {
    // If we are unable to calculate the trip count (a constant trip count of
    // 0 means the count is unknown), then don't try to vectorize.
    if (TC < 2) {
      emitAnalysis(VectorizationReport() <<
                   "unable to calculate the loop count due to complex "
                   "control flow");
      DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
      return Factor;
    }

    // Find the maximum SIMD width that can fit within the trip count.
    VF = TC % MaxVectorSize;

    if (VF == 0)
      VF = MaxVectorSize;
    else {
      // If the trip count that we found modulo the vectorization factor is not
      // zero then we require a tail.
      emitAnalysis(VectorizationReport() <<
                   "cannot optimize for size and vectorize at the "
                   "same time. Enable vectorization of this loop "
                   "with '#pragma clang loop vectorize(enable)' "
                   "when compiling with -Os/-Oz");
      DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
      return Factor;
    }
  }

  int UserVF = Hints->getWidth();
  if (UserVF != 0) {
    assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
    DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");

    Factor.Width = UserVF;
    return Factor;
  }

  float Cost = expectedCost(1);
#ifndef NDEBUG
  const float ScalarCost = Cost;
#endif /* NDEBUG */
  unsigned Width = 1;
  DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  // Ignore the scalar width, because the user explicitly wants vectorization.
  if (ForceVectorization && VF > 1) {
    Width = 2;
    Cost = expectedCost(Width) / (float)Width;
  }

  for (unsigned i = 2; i <= VF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
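    // For example, with hypothetical costs: if the scalar loop costs 8 per
    // iteration and the VF=4 body costs 20, the per-lane cost 20/4 == 5 beats
    // 8, so width 4 is preferred.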
    float VectorCost = expectedCost(i) / (float)i;
    DEBUG(dbgs() << "LV: Vector loop of width " << i << " costs: " <<
          (int)VectorCost << ".\n");
    if (VectorCost < Cost) {
      Cost = VectorCost;
      Width = i;
    }
  }

  DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
        << "LV: Vectorization seems to be not beneficial, "
        << "but was forced by a user.\n");
  DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
  Factor.Width = Width;
  Factor.Cost = Width * Cost;
  return Factor;
}

unsigned LoopVectorizationCostModel::getWidestType() {
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  // For each block.
  for (Loop::block_iterator bb = TheLoop->block_begin(),
       be = TheLoop->block_end(); bb != be; ++bb) {
    BasicBlock *BB = *bb;

    // For each instruction in the loop.
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
      Type *T = it->getType();

      // Skip ignored values.
      if (ValuesToIgnore.count(&*it))
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(it) && !isa<StoreInst>(it) && !isa<PHINode>(it))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (PHINode *PN = dyn_cast<PHINode>(it)) {
        if (!Legal->getReductionVars()->count(PN))
          continue;
        RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (StoreInst *ST = dyn_cast<StoreInst>(it))
        T = ST->getValueOperand()->getType();

      // Ignore loaded pointer types and stored pointer types that are not
      // consecutive. However, we do want to take consecutive stores/loads of
      // pointer vectors into account.
      if (T->isPointerTy() && !isConsecutiveLoadOrStore(&*it))
        continue;

      MaxWidth = std::max(MaxWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
    }
  }

  return MaxWidth;
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
                                                           unsigned VF,
                                                           unsigned LoopCost) {
  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop overhead.
  // There are many micro-architectural considerations that we can't predict
  // at this level. For example, frontend pressure (on decode or fetch) due to
  // code size, or the number and capabilities of the execution ports.
  //
  // We use the following heuristics to select the interleave count:
  //   1. If the code has reductions, then we interleave to break the cross
  //      iteration dependency.
  //   2. If the loop is really small, then we interleave to reduce the loop
  //      overhead.
  //   3. We don't interleave if we think that we will spill registers to
  //      memory due to the increased register pressure.

  // When we optimize for size, we don't interleave.
  if (OptForSize)
    return 1;

  // The VF was already limited by the maximum safe dependence distance;
  // interleaving would widen the range of memory accessed per iteration
  // beyond that distance, so do not interleave.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  // Do not interleave loops with a relatively small trip count.
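  // (For illustration: a loop known to run only, say, 8 iterations amortizes
  // almost no overhead across extra interleaved copies, and a high interleave
  // count would leave most iterations to the scalar epilogue.)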
  unsigned TC = SE->getSmallConstantTripCount(TheLoop);
  if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
    return 1;

  unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
  DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters <<
        " registers\n");

  if (VF == 1) {
    if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumScalarRegs;
  } else {
    if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumVectorRegs;
  }

  LoopVectorizationCostModel::RegisterUsage R = calculateRegisterUsage();
  // We divide by these constants so assume that we have at least one
  // instruction that uses at least one register.
  R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
  R.NumInstructions = std::max(R.NumInstructions, 1U);

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want power-of-two interleave counts to simplify any
  // addressing operations or alignment considerations.
  unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
                              R.MaxLocalUsers);

  // Don't count the induction variable as interleaved.
  if (EnableIndVarRegisterHeur)
    IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
                       std::max(1U, (R.MaxLocalUsers - 1)));

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);

  // Check if the user has overridden the max.
  if (VF == 1) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }

  // If we did not calculate the cost for VF (because the user selected the VF)
  // then we calculate the cost of VF here.
  if (LoopCost == 0)
    LoopCost = expectedCost(VF);

  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target allows.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else if (IC < 1)
    IC = 1;

  // Interleave if we vectorized this loop and there is a reduction that could
  // benefit from interleaving.
  if (VF > 1 && Legal->getReductionVars()->size()) {
    DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // Note that if we've already vectorized the loop we will have done the
  // runtime check and so interleaving won't require further checks.
  bool InterleavingRequiresRuntimePointerCheck =
      (VF == 1 && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead
  // and potentially expose ILP opportunities.
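  // For example, with hypothetical numbers: if SmallLoopCost is 20 and the
  // loop body costs 5, we interleave by PowerOf2Floor(20 / 5) == 4, so the
  // assumed per-iteration overhead of 1 is roughly 5% of the total work.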
  DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the cost overhead is 1 and we use the cost model
    // to estimate the cost of the loop and interleave until the cost of the
    // loop overhead is about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by the max interleave
    // count) are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit it, by default, to 2,
    // so the critical path only gets increased by one reduction operation.
    if (Legal->getReductionVars()->size() &&
        TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
    return SmallIC;
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  bool HasReductions = (Legal->getReductionVars()->size() > 0);
  if (TTI.enableAggressiveInterleaving(HasReductions)) {
    DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

LoopVectorizationCostModel::RegisterUsage
LoopVectorizationCostModel::calculateRegisterUsage() {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is a
  // very rough estimation. We scan the loop in topological order and assign a
  // number to each instruction. We use RPO to ensure that defs are met before
  // their users. We assume that each instruction that has in-loop users starts
  // an interval. We record every time that an in-loop value is used, so we
  // have a list of the first and last occurrences of each instruction. Next,
  // we transpose this data structure into a multi map that holds the list of
  // intervals that *end* at a specific location. This multi map allows us to
  // perform a linear search. We scan the instructions linearly and record each
  // time that a new interval starts, by placing it in a set. If we find this
  // value in the multi-map then we remove it from the set. The max register
  // usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but are
  // used inside the loop. We need this number separately from the max-interval
  // usage number because, when we unroll, loop-invariant values do not take
  // more registers.
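  // A small illustration on hypothetical IR:
  //   %a = load ...
  //   %b = load ...
  //   %c = add %a, %b
  //   store %c, ...
  // The intervals of %a and %b overlap at %c, so MaxLocalUsers == 2, while a
  // value defined outside the loop would be counted in LoopInvariantRegs
  // instead.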
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage R;
  R.NumInstructions = 0;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  typedef DenseMap<Instruction*, unsigned> IntervalMap;
  // Maps an instruction index to the instruction at that index.
  DenseMap<unsigned, Instruction*> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the list of instructions that are used in the loop.
  SmallSet<Instruction*, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value*, 8> LoopInvariants;

  unsigned Index = 0;
  for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(),
       be = DFS.endRPO(); bb != be; ++bb) {
    R.NumInstructions += (*bb)->size();
    for (Instruction &I : **bb) {
      IdxToInstr[Index++] = &I;

      // Save the end location of each USE.
      for (unsigned i = 0; i < I.getNumOperands(); ++i) {
        Value *U = I.getOperand(i);
        Instruction *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr) continue;

        // If this instruction is outside the loop then record it and continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = Index;
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  typedef SmallVector<Instruction*, 2> InstrList;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (IntervalMap::iterator it = EndPoint.begin(), e = EndPoint.end();
       it != e; ++it)
    TransposeEnds[it->second].push_back(it->first);

  SmallSet<Instruction*, 8> OpenIntervals;
  unsigned MaxUsage = 0;

  DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
  for (unsigned int i = 0; i < Index; ++i) {
    Instruction *I = IdxToInstr[i];
    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I)) continue;

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (unsigned int j = 0, e = List.size(); j < e; ++j)
      OpenIntervals.erase(List[j]);

    // Count the number of live intervals.
    MaxUsage = std::max(MaxUsage, OpenIntervals.size());

    DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " <<
          OpenIntervals.size() << '\n');

    // Add the current instruction to the list of open intervals.
    OpenIntervals.insert(I);
  }

  unsigned Invariant = LoopInvariants.size();
  DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsage << '\n');
  DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');
  DEBUG(dbgs() << "LV(REG): LoopSize: " << R.NumInstructions << '\n');

  R.LoopInvariantRegs = Invariant;
  R.MaxLocalUsers = MaxUsage;
  return R;
}

unsigned LoopVectorizationCostModel::expectedCost(unsigned VF) {
  unsigned Cost = 0;

  // For each block.
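  // (For illustration, with hypothetical costs: a loop with an unconditional
  // block costing 12 and an if-converted block costing 6 is estimated at
  // 12 + 6/2 = 15 for VF == 1, since predicated blocks are assumed below to
  // execute half the time in scalar code.)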
  for (Loop::block_iterator bb = TheLoop->block_begin(),
       be = TheLoop->block_end(); bb != be; ++bb) {
    unsigned BlockCost = 0;
    BasicBlock *BB = *bb;

    // For each instruction in the old loop.
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
      // Skip dbg intrinsics.
      if (isa<DbgInfoIntrinsic>(it))
        continue;

      // Skip ignored values.
      if (ValuesToIgnore.count(&*it))
        continue;

      unsigned C = getInstructionCost(&*it, VF);

      // Check if we should override the cost.
      if (ForceTargetInstructionCost.getNumOccurrences() > 0)
        C = ForceTargetInstructionCost;

      BlockCost += C;
      DEBUG(dbgs() << "LV: Found an estimated cost of " << C << " for VF " <<
            VF << " For instruction: " << *it << '\n');
    }

    // We assume that if-converted blocks have a 50% chance of being executed.
    // When the code is scalar then some of the blocks are avoided due to CF.
    // When the code is vectorized we execute all code paths.
    if (VF == 1 && Legal->blockNeedsPredication(*bb))
      BlockCost /= 2;

    Cost += BlockCost;
  }

  return Cost;
}

/// \brief Check whether the address computation for a non-consecutive memory
/// access looks like an unlikely candidate for being merged into the indexing
/// mode.
///
/// We look for a GEP which has one index that is an induction variable and all
/// other indices are loop invariant. If the stride of this access is also
/// within a small bound we decide that this address computation can likely be
/// merged into the addressing mode.
/// In all other cases, we identify the address computation as complex.
static bool isLikelyComplexAddressComputation(Value *Ptr,
                                              LoopVectorizationLegality *Legal,
                                              ScalarEvolution *SE,
                                              const Loop *TheLoop) {
  GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  if (!Gep)
    return true;

  // We are looking for a GEP with all loop invariant indices except for one
  // which should be an induction variable.
  unsigned NumOperands = Gep->getNumOperands();
  for (unsigned i = 1; i < NumOperands; ++i) {
    Value *Opd = Gep->getOperand(i);
    if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
        !Legal->isInductionVariable(Opd))
      return true;
  }

  // Now we know we have a GEP of the form ptr, %inv, %ind, %inv. Make sure
  // that the step can likely be merged into the address computation.
  unsigned MaxMergeDistance = 64;

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Ptr));
  if (!AddRec)
    return true;

  // Check that the step is constant.
  const SCEV *Step = AddRec->getStepRecurrence(*SE);
  // Calculate the pointer stride and check if it is consecutive.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C)
    return true;

  const APInt &APStepVal = C->getValue()->getValue();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return true;

  int64_t StepVal = APStepVal.getSExtValue();

  return StepVal > MaxMergeDistance;
}

static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
  return Legal->hasStride(I->getOperand(0)) ||
         Legal->hasStride(I->getOperand(1));
}

unsigned
LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
  // If we know that this instruction will remain uniform, check the cost of
  // the scalar version.
  if (Legal->isUniformAfterVectorization(I))
    VF = 1;

  Type *RetTy = I->getType();
  if (VF > 1 && MinBWs.count(I))
    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  Type *VectorTy = ToVectorTy(RetTy, VF);

  // TODO: We need to estimate the cost of intrinsic calls.
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    // We mark this instruction as zero-cost because the cost of GEPs in
    // vectorized code depends on whether the corresponding memory instruction
    // is scalarized or not. Therefore, we handle GEPs with the memory
    // instruction cost.
    return 0;
  case Instruction::Br: {
    return TTI.getCFInstrCost(I->getOpcode());
  }
  case Instruction::PHI:
    // TODO: IF-converted IFs become selects.
    return 0;
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go
    // away.
    if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
      return 0;
    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueProperties Op1VP =
        TargetTransformInfo::OP_None;
    TargetTransformInfo::OperandValueProperties Op2VP =
        TargetTransformInfo::OP_None;
    Value *Op2 = I->getOperand(1);

    // Check for a splat of a constant or for a non-uniform vector of
    // constants.
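    // E.g. (illustrative IR) 'shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>'
    // has a uniform (splat) power-of-two constant operand, while a mask like
    // <i32 1, i32 2, i32 3, i32 4> is a non-uniform constant vector.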
    if (isa<ConstantInt>(Op2)) {
      ConstantInt *CInt = cast<ConstantInt>(Op2);
      if (CInt && CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;
      Op2VK = TargetTransformInfo::OK_UniformConstantValue;
    } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
      Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      Constant *SplatValue = cast<Constant>(Op2)->getSplatValue();
      if (SplatValue) {
        ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue);
        if (CInt && CInt->getValue().isPowerOf2())
          Op2VP = TargetTransformInfo::OP_PowerOf2;
        Op2VK = TargetTransformInfo::OK_UniformConstantValue;
      }
    }

    return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, Op2VK,
                                      Op1VP, Op2VP);
  }
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
    bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
    Type *CondTy = SI->getCondition()->getType();
    if (!ScalarCond)
      CondTy = VectorType::get(CondTy, VF);

    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy);
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ValTy = I->getOperand(0)->getType();
    if (VF > 1 && MinBWs.count(dyn_cast<Instruction>(I->getOperand(0))))
      ValTy = IntegerType::get(ValTy->getContext(), MinBWs[I]);
    VectorTy = ToVectorTy(ValTy, VF);
    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy);
  }
  case Instruction::Store:
  case Instruction::Load: {
    StoreInst *SI = dyn_cast<StoreInst>(I);
    LoadInst *LI = dyn_cast<LoadInst>(I);
    Type *ValTy = (SI ? SI->getValueOperand()->getType() :
                   LI->getType());
    VectorTy = ToVectorTy(ValTy, VF);

    unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment();
    unsigned AS = SI ? SI->getPointerAddressSpace() :
                  LI->getPointerAddressSpace();
    Value *Ptr = SI ? SI->getPointerOperand() : LI->getPointerOperand();
    // We add the cost of address computation here instead of with the GEP
    // instruction because only here do we know whether the operation is
    // scalarized.
    if (VF == 1)
      return TTI.getAddressComputationCost(VectorTy) +
             TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);

    // For an interleaved access, calculate the total cost of the whole
    // interleave group.
    if (Legal->isAccessInterleaved(I)) {
      auto Group = Legal->getInterleavedAccessGroup(I);
      assert(Group && "Fail to get an interleaved access group.");

      // Only calculate the cost once, at the insert position.
      if (Group->getInsertPos() != I)
        return 0;

      unsigned InterleaveFactor = Group->getFactor();
      Type *WideVecTy =
          VectorType::get(VectorTy->getVectorElementType(),
                          VectorTy->getVectorNumElements() * InterleaveFactor);

      // Holds the indices of existing members in an interleaved load group.
      // An interleaved store group doesn't need this as it doesn't allow gaps.
      SmallVector<unsigned, 4> Indices;
      if (LI) {
        for (unsigned i = 0; i < InterleaveFactor; i++)
          if (Group->getMember(i))
            Indices.push_back(i);
      }

      // Calculate the cost of the whole interleaved group.
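      // For example (hypothetical types): a factor-2 group of <4 x i32> loads
      // is roughly costed as one wide <8 x i32> load plus the shuffles needed
      // to de-interleave the two members.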
      unsigned Cost = TTI.getInterleavedMemoryOpCost(
          I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
          Group->getAlignment(), AS);

      if (Group->isReverse())
        Cost +=
            Group->getNumMembers() *
            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);

      // FIXME: An interleaved load group with a huge gap could be even more
      // expensive than scalar operations. Then we could ignore such a group
      // and use scalar operations instead.
      return Cost;
    }

    // Scalarized loads/stores.
    int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
    bool Reverse = ConsecutiveStride < 0;
    const DataLayout &DL = I->getModule()->getDataLayout();
    unsigned ScalarAllocatedSize = DL.getTypeAllocSize(ValTy);
    unsigned VectorElementSize = DL.getTypeStoreSize(VectorTy) / VF;
    if (!ConsecutiveStride || ScalarAllocatedSize != VectorElementSize) {
      bool IsComplexComputation =
          isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop);
      unsigned Cost = 0;
      // The cost of extracting from the value vector and pointer vector.
      Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
      for (unsigned i = 0; i < VF; ++i) {
        // The cost of extracting the pointer operand.
        Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, PtrTy, i);
        // In case of STORE, the cost of ExtractElement from the vector.
        // In case of LOAD, the cost of InsertElement into the returned
        // vector.
        Cost += TTI.getVectorInstrCost(SI ? Instruction::ExtractElement :
                                            Instruction::InsertElement,
                                       VectorTy, i);
      }

      // The cost of the scalar loads/stores.
      Cost += VF * TTI.getAddressComputationCost(PtrTy, IsComplexComputation);
      Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
                                       Alignment, AS);
      return Cost;
    }

    // Wide loads/stores.
    unsigned Cost = TTI.getAddressComputationCost(VectorTy);
    if (Legal->isMaskRequired(I))
      Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment,
                                        AS);
    else
      Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);

    if (Reverse)
      Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse,
                                 VectorTy, 0);
    return Cost;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    // We optimize the truncation of induction variables.
    // The cost of these is the same as the scalar operation.
    if (I->getOpcode() == Instruction::Trunc &&
        Legal->isInductionVariable(I->getOperand(0)))
      return TTI.getCastInstrCost(I->getOpcode(), I->getType(),
                                  I->getOperand(0)->getType());

    Type *SrcScalarTy = I->getOperand(0)->getType();
    Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF);
    if (VF > 1 && MinBWs.count(I)) {
      // This cast is going to be shrunk. This may remove the cast or turn it
      // into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
      //
      // Calculate the modified src and dest types.
      Type *MinVecTy = VectorTy;
      if (I->getOpcode() == Instruction::Trunc) {
        SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy = largestIntegerVectorType(ToVectorTy(I->getType(), VF),
                                            MinVecTy);
      } else if (I->getOpcode() == Instruction::ZExt ||
                 I->getOpcode() == Instruction::SExt) {
        SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy = smallestIntegerVectorType(ToVectorTy(I->getType(), VF),
                                             MinVecTy);
      }
    }

    return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy);
  }
  case Instruction::Call: {
    bool NeedToScalarize;
    CallInst *CI = cast<CallInst>(I);
    unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
    if (getIntrinsicIDForCall(CI, TLI))
      return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
    return CallCost;
  }
  default: {
    // We are scalarizing the instruction. Return the cost of the scalar
    // instruction, plus the cost of insert and extract into vector
    // elements, times the vector width.
    unsigned Cost = 0;

    if (!RetTy->isVoidTy() && VF != 1) {
      unsigned InsCost = TTI.getVectorInstrCost(Instruction::InsertElement,
                                                VectorTy);
      unsigned ExtCost = TTI.getVectorInstrCost(Instruction::ExtractElement,
                                                VectorTy);

      // The cost of inserting the results plus extracting each one of the
      // operands.
      Cost += VF * (InsCost + ExtCost * I->getNumOperands());
    }

    // The cost of executing VF copies of the scalar instruction. This opcode
    // is unknown. Assume that it is the same as 'mul'.
    Cost += VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy);
    return Cost;
  }
  } // end of switch.
}

char LoopVectorize::ID = 0;
static const char lv_name[] = "Loop Vectorization";
INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LoopAccessAnalysis)
INITIALIZE_PASS_DEPENDENCY(DemandedBits)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)

namespace llvm {
Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
  return new LoopVectorize(NoUnrolling, AlwaysVectorize);
}
}

bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
  // Check for a store.
  if (StoreInst *ST = dyn_cast<StoreInst>(Inst))
    return Legal->isConsecutivePtr(ST->getPointerOperand()) != 0;

  // Check for a load.
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
    return Legal->isConsecutivePtr(LI->getPointerOperand()) != 0;

  return false;
}

void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
                                             bool IfPredicateStore) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
  // Holds vector parameters or scalars, in case of uniform vals.
  SmallVector<VectorParts, 4> Params;

  setDebugLocFromInst(Builder, Instr);

  // Find all of the vectorized parameters.
  for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
    Value *SrcOp = Instr->getOperand(op);

    // If we are accessing the old induction variable, use the new one.
    if (SrcOp == OldInduction) {
      Params.push_back(getVectorValue(SrcOp));
      continue;
    }

    // Try using previously calculated values.
    Instruction *SrcInst = dyn_cast<Instruction>(SrcOp);

    // If the src is an instruction that appeared earlier in the basic block
    // then it should already be vectorized.
    if (SrcInst && OrigLoop->contains(SrcInst)) {
      assert(WidenMap.has(SrcInst) && "Source operand is unavailable");
      // The parameter is a vector value from earlier.
      Params.push_back(WidenMap.get(SrcInst));
    } else {
      // The parameter is a scalar from outside the loop. Maybe even a
      // constant.
      VectorParts Scalars;
      Scalars.append(UF, SrcOp);
      Params.push_back(Scalars);
    }
  }

  assert(Params.size() == Instr->getNumOperands() &&
         "Invalid number of operands");

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Value *UndefVec = IsVoidRetTy ? nullptr :
                    UndefValue::get(Instr->getType());
  // Create a new entry in the WidenMap and initialize it to Undef or Null.
  VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);

  VectorParts Cond;
  if (IfPredicateStore) {
    assert(Instr->getParent()->getSinglePredecessor() &&
           "Only support single predecessor blocks");
    Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(),
                          Instr->getParent());
  }

  // For each vector unroll 'part':
  for (unsigned Part = 0; Part < UF; ++Part) {
    // For each scalar that we create:

    // Start an "if (pred) a[i] = ..." block.
    Value *Cmp = nullptr;
    if (IfPredicateStore) {
      if (Cond[Part]->getType()->isVectorTy())
        Cond[Part] =
            Builder.CreateExtractElement(Cond[Part], Builder.getInt32(0));
      Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cond[Part],
                               ConstantInt::get(Cond[Part]->getType(), 1));
    }

    Instruction *Cloned = Instr->clone();
    if (!IsVoidRetTy)
      Cloned->setName(Instr->getName() + ".cloned");
    // Replace the operands of the cloned instruction with extracted scalars.
    for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
      Value *Op = Params[op][Part];
      Cloned->setOperand(op, Op);
    }

    // Place the cloned scalar in the new loop.
    Builder.Insert(Cloned);

    // If the original scalar returns a value we need to place it in a vector
    // so that future users will be able to use it.
    if (!IsVoidRetTy)
      VecResults[Part] = Cloned;

    // End if-block.
    if (IfPredicateStore)
      PredicatedStores.push_back(std::make_pair(cast<StoreInst>(Cloned),
                                                Cmp));
  }
}

void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) {
  StoreInst *SI = dyn_cast<StoreInst>(Instr);
  bool IfPredicateStore = (SI && Legal->blockNeedsPredication(SI->getParent()));

  return scalarizeInstruction(Instr, IfPredicateStore);
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) {
  return Vec;
}

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) {
  return V;
}

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
  Type *ITy = Val->getType();
  assert(!ITy->isVectorTy() && "Val must be a scalar");
  Constant *C = ConstantInt::get(ITy, StartIdx);
  return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
}