1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops 11 // and generates target-independent LLVM-IR. 12 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs 13 // of instructions in order to estimate the profitability of vectorization. 14 // 15 // The loop vectorizer combines consecutive loop iterations into a single 16 // 'wide' iteration. After this transformation the index is incremented 17 // by the SIMD vector width, and not by one. 18 // 19 // This pass has three parts: 20 // 1. The main loop pass that drives the different parts. 21 // 2. LoopVectorizationLegality - A unit that checks for the legality 22 // of the vectorization. 23 // 3. InnerLoopVectorizer - A unit that performs the actual 24 // widening of instructions. 25 // 4. LoopVectorizationCostModel - A unit that checks for the profitability 26 // of vectorization. It decides on the optimal vector width, which 27 // can be one, if vectorization is not profitable. 28 // 29 //===----------------------------------------------------------------------===// 30 // 31 // The reduction-variable vectorization is based on the paper: 32 // D. Nuzman and R. Henderson. Multi-platform Auto-vectorization. 33 // 34 // Variable uniformity checks are inspired by: 35 // Karrenberg, R. and Hack, S. Whole Function Vectorization. 36 // 37 // The interleaved access vectorization is based on the paper: 38 // Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved 39 // Data for SIMD 40 // 41 // Other ideas/concepts are from: 42 // A. Zaks and D. Nuzman. Autovectorization in GCC-two years later. 43 // 44 // S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of 45 // Vectorizing Compilers. 46 // 47 //===----------------------------------------------------------------------===// 48 49 #include "llvm/Transforms/Vectorize/LoopVectorize.h" 50 #include "llvm/ADT/DenseMap.h" 51 #include "llvm/ADT/Hashing.h" 52 #include "llvm/ADT/MapVector.h" 53 #include "llvm/ADT/SetVector.h" 54 #include "llvm/ADT/SmallPtrSet.h" 55 #include "llvm/ADT/SmallSet.h" 56 #include "llvm/ADT/SmallVector.h" 57 #include "llvm/ADT/Statistic.h" 58 #include "llvm/ADT/StringExtras.h" 59 #include "llvm/Analysis/CodeMetrics.h" 60 #include "llvm/Analysis/GlobalsModRef.h" 61 #include "llvm/Analysis/LoopInfo.h" 62 #include "llvm/Analysis/LoopIterator.h" 63 #include "llvm/Analysis/LoopPass.h" 64 #include "llvm/Analysis/ScalarEvolutionExpander.h" 65 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 66 #include "llvm/Analysis/ValueTracking.h" 67 #include "llvm/Analysis/VectorUtils.h" 68 #include "llvm/IR/Constants.h" 69 #include "llvm/IR/DataLayout.h" 70 #include "llvm/IR/DebugInfo.h" 71 #include "llvm/IR/DerivedTypes.h" 72 #include "llvm/IR/DiagnosticInfo.h" 73 #include "llvm/IR/Dominators.h" 74 #include "llvm/IR/Function.h" 75 #include "llvm/IR/IRBuilder.h" 76 #include "llvm/IR/Instructions.h" 77 #include "llvm/IR/IntrinsicInst.h" 78 #include "llvm/IR/LLVMContext.h" 79 #include "llvm/IR/Module.h" 80 #include "llvm/IR/PatternMatch.h" 81 #include "llvm/IR/Type.h" 82 #include "llvm/IR/Value.h" 83 #include "llvm/IR/ValueHandle.h" 84 #include "llvm/IR/Verifier.h" 85 #include "llvm/Pass.h" 86 #include "llvm/Support/BranchProbability.h" 87 #include "llvm/Support/CommandLine.h" 88 #include "llvm/Support/Debug.h" 89 #include "llvm/Support/raw_ostream.h" 90 #include "llvm/Transforms/Scalar.h" 91 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 92 #include "llvm/Transforms/Utils/Local.h" 93 #include "llvm/Transforms/Utils/LoopUtils.h" 94 #include "llvm/Transforms/Utils/LoopVersioning.h" 95 #include "llvm/Transforms/Vectorize.h" 96 #include <algorithm> 97 #include <map> 98 #include <tuple> 99 100 using namespace llvm; 101 using namespace llvm::PatternMatch; 102 103 #define LV_NAME "loop-vectorize" 104 #define DEBUG_TYPE LV_NAME 105 106 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 107 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 108 109 static cl::opt<bool> 110 EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden, 111 cl::desc("Enable if-conversion during vectorization.")); 112 113 /// We don't vectorize loops with a known constant trip count below this number. 114 static cl::opt<unsigned> TinyTripCountVectorThreshold( 115 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 116 cl::desc("Don't vectorize loops with a constant " 117 "trip count that is smaller than this " 118 "value.")); 119 120 static cl::opt<bool> MaximizeBandwidth( 121 "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, 122 cl::desc("Maximize bandwidth when selecting vectorization factor which " 123 "will be determined by the smallest type in loop.")); 124 125 static cl::opt<bool> EnableInterleavedMemAccesses( 126 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, 127 cl::desc("Enable vectorization on interleaved memory accesses in a loop")); 128 129 /// Maximum factor for an interleaved memory access. 130 static cl::opt<unsigned> MaxInterleaveGroupFactor( 131 "max-interleave-group-factor", cl::Hidden, 132 cl::desc("Maximum factor for an interleaved access group (default = 8)"), 133 cl::init(8)); 134 135 /// We don't interleave loops with a known constant trip count below this 136 /// number. 137 static const unsigned TinyTripCountInterleaveThreshold = 128; 138 139 static cl::opt<unsigned> ForceTargetNumScalarRegs( 140 "force-target-num-scalar-regs", cl::init(0), cl::Hidden, 141 cl::desc("A flag that overrides the target's number of scalar registers.")); 142 143 static cl::opt<unsigned> ForceTargetNumVectorRegs( 144 "force-target-num-vector-regs", cl::init(0), cl::Hidden, 145 cl::desc("A flag that overrides the target's number of vector registers.")); 146 147 /// Maximum vectorization interleave count. 148 static const unsigned MaxInterleaveFactor = 16; 149 150 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor( 151 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden, 152 cl::desc("A flag that overrides the target's max interleave factor for " 153 "scalar loops.")); 154 155 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor( 156 "force-target-max-vector-interleave", cl::init(0), cl::Hidden, 157 cl::desc("A flag that overrides the target's max interleave factor for " 158 "vectorized loops.")); 159 160 static cl::opt<unsigned> ForceTargetInstructionCost( 161 "force-target-instruction-cost", cl::init(0), cl::Hidden, 162 cl::desc("A flag that overrides the target's expected cost for " 163 "an instruction to a single constant value. Mostly " 164 "useful for getting consistent testing.")); 165 166 static cl::opt<unsigned> SmallLoopCost( 167 "small-loop-cost", cl::init(20), cl::Hidden, 168 cl::desc( 169 "The cost of a loop that is considered 'small' by the interleaver.")); 170 171 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 172 "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden, 173 cl::desc("Enable the use of the block frequency analysis to access PGO " 174 "heuristics minimizing code growth in cold regions and being more " 175 "aggressive in hot regions.")); 176 177 // Runtime interleave loops for load/store throughput. 178 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 179 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 180 cl::desc( 181 "Enable runtime interleaving until load/store ports are saturated")); 182 183 /// The number of stores in a loop that are allowed to need predication. 184 static cl::opt<unsigned> NumberOfStoresToPredicate( 185 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 186 cl::desc("Max number of stores to be predicated behind an if.")); 187 188 static cl::opt<bool> EnableIndVarRegisterHeur( 189 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 190 cl::desc("Count the induction variable only once when interleaving")); 191 192 static cl::opt<bool> EnableCondStoresVectorization( 193 "enable-cond-stores-vec", cl::init(false), cl::Hidden, 194 cl::desc("Enable if predication of stores during vectorization.")); 195 196 static cl::opt<unsigned> MaxNestedScalarReductionIC( 197 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 198 cl::desc("The maximum interleave count to use when interleaving a scalar " 199 "reduction in a nested loop.")); 200 201 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold( 202 "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden, 203 cl::desc("The maximum allowed number of runtime memory checks with a " 204 "vectorize(enable) pragma.")); 205 206 static cl::opt<unsigned> VectorizeSCEVCheckThreshold( 207 "vectorize-scev-check-threshold", cl::init(16), cl::Hidden, 208 cl::desc("The maximum number of SCEV checks allowed.")); 209 210 static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold( 211 "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden, 212 cl::desc("The maximum number of SCEV checks allowed with a " 213 "vectorize(enable) pragma")); 214 215 namespace { 216 217 // Forward declarations. 218 class LoopVectorizeHints; 219 class LoopVectorizationLegality; 220 class LoopVectorizationCostModel; 221 class LoopVectorizationRequirements; 222 223 /// \brief This modifies LoopAccessReport to initialize message with 224 /// loop-vectorizer-specific part. 225 class VectorizationReport : public LoopAccessReport { 226 public: 227 VectorizationReport(Instruction *I = nullptr) 228 : LoopAccessReport("loop not vectorized: ", I) {} 229 230 /// \brief This allows promotion of the loop-access analysis report into the 231 /// loop-vectorizer report. It modifies the message to add the 232 /// loop-vectorizer-specific part of the message. 233 explicit VectorizationReport(const LoopAccessReport &R) 234 : LoopAccessReport(Twine("loop not vectorized: ") + R.str(), 235 R.getInstr()) {} 236 }; 237 238 /// A helper function for converting Scalar types to vector types. 239 /// If the incoming type is void, we return void. If the VF is 1, we return 240 /// the scalar type. 241 static Type *ToVectorTy(Type *Scalar, unsigned VF) { 242 if (Scalar->isVoidTy() || VF == 1) 243 return Scalar; 244 return VectorType::get(Scalar, VF); 245 } 246 247 /// A helper function that returns GEP instruction and knows to skip a 248 /// 'bitcast'. The 'bitcast' may be skipped if the source and the destination 249 /// pointee types of the 'bitcast' have the same size. 250 /// For example: 251 /// bitcast double** %var to i64* - can be skipped 252 /// bitcast double** %var to i8* - can not 253 static GetElementPtrInst *getGEPInstruction(Value *Ptr) { 254 255 if (isa<GetElementPtrInst>(Ptr)) 256 return cast<GetElementPtrInst>(Ptr); 257 258 if (isa<BitCastInst>(Ptr) && 259 isa<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0))) { 260 Type *BitcastTy = Ptr->getType(); 261 Type *GEPTy = cast<BitCastInst>(Ptr)->getSrcTy(); 262 if (!isa<PointerType>(BitcastTy) || !isa<PointerType>(GEPTy)) 263 return nullptr; 264 Type *Pointee1Ty = cast<PointerType>(BitcastTy)->getPointerElementType(); 265 Type *Pointee2Ty = cast<PointerType>(GEPTy)->getPointerElementType(); 266 const DataLayout &DL = cast<BitCastInst>(Ptr)->getModule()->getDataLayout(); 267 if (DL.getTypeSizeInBits(Pointee1Ty) == DL.getTypeSizeInBits(Pointee2Ty)) 268 return cast<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0)); 269 } 270 return nullptr; 271 } 272 273 /// InnerLoopVectorizer vectorizes loops which contain only one basic 274 /// block to a specified vectorization factor (VF). 275 /// This class performs the widening of scalars into vectors, or multiple 276 /// scalars. This class also implements the following features: 277 /// * It inserts an epilogue loop for handling loops that don't have iteration 278 /// counts that are known to be a multiple of the vectorization factor. 279 /// * It handles the code generation for reduction variables. 280 /// * Scalarization (implementation using scalars) of un-vectorizable 281 /// instructions. 282 /// InnerLoopVectorizer does not perform any vectorization-legality 283 /// checks, and relies on the caller to check for the different legality 284 /// aspects. The InnerLoopVectorizer relies on the 285 /// LoopVectorizationLegality class to provide information about the induction 286 /// and reduction variables that were found to a given vectorization factor. 287 class InnerLoopVectorizer { 288 public: 289 InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, 290 LoopInfo *LI, DominatorTree *DT, 291 const TargetLibraryInfo *TLI, 292 const TargetTransformInfo *TTI, AssumptionCache *AC, 293 OptimizationRemarkEmitter *ORE, unsigned VecWidth, 294 unsigned UnrollFactor) 295 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI), 296 AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor), 297 Builder(PSE.getSE()->getContext()), Induction(nullptr), 298 OldInduction(nullptr), WidenMap(UnrollFactor), TripCount(nullptr), 299 VectorTripCount(nullptr), Legal(nullptr), AddedSafetyChecks(false) {} 300 301 // Perform the actual loop widening (vectorization). 302 // MinimumBitWidths maps scalar integer values to the smallest bitwidth they 303 // can be validly truncated to. The cost model has assumed this truncation 304 // will happen when vectorizing. VecValuesToIgnore contains scalar values 305 // that the cost model has chosen to ignore because they will not be 306 // vectorized. 307 void vectorize(LoopVectorizationLegality *L, 308 const MapVector<Instruction *, uint64_t> &MinimumBitWidths, 309 SmallPtrSetImpl<const Value *> &VecValuesToIgnore) { 310 MinBWs = &MinimumBitWidths; 311 ValuesNotWidened = &VecValuesToIgnore; 312 Legal = L; 313 // Create a new empty loop. Unlink the old loop and connect the new one. 314 createEmptyLoop(); 315 // Widen each instruction in the old loop to a new one in the new loop. 316 // Use the Legality module to find the induction and reduction variables. 317 vectorizeLoop(); 318 } 319 320 // Return true if any runtime check is added. 321 bool areSafetyChecksAdded() { return AddedSafetyChecks; } 322 323 virtual ~InnerLoopVectorizer() {} 324 325 protected: 326 /// A small list of PHINodes. 327 typedef SmallVector<PHINode *, 4> PhiVector; 328 /// When we unroll loops we have multiple vector values for each scalar. 329 /// This data structure holds the unrolled and vectorized values that 330 /// originated from one scalar instruction. 331 typedef SmallVector<Value *, 2> VectorParts; 332 333 // When we if-convert we need to create edge masks. We have to cache values 334 // so that we don't end up with exponential recursion/IR. 335 typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts> 336 EdgeMaskCache; 337 338 /// Create an empty loop, based on the loop ranges of the old loop. 339 void createEmptyLoop(); 340 341 /// Set up the values of the IVs correctly when exiting the vector loop. 342 void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, 343 Value *CountRoundDown, Value *EndValue, 344 BasicBlock *MiddleBlock); 345 346 /// Create a new induction variable inside L. 347 PHINode *createInductionVariable(Loop *L, Value *Start, Value *End, 348 Value *Step, Instruction *DL); 349 /// Copy and widen the instructions from the old loop. 350 virtual void vectorizeLoop(); 351 352 /// Fix a first-order recurrence. This is the second phase of vectorizing 353 /// this phi node. 354 void fixFirstOrderRecurrence(PHINode *Phi); 355 356 /// \brief The Loop exit block may have single value PHI nodes where the 357 /// incoming value is 'Undef'. While vectorizing we only handled real values 358 /// that were defined inside the loop. Here we fix the 'undef case'. 359 /// See PR14725. 360 void fixLCSSAPHIs(); 361 362 /// Shrinks vector element sizes based on information in "MinBWs". 363 void truncateToMinimalBitwidths(); 364 365 /// A helper function that computes the predicate of the block BB, assuming 366 /// that the header block of the loop is set to True. It returns the *entry* 367 /// mask for the block BB. 368 VectorParts createBlockInMask(BasicBlock *BB); 369 /// A helper function that computes the predicate of the edge between SRC 370 /// and DST. 371 VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst); 372 373 /// A helper function to vectorize a single BB within the innermost loop. 374 void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV); 375 376 /// Vectorize a single PHINode in a block. This method handles the induction 377 /// variable canonicalization. It supports both VF = 1 for unrolled loops and 378 /// arbitrary length vectors. 379 void widenPHIInstruction(Instruction *PN, VectorParts &Entry, unsigned UF, 380 unsigned VF, PhiVector *PV); 381 382 /// Insert the new loop to the loop hierarchy and pass manager 383 /// and update the analysis passes. 384 void updateAnalysis(); 385 386 /// This instruction is un-vectorizable. Implement it as a sequence 387 /// of scalars. If \p IfPredicateStore is true we need to 'hide' each 388 /// scalarized instruction behind an if block predicated on the control 389 /// dependence of the instruction. 390 virtual void scalarizeInstruction(Instruction *Instr, 391 bool IfPredicateStore = false); 392 393 /// Vectorize Load and Store instructions, 394 virtual void vectorizeMemoryInstruction(Instruction *Instr); 395 396 /// Create a broadcast instruction. This method generates a broadcast 397 /// instruction (shuffle) for loop invariant values and for the induction 398 /// value. If this is the induction variable then we extend it to N, N+1, ... 399 /// this is needed because each iteration in the loop corresponds to a SIMD 400 /// element. 401 virtual Value *getBroadcastInstrs(Value *V); 402 403 /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...) 404 /// to each vector element of Val. The sequence starts at StartIndex. 405 /// \p Opcode is relevant for FP induction variable. 406 virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step, 407 Instruction::BinaryOps Opcode = 408 Instruction::BinaryOpsEnd); 409 410 /// Compute scalar induction steps. \p ScalarIV is the scalar induction 411 /// variable on which to base the steps, \p Step is the size of the step, and 412 /// \p EntryVal is the value from the original loop that maps to the steps. 413 /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it 414 /// can be a truncate instruction). 415 void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal); 416 417 /// Create a vector induction phi node based on an existing scalar one. This 418 /// currently only works for integer induction variables with a constant 419 /// step. If \p TruncType is non-null, instead of widening the original IV, 420 /// we widen a version of the IV truncated to \p TruncType. 421 void createVectorIntInductionPHI(const InductionDescriptor &II, 422 VectorParts &Entry, IntegerType *TruncType); 423 424 /// Widen an integer induction variable \p IV. If \p Trunc is provided, the 425 /// induction variable will first be truncated to the corresponding type. The 426 /// widened values are placed in \p Entry. 427 void widenIntInduction(PHINode *IV, VectorParts &Entry, 428 TruncInst *Trunc = nullptr); 429 430 /// When we go over instructions in the basic block we rely on previous 431 /// values within the current basic block or on loop invariant values. 432 /// When we widen (vectorize) values we place them in the map. If the values 433 /// are not within the map, they have to be loop invariant, so we simply 434 /// broadcast them into a vector. 435 VectorParts &getVectorValue(Value *V); 436 437 /// Try to vectorize the interleaved access group that \p Instr belongs to. 438 void vectorizeInterleaveGroup(Instruction *Instr); 439 440 /// Generate a shuffle sequence that will reverse the vector Vec. 441 virtual Value *reverseVector(Value *Vec); 442 443 /// Returns (and creates if needed) the original loop trip count. 444 Value *getOrCreateTripCount(Loop *NewLoop); 445 446 /// Returns (and creates if needed) the trip count of the widened loop. 447 Value *getOrCreateVectorTripCount(Loop *NewLoop); 448 449 /// Emit a bypass check to see if the trip count would overflow, or we 450 /// wouldn't have enough iterations to execute one vector loop. 451 void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass); 452 /// Emit a bypass check to see if the vector trip count is nonzero. 453 void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass); 454 /// Emit a bypass check to see if all of the SCEV assumptions we've 455 /// had to make are correct. 456 void emitSCEVChecks(Loop *L, BasicBlock *Bypass); 457 /// Emit bypass checks to check any memory assumptions we may have made. 458 void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass); 459 460 /// Add additional metadata to \p To that was not present on \p Orig. 461 /// 462 /// Currently this is used to add the noalias annotations based on the 463 /// inserted memchecks. Use this for instructions that are *cloned* into the 464 /// vector loop. 465 void addNewMetadata(Instruction *To, const Instruction *Orig); 466 467 /// Add metadata from one instruction to another. 468 /// 469 /// This includes both the original MDs from \p From and additional ones (\see 470 /// addNewMetadata). Use this for *newly created* instructions in the vector 471 /// loop. 472 void addMetadata(Instruction *To, Instruction *From); 473 474 /// \brief Similar to the previous function but it adds the metadata to a 475 /// vector of instructions. 476 void addMetadata(ArrayRef<Value *> To, Instruction *From); 477 478 /// This is a helper class that holds the vectorizer state. It maps scalar 479 /// instructions to vector instructions. When the code is 'unrolled' then 480 /// then a single scalar value is mapped to multiple vector parts. The parts 481 /// are stored in the VectorPart type. 482 struct ValueMap { 483 /// C'tor. UnrollFactor controls the number of vectors ('parts') that 484 /// are mapped. 485 ValueMap(unsigned UnrollFactor) : UF(UnrollFactor) {} 486 487 /// \return True if 'Key' is saved in the Value Map. 488 bool has(Value *Key) const { return MapStorage.count(Key); } 489 490 /// Initializes a new entry in the map. Sets all of the vector parts to the 491 /// save value in 'Val'. 492 /// \return A reference to a vector with splat values. 493 VectorParts &splat(Value *Key, Value *Val) { 494 VectorParts &Entry = MapStorage[Key]; 495 Entry.assign(UF, Val); 496 return Entry; 497 } 498 499 ///\return A reference to the value that is stored at 'Key'. 500 VectorParts &get(Value *Key) { 501 VectorParts &Entry = MapStorage[Key]; 502 if (Entry.empty()) 503 Entry.resize(UF); 504 assert(Entry.size() == UF); 505 return Entry; 506 } 507 508 private: 509 /// The unroll factor. Each entry in the map stores this number of vector 510 /// elements. 511 unsigned UF; 512 513 /// Map storage. We use std::map and not DenseMap because insertions to a 514 /// dense map invalidates its iterators. 515 std::map<Value *, VectorParts> MapStorage; 516 }; 517 518 /// The original loop. 519 Loop *OrigLoop; 520 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies 521 /// dynamic knowledge to simplify SCEV expressions and converts them to a 522 /// more usable form. 523 PredicatedScalarEvolution &PSE; 524 /// Loop Info. 525 LoopInfo *LI; 526 /// Dominator Tree. 527 DominatorTree *DT; 528 /// Alias Analysis. 529 AliasAnalysis *AA; 530 /// Target Library Info. 531 const TargetLibraryInfo *TLI; 532 /// Target Transform Info. 533 const TargetTransformInfo *TTI; 534 /// Assumption Cache. 535 AssumptionCache *AC; 536 /// Interface to emit optimization remarks. 537 OptimizationRemarkEmitter *ORE; 538 539 /// \brief LoopVersioning. It's only set up (non-null) if memchecks were 540 /// used. 541 /// 542 /// This is currently only used to add no-alias metadata based on the 543 /// memchecks. The actually versioning is performed manually. 544 std::unique_ptr<LoopVersioning> LVer; 545 546 /// The vectorization SIMD factor to use. Each vector will have this many 547 /// vector elements. 548 unsigned VF; 549 550 protected: 551 /// The vectorization unroll factor to use. Each scalar is vectorized to this 552 /// many different vector instructions. 553 unsigned UF; 554 555 /// The builder that we use 556 IRBuilder<> Builder; 557 558 // --- Vectorization state --- 559 560 /// The vector-loop preheader. 561 BasicBlock *LoopVectorPreHeader; 562 /// The scalar-loop preheader. 563 BasicBlock *LoopScalarPreHeader; 564 /// Middle Block between the vector and the scalar. 565 BasicBlock *LoopMiddleBlock; 566 /// The ExitBlock of the scalar loop. 567 BasicBlock *LoopExitBlock; 568 /// The vector loop body. 569 BasicBlock *LoopVectorBody; 570 /// The scalar loop body. 571 BasicBlock *LoopScalarBody; 572 /// A list of all bypass blocks. The first block is the entry of the loop. 573 SmallVector<BasicBlock *, 4> LoopBypassBlocks; 574 575 /// The new Induction variable which was added to the new block. 576 PHINode *Induction; 577 /// The induction variable of the old basic block. 578 PHINode *OldInduction; 579 /// Maps scalars to widened vectors. 580 ValueMap WidenMap; 581 582 /// A map of induction variables from the original loop to their 583 /// corresponding VF * UF scalarized values in the vectorized loop. The 584 /// purpose of ScalarIVMap is similar to that of WidenMap. Whereas WidenMap 585 /// maps original loop values to their vector versions in the new loop, 586 /// ScalarIVMap maps induction variables from the original loop that are not 587 /// vectorized to their scalar equivalents in the vector loop. Maintaining a 588 /// separate map for scalarized induction variables allows us to avoid 589 /// unnecessary scalar-to-vector-to-scalar conversions. 590 DenseMap<Value *, SmallVector<Value *, 8>> ScalarIVMap; 591 592 /// Store instructions that should be predicated, as a pair 593 /// <StoreInst, Predicate> 594 SmallVector<std::pair<StoreInst *, Value *>, 4> PredicatedStores; 595 EdgeMaskCache MaskCache; 596 /// Trip count of the original loop. 597 Value *TripCount; 598 /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)) 599 Value *VectorTripCount; 600 601 /// Map of scalar integer values to the smallest bitwidth they can be legally 602 /// represented as. The vector equivalents of these values should be truncated 603 /// to this type. 604 const MapVector<Instruction *, uint64_t> *MinBWs; 605 606 /// A set of values that should not be widened. This is taken from 607 /// VecValuesToIgnore in the cost model. 608 SmallPtrSetImpl<const Value *> *ValuesNotWidened; 609 610 LoopVectorizationLegality *Legal; 611 612 // Record whether runtime checks are added. 613 bool AddedSafetyChecks; 614 }; 615 616 class InnerLoopUnroller : public InnerLoopVectorizer { 617 public: 618 InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE, 619 LoopInfo *LI, DominatorTree *DT, 620 const TargetLibraryInfo *TLI, 621 const TargetTransformInfo *TTI, AssumptionCache *AC, 622 OptimizationRemarkEmitter *ORE, unsigned UnrollFactor) 623 : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1, 624 UnrollFactor) {} 625 626 private: 627 void scalarizeInstruction(Instruction *Instr, 628 bool IfPredicateStore = false) override; 629 void vectorizeMemoryInstruction(Instruction *Instr) override; 630 Value *getBroadcastInstrs(Value *V) override; 631 Value *getStepVector(Value *Val, int StartIdx, Value *Step, 632 Instruction::BinaryOps Opcode = 633 Instruction::BinaryOpsEnd) override; 634 Value *reverseVector(Value *Vec) override; 635 }; 636 637 /// \brief Look for a meaningful debug location on the instruction or it's 638 /// operands. 639 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) { 640 if (!I) 641 return I; 642 643 DebugLoc Empty; 644 if (I->getDebugLoc() != Empty) 645 return I; 646 647 for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) { 648 if (Instruction *OpInst = dyn_cast<Instruction>(*OI)) 649 if (OpInst->getDebugLoc() != Empty) 650 return OpInst; 651 } 652 653 return I; 654 } 655 656 /// \brief Set the debug location in the builder using the debug location in the 657 /// instruction. 658 static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) { 659 if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) 660 B.SetCurrentDebugLocation(Inst->getDebugLoc()); 661 else 662 B.SetCurrentDebugLocation(DebugLoc()); 663 } 664 665 #ifndef NDEBUG 666 /// \return string containing a file name and a line # for the given loop. 667 static std::string getDebugLocString(const Loop *L) { 668 std::string Result; 669 if (L) { 670 raw_string_ostream OS(Result); 671 if (const DebugLoc LoopDbgLoc = L->getStartLoc()) 672 LoopDbgLoc.print(OS); 673 else 674 // Just print the module name. 675 OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier(); 676 OS.flush(); 677 } 678 return Result; 679 } 680 #endif 681 682 void InnerLoopVectorizer::addNewMetadata(Instruction *To, 683 const Instruction *Orig) { 684 // If the loop was versioned with memchecks, add the corresponding no-alias 685 // metadata. 686 if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig))) 687 LVer->annotateInstWithNoAlias(To, Orig); 688 } 689 690 void InnerLoopVectorizer::addMetadata(Instruction *To, 691 Instruction *From) { 692 propagateMetadata(To, From); 693 addNewMetadata(To, From); 694 } 695 696 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To, 697 Instruction *From) { 698 for (Value *V : To) { 699 if (Instruction *I = dyn_cast<Instruction>(V)) 700 addMetadata(I, From); 701 } 702 } 703 704 /// \brief The group of interleaved loads/stores sharing the same stride and 705 /// close to each other. 706 /// 707 /// Each member in this group has an index starting from 0, and the largest 708 /// index should be less than interleaved factor, which is equal to the absolute 709 /// value of the access's stride. 710 /// 711 /// E.g. An interleaved load group of factor 4: 712 /// for (unsigned i = 0; i < 1024; i+=4) { 713 /// a = A[i]; // Member of index 0 714 /// b = A[i+1]; // Member of index 1 715 /// d = A[i+3]; // Member of index 3 716 /// ... 717 /// } 718 /// 719 /// An interleaved store group of factor 4: 720 /// for (unsigned i = 0; i < 1024; i+=4) { 721 /// ... 722 /// A[i] = a; // Member of index 0 723 /// A[i+1] = b; // Member of index 1 724 /// A[i+2] = c; // Member of index 2 725 /// A[i+3] = d; // Member of index 3 726 /// } 727 /// 728 /// Note: the interleaved load group could have gaps (missing members), but 729 /// the interleaved store group doesn't allow gaps. 730 class InterleaveGroup { 731 public: 732 InterleaveGroup(Instruction *Instr, int Stride, unsigned Align) 733 : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) { 734 assert(Align && "The alignment should be non-zero"); 735 736 Factor = std::abs(Stride); 737 assert(Factor > 1 && "Invalid interleave factor"); 738 739 Reverse = Stride < 0; 740 Members[0] = Instr; 741 } 742 743 bool isReverse() const { return Reverse; } 744 unsigned getFactor() const { return Factor; } 745 unsigned getAlignment() const { return Align; } 746 unsigned getNumMembers() const { return Members.size(); } 747 748 /// \brief Try to insert a new member \p Instr with index \p Index and 749 /// alignment \p NewAlign. The index is related to the leader and it could be 750 /// negative if it is the new leader. 751 /// 752 /// \returns false if the instruction doesn't belong to the group. 753 bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) { 754 assert(NewAlign && "The new member's alignment should be non-zero"); 755 756 int Key = Index + SmallestKey; 757 758 // Skip if there is already a member with the same index. 759 if (Members.count(Key)) 760 return false; 761 762 if (Key > LargestKey) { 763 // The largest index is always less than the interleave factor. 764 if (Index >= static_cast<int>(Factor)) 765 return false; 766 767 LargestKey = Key; 768 } else if (Key < SmallestKey) { 769 // The largest index is always less than the interleave factor. 770 if (LargestKey - Key >= static_cast<int>(Factor)) 771 return false; 772 773 SmallestKey = Key; 774 } 775 776 // It's always safe to select the minimum alignment. 777 Align = std::min(Align, NewAlign); 778 Members[Key] = Instr; 779 return true; 780 } 781 782 /// \brief Get the member with the given index \p Index 783 /// 784 /// \returns nullptr if contains no such member. 785 Instruction *getMember(unsigned Index) const { 786 int Key = SmallestKey + Index; 787 if (!Members.count(Key)) 788 return nullptr; 789 790 return Members.find(Key)->second; 791 } 792 793 /// \brief Get the index for the given member. Unlike the key in the member 794 /// map, the index starts from 0. 795 unsigned getIndex(Instruction *Instr) const { 796 for (auto I : Members) 797 if (I.second == Instr) 798 return I.first - SmallestKey; 799 800 llvm_unreachable("InterleaveGroup contains no such member"); 801 } 802 803 Instruction *getInsertPos() const { return InsertPos; } 804 void setInsertPos(Instruction *Inst) { InsertPos = Inst; } 805 806 private: 807 unsigned Factor; // Interleave Factor. 808 bool Reverse; 809 unsigned Align; 810 DenseMap<int, Instruction *> Members; 811 int SmallestKey; 812 int LargestKey; 813 814 // To avoid breaking dependences, vectorized instructions of an interleave 815 // group should be inserted at either the first load or the last store in 816 // program order. 817 // 818 // E.g. %even = load i32 // Insert Position 819 // %add = add i32 %even // Use of %even 820 // %odd = load i32 821 // 822 // store i32 %even 823 // %odd = add i32 // Def of %odd 824 // store i32 %odd // Insert Position 825 Instruction *InsertPos; 826 }; 827 828 /// \brief Drive the analysis of interleaved memory accesses in the loop. 829 /// 830 /// Use this class to analyze interleaved accesses only when we can vectorize 831 /// a loop. Otherwise it's meaningless to do analysis as the vectorization 832 /// on interleaved accesses is unsafe. 833 /// 834 /// The analysis collects interleave groups and records the relationships 835 /// between the member and the group in a map. 836 class InterleavedAccessInfo { 837 public: 838 InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L, 839 DominatorTree *DT, LoopInfo *LI) 840 : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(nullptr), 841 RequiresScalarEpilogue(false) {} 842 843 ~InterleavedAccessInfo() { 844 SmallSet<InterleaveGroup *, 4> DelSet; 845 // Avoid releasing a pointer twice. 846 for (auto &I : InterleaveGroupMap) 847 DelSet.insert(I.second); 848 for (auto *Ptr : DelSet) 849 delete Ptr; 850 } 851 852 /// \brief Analyze the interleaved accesses and collect them in interleave 853 /// groups. Substitute symbolic strides using \p Strides. 854 void analyzeInterleaving(const ValueToValueMap &Strides); 855 856 /// \brief Check if \p Instr belongs to any interleave group. 857 bool isInterleaved(Instruction *Instr) const { 858 return InterleaveGroupMap.count(Instr); 859 } 860 861 /// \brief Return the maximum interleave factor of all interleaved groups. 862 unsigned getMaxInterleaveFactor() const { 863 unsigned MaxFactor = 1; 864 for (auto &Entry : InterleaveGroupMap) 865 MaxFactor = std::max(MaxFactor, Entry.second->getFactor()); 866 return MaxFactor; 867 } 868 869 /// \brief Get the interleave group that \p Instr belongs to. 870 /// 871 /// \returns nullptr if doesn't have such group. 872 InterleaveGroup *getInterleaveGroup(Instruction *Instr) const { 873 if (InterleaveGroupMap.count(Instr)) 874 return InterleaveGroupMap.find(Instr)->second; 875 return nullptr; 876 } 877 878 /// \brief Returns true if an interleaved group that may access memory 879 /// out-of-bounds requires a scalar epilogue iteration for correctness. 880 bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; } 881 882 /// \brief Initialize the LoopAccessInfo used for dependence checking. 883 void setLAI(const LoopAccessInfo *Info) { LAI = Info; } 884 885 private: 886 /// A wrapper around ScalarEvolution, used to add runtime SCEV checks. 887 /// Simplifies SCEV expressions in the context of existing SCEV assumptions. 888 /// The interleaved access analysis can also add new predicates (for example 889 /// by versioning strides of pointers). 890 PredicatedScalarEvolution &PSE; 891 Loop *TheLoop; 892 DominatorTree *DT; 893 LoopInfo *LI; 894 const LoopAccessInfo *LAI; 895 896 /// True if the loop may contain non-reversed interleaved groups with 897 /// out-of-bounds accesses. We ensure we don't speculatively access memory 898 /// out-of-bounds by executing at least one scalar epilogue iteration. 899 bool RequiresScalarEpilogue; 900 901 /// Holds the relationships between the members and the interleave group. 902 DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap; 903 904 /// Holds dependences among the memory accesses in the loop. It maps a source 905 /// access to a set of dependent sink accesses. 906 DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences; 907 908 /// \brief The descriptor for a strided memory access. 909 struct StrideDescriptor { 910 StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size, 911 unsigned Align) 912 : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {} 913 914 StrideDescriptor() = default; 915 916 // The access's stride. It is negative for a reverse access. 917 int64_t Stride = 0; 918 const SCEV *Scev = nullptr; // The scalar expression of this access 919 uint64_t Size = 0; // The size of the memory object. 920 unsigned Align = 0; // The alignment of this access. 921 }; 922 923 /// \brief A type for holding instructions and their stride descriptors. 924 typedef std::pair<Instruction *, StrideDescriptor> StrideEntry; 925 926 /// \brief Create a new interleave group with the given instruction \p Instr, 927 /// stride \p Stride and alignment \p Align. 928 /// 929 /// \returns the newly created interleave group. 930 InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride, 931 unsigned Align) { 932 assert(!InterleaveGroupMap.count(Instr) && 933 "Already in an interleaved access group"); 934 InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align); 935 return InterleaveGroupMap[Instr]; 936 } 937 938 /// \brief Release the group and remove all the relationships. 939 void releaseGroup(InterleaveGroup *Group) { 940 for (unsigned i = 0; i < Group->getFactor(); i++) 941 if (Instruction *Member = Group->getMember(i)) 942 InterleaveGroupMap.erase(Member); 943 944 delete Group; 945 } 946 947 /// \brief Collect all the accesses with a constant stride in program order. 948 void collectConstStrideAccesses( 949 MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo, 950 const ValueToValueMap &Strides); 951 952 /// \brief Returns true if \p Stride is allowed in an interleaved group. 953 static bool isStrided(int Stride) { 954 unsigned Factor = std::abs(Stride); 955 return Factor >= 2 && Factor <= MaxInterleaveGroupFactor; 956 } 957 958 /// \brief Returns true if \p BB is a predicated block. 959 bool isPredicated(BasicBlock *BB) const { 960 return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT); 961 } 962 963 /// \brief Returns true if LoopAccessInfo can be used for dependence queries. 964 bool areDependencesValid() const { 965 return LAI && LAI->getDepChecker().getDependences(); 966 } 967 968 /// \brief Returns true if memory accesses \p A and \p B can be reordered, if 969 /// necessary, when constructing interleaved groups. 970 /// 971 /// \p A must precede \p B in program order. We return false if reordering is 972 /// not necessary or is prevented because \p A and \p B may be dependent. 973 bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A, 974 StrideEntry *B) const { 975 976 // Code motion for interleaved accesses can potentially hoist strided loads 977 // and sink strided stores. The code below checks the legality of the 978 // following two conditions: 979 // 980 // 1. Potentially moving a strided load (B) before any store (A) that 981 // precedes B, or 982 // 983 // 2. Potentially moving a strided store (A) after any load or store (B) 984 // that A precedes. 985 // 986 // It's legal to reorder A and B if we know there isn't a dependence from A 987 // to B. Note that this determination is conservative since some 988 // dependences could potentially be reordered safely. 989 990 // A is potentially the source of a dependence. 991 auto *Src = A->first; 992 auto SrcDes = A->second; 993 994 // B is potentially the sink of a dependence. 995 auto *Sink = B->first; 996 auto SinkDes = B->second; 997 998 // Code motion for interleaved accesses can't violate WAR dependences. 999 // Thus, reordering is legal if the source isn't a write. 1000 if (!Src->mayWriteToMemory()) 1001 return true; 1002 1003 // At least one of the accesses must be strided. 1004 if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride)) 1005 return true; 1006 1007 // If dependence information is not available from LoopAccessInfo, 1008 // conservatively assume the instructions can't be reordered. 1009 if (!areDependencesValid()) 1010 return false; 1011 1012 // If we know there is a dependence from source to sink, assume the 1013 // instructions can't be reordered. Otherwise, reordering is legal. 1014 return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink); 1015 } 1016 1017 /// \brief Collect the dependences from LoopAccessInfo. 1018 /// 1019 /// We process the dependences once during the interleaved access analysis to 1020 /// enable constant-time dependence queries. 1021 void collectDependences() { 1022 if (!areDependencesValid()) 1023 return; 1024 auto *Deps = LAI->getDepChecker().getDependences(); 1025 for (auto Dep : *Deps) 1026 Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI)); 1027 } 1028 }; 1029 1030 /// Utility class for getting and setting loop vectorizer hints in the form 1031 /// of loop metadata. 1032 /// This class keeps a number of loop annotations locally (as member variables) 1033 /// and can, upon request, write them back as metadata on the loop. It will 1034 /// initially scan the loop for existing metadata, and will update the local 1035 /// values based on information in the loop. 1036 /// We cannot write all values to metadata, as the mere presence of some info, 1037 /// for example 'force', means a decision has been made. So, we need to be 1038 /// careful NOT to add them if the user hasn't specifically asked so. 1039 class LoopVectorizeHints { 1040 enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE }; 1041 1042 /// Hint - associates name and validation with the hint value. 1043 struct Hint { 1044 const char *Name; 1045 unsigned Value; // This may have to change for non-numeric values. 1046 HintKind Kind; 1047 1048 Hint(const char *Name, unsigned Value, HintKind Kind) 1049 : Name(Name), Value(Value), Kind(Kind) {} 1050 1051 bool validate(unsigned Val) { 1052 switch (Kind) { 1053 case HK_WIDTH: 1054 return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth; 1055 case HK_UNROLL: 1056 return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor; 1057 case HK_FORCE: 1058 return (Val <= 1); 1059 } 1060 return false; 1061 } 1062 }; 1063 1064 /// Vectorization width. 1065 Hint Width; 1066 /// Vectorization interleave factor. 1067 Hint Interleave; 1068 /// Vectorization forced 1069 Hint Force; 1070 1071 /// Return the loop metadata prefix. 1072 static StringRef Prefix() { return "llvm.loop."; } 1073 1074 /// True if there is any unsafe math in the loop. 1075 bool PotentiallyUnsafe; 1076 1077 public: 1078 enum ForceKind { 1079 FK_Undefined = -1, ///< Not selected. 1080 FK_Disabled = 0, ///< Forcing disabled. 1081 FK_Enabled = 1, ///< Forcing enabled. 1082 }; 1083 1084 LoopVectorizeHints(const Loop *L, bool DisableInterleaving, 1085 OptimizationRemarkEmitter &ORE) 1086 : Width("vectorize.width", VectorizerParams::VectorizationFactor, 1087 HK_WIDTH), 1088 Interleave("interleave.count", DisableInterleaving, HK_UNROLL), 1089 Force("vectorize.enable", FK_Undefined, HK_FORCE), 1090 PotentiallyUnsafe(false), TheLoop(L), ORE(ORE) { 1091 // Populate values with existing loop metadata. 1092 getHintsFromMetadata(); 1093 1094 // force-vector-interleave overrides DisableInterleaving. 1095 if (VectorizerParams::isInterleaveForced()) 1096 Interleave.Value = VectorizerParams::VectorizationInterleave; 1097 1098 DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs() 1099 << "LV: Interleaving disabled by the pass manager\n"); 1100 } 1101 1102 /// Mark the loop L as already vectorized by setting the width to 1. 1103 void setAlreadyVectorized() { 1104 Width.Value = Interleave.Value = 1; 1105 Hint Hints[] = {Width, Interleave}; 1106 writeHintsToMetadata(Hints); 1107 } 1108 1109 bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const { 1110 if (getForce() == LoopVectorizeHints::FK_Disabled) { 1111 DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n"); 1112 ORE.emitOptimizationRemarkAnalysis(vectorizeAnalysisPassName(), L, 1113 emitRemark()); 1114 return false; 1115 } 1116 1117 if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) { 1118 DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n"); 1119 ORE.emitOptimizationRemarkAnalysis(vectorizeAnalysisPassName(), L, 1120 emitRemark()); 1121 return false; 1122 } 1123 1124 if (getWidth() == 1 && getInterleave() == 1) { 1125 // FIXME: Add a separate metadata to indicate when the loop has already 1126 // been vectorized instead of setting width and count to 1. 1127 DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n"); 1128 // FIXME: Add interleave.disable metadata. This will allow 1129 // vectorize.disable to be used without disabling the pass and errors 1130 // to differentiate between disabled vectorization and a width of 1. 1131 ORE.emitOptimizationRemarkAnalysis( 1132 vectorizeAnalysisPassName(), L, 1133 "loop not vectorized: vectorization and interleaving are explicitly " 1134 "disabled, or vectorize width and interleave count are both set to " 1135 "1"); 1136 return false; 1137 } 1138 1139 return true; 1140 } 1141 1142 /// Dumps all the hint information. 1143 std::string emitRemark() const { 1144 VectorizationReport R; 1145 if (Force.Value == LoopVectorizeHints::FK_Disabled) 1146 R << "vectorization is explicitly disabled"; 1147 else { 1148 R << "use -Rpass-analysis=loop-vectorize for more info"; 1149 if (Force.Value == LoopVectorizeHints::FK_Enabled) { 1150 R << " (Force=true"; 1151 if (Width.Value != 0) 1152 R << ", Vector Width=" << Width.Value; 1153 if (Interleave.Value != 0) 1154 R << ", Interleave Count=" << Interleave.Value; 1155 R << ")"; 1156 } 1157 } 1158 1159 return R.str(); 1160 } 1161 1162 unsigned getWidth() const { return Width.Value; } 1163 unsigned getInterleave() const { return Interleave.Value; } 1164 enum ForceKind getForce() const { return (ForceKind)Force.Value; } 1165 1166 /// \brief If hints are provided that force vectorization, use the AlwaysPrint 1167 /// pass name to force the frontend to print the diagnostic. 1168 const char *vectorizeAnalysisPassName() const { 1169 if (getWidth() == 1) 1170 return LV_NAME; 1171 if (getForce() == LoopVectorizeHints::FK_Disabled) 1172 return LV_NAME; 1173 if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0) 1174 return LV_NAME; 1175 return DiagnosticInfoOptimizationRemarkAnalysis::AlwaysPrint; 1176 } 1177 1178 bool allowReordering() const { 1179 // When enabling loop hints are provided we allow the vectorizer to change 1180 // the order of operations that is given by the scalar loop. This is not 1181 // enabled by default because can be unsafe or inefficient. For example, 1182 // reordering floating-point operations will change the way round-off 1183 // error accumulates in the loop. 1184 return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1; 1185 } 1186 1187 bool isPotentiallyUnsafe() const { 1188 // Avoid FP vectorization if the target is unsure about proper support. 1189 // This may be related to the SIMD unit in the target not handling 1190 // IEEE 754 FP ops properly, or bad single-to-double promotions. 1191 // Otherwise, a sequence of vectorized loops, even without reduction, 1192 // could lead to different end results on the destination vectors. 1193 return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe; 1194 } 1195 1196 void setPotentiallyUnsafe() { PotentiallyUnsafe = true; } 1197 1198 private: 1199 /// Find hints specified in the loop metadata and update local values. 1200 void getHintsFromMetadata() { 1201 MDNode *LoopID = TheLoop->getLoopID(); 1202 if (!LoopID) 1203 return; 1204 1205 // First operand should refer to the loop id itself. 1206 assert(LoopID->getNumOperands() > 0 && "requires at least one operand"); 1207 assert(LoopID->getOperand(0) == LoopID && "invalid loop id"); 1208 1209 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 1210 const MDString *S = nullptr; 1211 SmallVector<Metadata *, 4> Args; 1212 1213 // The expected hint is either a MDString or a MDNode with the first 1214 // operand a MDString. 1215 if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) { 1216 if (!MD || MD->getNumOperands() == 0) 1217 continue; 1218 S = dyn_cast<MDString>(MD->getOperand(0)); 1219 for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i) 1220 Args.push_back(MD->getOperand(i)); 1221 } else { 1222 S = dyn_cast<MDString>(LoopID->getOperand(i)); 1223 assert(Args.size() == 0 && "too many arguments for MDString"); 1224 } 1225 1226 if (!S) 1227 continue; 1228 1229 // Check if the hint starts with the loop metadata prefix. 1230 StringRef Name = S->getString(); 1231 if (Args.size() == 1) 1232 setHint(Name, Args[0]); 1233 } 1234 } 1235 1236 /// Checks string hint with one operand and set value if valid. 1237 void setHint(StringRef Name, Metadata *Arg) { 1238 if (!Name.startswith(Prefix())) 1239 return; 1240 Name = Name.substr(Prefix().size(), StringRef::npos); 1241 1242 const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg); 1243 if (!C) 1244 return; 1245 unsigned Val = C->getZExtValue(); 1246 1247 Hint *Hints[] = {&Width, &Interleave, &Force}; 1248 for (auto H : Hints) { 1249 if (Name == H->Name) { 1250 if (H->validate(Val)) 1251 H->Value = Val; 1252 else 1253 DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n"); 1254 break; 1255 } 1256 } 1257 } 1258 1259 /// Create a new hint from name / value pair. 1260 MDNode *createHintMetadata(StringRef Name, unsigned V) const { 1261 LLVMContext &Context = TheLoop->getHeader()->getContext(); 1262 Metadata *MDs[] = {MDString::get(Context, Name), 1263 ConstantAsMetadata::get( 1264 ConstantInt::get(Type::getInt32Ty(Context), V))}; 1265 return MDNode::get(Context, MDs); 1266 } 1267 1268 /// Matches metadata with hint name. 1269 bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) { 1270 MDString *Name = dyn_cast<MDString>(Node->getOperand(0)); 1271 if (!Name) 1272 return false; 1273 1274 for (auto H : HintTypes) 1275 if (Name->getString().endswith(H.Name)) 1276 return true; 1277 return false; 1278 } 1279 1280 /// Sets current hints into loop metadata, keeping other values intact. 1281 void writeHintsToMetadata(ArrayRef<Hint> HintTypes) { 1282 if (HintTypes.size() == 0) 1283 return; 1284 1285 // Reserve the first element to LoopID (see below). 1286 SmallVector<Metadata *, 4> MDs(1); 1287 // If the loop already has metadata, then ignore the existing operands. 1288 MDNode *LoopID = TheLoop->getLoopID(); 1289 if (LoopID) { 1290 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 1291 MDNode *Node = cast<MDNode>(LoopID->getOperand(i)); 1292 // If node in update list, ignore old value. 1293 if (!matchesHintMetadataName(Node, HintTypes)) 1294 MDs.push_back(Node); 1295 } 1296 } 1297 1298 // Now, add the missing hints. 1299 for (auto H : HintTypes) 1300 MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value)); 1301 1302 // Replace current metadata node with new one. 1303 LLVMContext &Context = TheLoop->getHeader()->getContext(); 1304 MDNode *NewLoopID = MDNode::get(Context, MDs); 1305 // Set operand 0 to refer to the loop id itself. 1306 NewLoopID->replaceOperandWith(0, NewLoopID); 1307 1308 TheLoop->setLoopID(NewLoopID); 1309 } 1310 1311 /// The loop these hints belong to. 1312 const Loop *TheLoop; 1313 1314 /// Interface to emit optimization remarks. 1315 OptimizationRemarkEmitter &ORE; 1316 }; 1317 1318 static void emitAnalysisDiag(const Loop *TheLoop, 1319 const LoopVectorizeHints &Hints, 1320 OptimizationRemarkEmitter &ORE, 1321 const LoopAccessReport &Message) { 1322 const char *Name = Hints.vectorizeAnalysisPassName(); 1323 LoopAccessReport::emitAnalysis(Message, TheLoop, Name, ORE); 1324 } 1325 1326 static void emitMissedWarning(Function *F, Loop *L, 1327 const LoopVectorizeHints &LH, 1328 OptimizationRemarkEmitter *ORE) { 1329 ORE->emitOptimizationRemarkMissed(LV_NAME, L, LH.emitRemark()); 1330 1331 if (LH.getForce() == LoopVectorizeHints::FK_Enabled) { 1332 if (LH.getWidth() != 1) 1333 emitLoopVectorizeWarning( 1334 F->getContext(), *F, L->getStartLoc(), 1335 "failed explicitly specified loop vectorization"); 1336 else if (LH.getInterleave() != 1) 1337 emitLoopInterleaveWarning( 1338 F->getContext(), *F, L->getStartLoc(), 1339 "failed explicitly specified loop interleaving"); 1340 } 1341 } 1342 1343 /// LoopVectorizationLegality checks if it is legal to vectorize a loop, and 1344 /// to what vectorization factor. 1345 /// This class does not look at the profitability of vectorization, only the 1346 /// legality. This class has two main kinds of checks: 1347 /// * Memory checks - The code in canVectorizeMemory checks if vectorization 1348 /// will change the order of memory accesses in a way that will change the 1349 /// correctness of the program. 1350 /// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory 1351 /// checks for a number of different conditions, such as the availability of a 1352 /// single induction variable, that all types are supported and vectorize-able, 1353 /// etc. This code reflects the capabilities of InnerLoopVectorizer. 1354 /// This class is also used by InnerLoopVectorizer for identifying 1355 /// induction variable and the different reduction variables. 1356 class LoopVectorizationLegality { 1357 public: 1358 LoopVectorizationLegality( 1359 Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT, 1360 TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F, 1361 const TargetTransformInfo *TTI, 1362 std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI, 1363 OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R, 1364 LoopVectorizeHints *H) 1365 : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT), 1366 GetLAA(GetLAA), LAI(nullptr), ORE(ORE), InterleaveInfo(PSE, L, DT, LI), 1367 Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false), 1368 Requirements(R), Hints(H) {} 1369 1370 /// ReductionList contains the reduction descriptors for all 1371 /// of the reductions that were found in the loop. 1372 typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList; 1373 1374 /// InductionList saves induction variables and maps them to the 1375 /// induction descriptor. 1376 typedef MapVector<PHINode *, InductionDescriptor> InductionList; 1377 1378 /// RecurrenceSet contains the phi nodes that are recurrences other than 1379 /// inductions and reductions. 1380 typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet; 1381 1382 /// Returns true if it is legal to vectorize this loop. 1383 /// This does not mean that it is profitable to vectorize this 1384 /// loop, only that it is legal to do so. 1385 bool canVectorize(); 1386 1387 /// Returns the Induction variable. 1388 PHINode *getInduction() { return Induction; } 1389 1390 /// Returns the reduction variables found in the loop. 1391 ReductionList *getReductionVars() { return &Reductions; } 1392 1393 /// Returns the induction variables found in the loop. 1394 InductionList *getInductionVars() { return &Inductions; } 1395 1396 /// Return the first-order recurrences found in the loop. 1397 RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; } 1398 1399 /// Returns the widest induction type. 1400 Type *getWidestInductionType() { return WidestIndTy; } 1401 1402 /// Returns True if V is an induction variable in this loop. 1403 bool isInductionVariable(const Value *V); 1404 1405 /// Returns True if PN is a reduction variable in this loop. 1406 bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); } 1407 1408 /// Returns True if Phi is a first-order recurrence in this loop. 1409 bool isFirstOrderRecurrence(const PHINode *Phi); 1410 1411 /// Return true if the block BB needs to be predicated in order for the loop 1412 /// to be vectorized. 1413 bool blockNeedsPredication(BasicBlock *BB); 1414 1415 /// Check if this pointer is consecutive when vectorizing. This happens 1416 /// when the last index of the GEP is the induction variable, or that the 1417 /// pointer itself is an induction variable. 1418 /// This check allows us to vectorize A[idx] into a wide load/store. 1419 /// Returns: 1420 /// 0 - Stride is unknown or non-consecutive. 1421 /// 1 - Address is consecutive. 1422 /// -1 - Address is consecutive, and decreasing. 1423 int isConsecutivePtr(Value *Ptr); 1424 1425 /// Returns true if the value V is uniform within the loop. 1426 bool isUniform(Value *V); 1427 1428 /// Returns true if this instruction will remain scalar after vectorization. 1429 bool isUniformAfterVectorization(Instruction *I) { return Uniforms.count(I); } 1430 1431 /// Returns the information that we collected about runtime memory check. 1432 const RuntimePointerChecking *getRuntimePointerChecking() const { 1433 return LAI->getRuntimePointerChecking(); 1434 } 1435 1436 const LoopAccessInfo *getLAI() const { return LAI; } 1437 1438 /// \brief Check if \p Instr belongs to any interleaved access group. 1439 bool isAccessInterleaved(Instruction *Instr) { 1440 return InterleaveInfo.isInterleaved(Instr); 1441 } 1442 1443 /// \brief Return the maximum interleave factor of all interleaved groups. 1444 unsigned getMaxInterleaveFactor() const { 1445 return InterleaveInfo.getMaxInterleaveFactor(); 1446 } 1447 1448 /// \brief Get the interleaved access group that \p Instr belongs to. 1449 const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) { 1450 return InterleaveInfo.getInterleaveGroup(Instr); 1451 } 1452 1453 /// \brief Returns true if an interleaved group requires a scalar iteration 1454 /// to handle accesses with gaps. 1455 bool requiresScalarEpilogue() const { 1456 return InterleaveInfo.requiresScalarEpilogue(); 1457 } 1458 1459 unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); } 1460 1461 bool hasStride(Value *V) { return LAI->hasStride(V); } 1462 1463 /// Returns true if the target machine supports masked store operation 1464 /// for the given \p DataType and kind of access to \p Ptr. 1465 bool isLegalMaskedStore(Type *DataType, Value *Ptr) { 1466 return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType); 1467 } 1468 /// Returns true if the target machine supports masked load operation 1469 /// for the given \p DataType and kind of access to \p Ptr. 1470 bool isLegalMaskedLoad(Type *DataType, Value *Ptr) { 1471 return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType); 1472 } 1473 /// Returns true if the target machine supports masked scatter operation 1474 /// for the given \p DataType. 1475 bool isLegalMaskedScatter(Type *DataType) { 1476 return TTI->isLegalMaskedScatter(DataType); 1477 } 1478 /// Returns true if the target machine supports masked gather operation 1479 /// for the given \p DataType. 1480 bool isLegalMaskedGather(Type *DataType) { 1481 return TTI->isLegalMaskedGather(DataType); 1482 } 1483 1484 /// Returns true if vector representation of the instruction \p I 1485 /// requires mask. 1486 bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); } 1487 unsigned getNumStores() const { return LAI->getNumStores(); } 1488 unsigned getNumLoads() const { return LAI->getNumLoads(); } 1489 unsigned getNumPredStores() const { return NumPredStores; } 1490 1491 private: 1492 /// Check if a single basic block loop is vectorizable. 1493 /// At this point we know that this is a loop with a constant trip count 1494 /// and we only need to check individual instructions. 1495 bool canVectorizeInstrs(); 1496 1497 /// When we vectorize loops we may change the order in which 1498 /// we read and write from memory. This method checks if it is 1499 /// legal to vectorize the code, considering only memory constrains. 1500 /// Returns true if the loop is vectorizable 1501 bool canVectorizeMemory(); 1502 1503 /// Return true if we can vectorize this loop using the IF-conversion 1504 /// transformation. 1505 bool canVectorizeWithIfConvert(); 1506 1507 /// Collect the variables that need to stay uniform after vectorization. 1508 void collectLoopUniforms(); 1509 1510 /// Return true if all of the instructions in the block can be speculatively 1511 /// executed. \p SafePtrs is a list of addresses that are known to be legal 1512 /// and we know that we can read from them without segfault. 1513 bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs); 1514 1515 /// Updates the vectorization state by adding \p Phi to the inductions list. 1516 /// This can set \p Phi as the main induction of the loop if \p Phi is a 1517 /// better choice for the main induction than the existing one. 1518 void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID, 1519 SmallPtrSetImpl<Value *> &AllowedExit); 1520 1521 /// Report an analysis message to assist the user in diagnosing loops that are 1522 /// not vectorized. These are handled as LoopAccessReport rather than 1523 /// VectorizationReport because the << operator of VectorizationReport returns 1524 /// LoopAccessReport. 1525 void emitAnalysis(const LoopAccessReport &Message) const { 1526 emitAnalysisDiag(TheLoop, *Hints, *ORE, Message); 1527 } 1528 1529 /// \brief If an access has a symbolic strides, this maps the pointer value to 1530 /// the stride symbol. 1531 const ValueToValueMap *getSymbolicStrides() { 1532 // FIXME: Currently, the set of symbolic strides is sometimes queried before 1533 // it's collected. This happens from canVectorizeWithIfConvert, when the 1534 // pointer is checked to reference consecutive elements suitable for a 1535 // masked access. 1536 return LAI ? &LAI->getSymbolicStrides() : nullptr; 1537 } 1538 1539 unsigned NumPredStores; 1540 1541 /// The loop that we evaluate. 1542 Loop *TheLoop; 1543 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. 1544 /// Applies dynamic knowledge to simplify SCEV expressions in the context 1545 /// of existing SCEV assumptions. The analysis will also add a minimal set 1546 /// of new predicates if this is required to enable vectorization and 1547 /// unrolling. 1548 PredicatedScalarEvolution &PSE; 1549 /// Target Library Info. 1550 TargetLibraryInfo *TLI; 1551 /// Target Transform Info 1552 const TargetTransformInfo *TTI; 1553 /// Dominator Tree. 1554 DominatorTree *DT; 1555 // LoopAccess analysis. 1556 std::function<const LoopAccessInfo &(Loop &)> *GetLAA; 1557 // And the loop-accesses info corresponding to this loop. This pointer is 1558 // null until canVectorizeMemory sets it up. 1559 const LoopAccessInfo *LAI; 1560 /// Interface to emit optimization remarks. 1561 OptimizationRemarkEmitter *ORE; 1562 1563 /// The interleave access information contains groups of interleaved accesses 1564 /// with the same stride and close to each other. 1565 InterleavedAccessInfo InterleaveInfo; 1566 1567 // --- vectorization state --- // 1568 1569 /// Holds the integer induction variable. This is the counter of the 1570 /// loop. 1571 PHINode *Induction; 1572 /// Holds the reduction variables. 1573 ReductionList Reductions; 1574 /// Holds all of the induction variables that we found in the loop. 1575 /// Notice that inductions don't need to start at zero and that induction 1576 /// variables can be pointers. 1577 InductionList Inductions; 1578 /// Holds the phi nodes that are first-order recurrences. 1579 RecurrenceSet FirstOrderRecurrences; 1580 /// Holds the widest induction type encountered. 1581 Type *WidestIndTy; 1582 1583 /// Allowed outside users. This holds the induction and reduction 1584 /// vars which can be accessed from outside the loop. 1585 SmallPtrSet<Value *, 4> AllowedExit; 1586 /// This set holds the variables which are known to be uniform after 1587 /// vectorization. 1588 SmallPtrSet<Instruction *, 4> Uniforms; 1589 1590 /// Can we assume the absence of NaNs. 1591 bool HasFunNoNaNAttr; 1592 1593 /// Vectorization requirements that will go through late-evaluation. 1594 LoopVectorizationRequirements *Requirements; 1595 1596 /// Used to emit an analysis of any legality issues. 1597 LoopVectorizeHints *Hints; 1598 1599 /// While vectorizing these instructions we have to generate a 1600 /// call to the appropriate masked intrinsic 1601 SmallPtrSet<const Instruction *, 8> MaskedOp; 1602 }; 1603 1604 /// LoopVectorizationCostModel - estimates the expected speedups due to 1605 /// vectorization. 1606 /// In many cases vectorization is not profitable. This can happen because of 1607 /// a number of reasons. In this class we mainly attempt to predict the 1608 /// expected speedup/slowdowns due to the supported instruction set. We use the 1609 /// TargetTransformInfo to query the different backends for the cost of 1610 /// different operations. 1611 class LoopVectorizationCostModel { 1612 public: 1613 LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE, 1614 LoopInfo *LI, LoopVectorizationLegality *Legal, 1615 const TargetTransformInfo &TTI, 1616 const TargetLibraryInfo *TLI, DemandedBits *DB, 1617 AssumptionCache *AC, 1618 OptimizationRemarkEmitter *ORE, const Function *F, 1619 const LoopVectorizeHints *Hints) 1620 : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB), 1621 AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {} 1622 1623 /// Information about vectorization costs 1624 struct VectorizationFactor { 1625 unsigned Width; // Vector width with best cost 1626 unsigned Cost; // Cost of the loop with that width 1627 }; 1628 /// \return The most profitable vectorization factor and the cost of that VF. 1629 /// This method checks every power of two up to VF. If UserVF is not ZERO 1630 /// then this vectorization factor will be selected if vectorization is 1631 /// possible. 1632 VectorizationFactor selectVectorizationFactor(bool OptForSize); 1633 1634 /// \return The size (in bits) of the smallest and widest types in the code 1635 /// that needs to be vectorized. We ignore values that remain scalar such as 1636 /// 64 bit loop indices. 1637 std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); 1638 1639 /// \return The desired interleave count. 1640 /// If interleave count has been specified by metadata it will be returned. 1641 /// Otherwise, the interleave count is computed and returned. VF and LoopCost 1642 /// are the selected vectorization factor and the cost of the selected VF. 1643 unsigned selectInterleaveCount(bool OptForSize, unsigned VF, 1644 unsigned LoopCost); 1645 1646 /// \return The most profitable unroll factor. 1647 /// This method finds the best unroll-factor based on register pressure and 1648 /// other parameters. VF and LoopCost are the selected vectorization factor 1649 /// and the cost of the selected VF. 1650 unsigned computeInterleaveCount(bool OptForSize, unsigned VF, 1651 unsigned LoopCost); 1652 1653 /// \brief A struct that represents some properties of the register usage 1654 /// of a loop. 1655 struct RegisterUsage { 1656 /// Holds the number of loop invariant values that are used in the loop. 1657 unsigned LoopInvariantRegs; 1658 /// Holds the maximum number of concurrent live intervals in the loop. 1659 unsigned MaxLocalUsers; 1660 /// Holds the number of instructions in the loop. 1661 unsigned NumInstructions; 1662 }; 1663 1664 /// \return Returns information about the register usages of the loop for the 1665 /// given vectorization factors. 1666 SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs); 1667 1668 /// Collect values we want to ignore in the cost model. 1669 void collectValuesToIgnore(); 1670 1671 private: 1672 /// The vectorization cost is a combination of the cost itself and a boolean 1673 /// indicating whether any of the contributing operations will actually 1674 /// operate on 1675 /// vector values after type legalization in the backend. If this latter value 1676 /// is 1677 /// false, then all operations will be scalarized (i.e. no vectorization has 1678 /// actually taken place). 1679 typedef std::pair<unsigned, bool> VectorizationCostTy; 1680 1681 /// Returns the expected execution cost. The unit of the cost does 1682 /// not matter because we use the 'cost' units to compare different 1683 /// vector widths. The cost that is returned is *not* normalized by 1684 /// the factor width. 1685 VectorizationCostTy expectedCost(unsigned VF); 1686 1687 /// Returns the execution time cost of an instruction for a given vector 1688 /// width. Vector width of one means scalar. 1689 VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF); 1690 1691 /// The cost-computation logic from getInstructionCost which provides 1692 /// the vector type as an output parameter. 1693 unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy); 1694 1695 /// Returns whether the instruction is a load or store and will be a emitted 1696 /// as a vector operation. 1697 bool isConsecutiveLoadOrStore(Instruction *I); 1698 1699 /// Report an analysis message to assist the user in diagnosing loops that are 1700 /// not vectorized. These are handled as LoopAccessReport rather than 1701 /// VectorizationReport because the << operator of VectorizationReport returns 1702 /// LoopAccessReport. 1703 void emitAnalysis(const LoopAccessReport &Message) const { 1704 emitAnalysisDiag(TheLoop, *Hints, *ORE, Message); 1705 } 1706 1707 public: 1708 /// Map of scalar integer values to the smallest bitwidth they can be legally 1709 /// represented as. The vector equivalents of these values should be truncated 1710 /// to this type. 1711 MapVector<Instruction *, uint64_t> MinBWs; 1712 1713 /// The loop that we evaluate. 1714 Loop *TheLoop; 1715 /// Predicated scalar evolution analysis. 1716 PredicatedScalarEvolution &PSE; 1717 /// Loop Info analysis. 1718 LoopInfo *LI; 1719 /// Vectorization legality. 1720 LoopVectorizationLegality *Legal; 1721 /// Vector target information. 1722 const TargetTransformInfo &TTI; 1723 /// Target Library Info. 1724 const TargetLibraryInfo *TLI; 1725 /// Demanded bits analysis. 1726 DemandedBits *DB; 1727 /// Assumption cache. 1728 AssumptionCache *AC; 1729 /// Interface to emit optimization remarks. 1730 OptimizationRemarkEmitter *ORE; 1731 1732 const Function *TheFunction; 1733 /// Loop Vectorize Hint. 1734 const LoopVectorizeHints *Hints; 1735 /// Values to ignore in the cost model. 1736 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1737 /// Values to ignore in the cost model when VF > 1. 1738 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1739 }; 1740 1741 /// \brief This holds vectorization requirements that must be verified late in 1742 /// the process. The requirements are set by legalize and costmodel. Once 1743 /// vectorization has been determined to be possible and profitable the 1744 /// requirements can be verified by looking for metadata or compiler options. 1745 /// For example, some loops require FP commutativity which is only allowed if 1746 /// vectorization is explicitly specified or if the fast-math compiler option 1747 /// has been provided. 1748 /// Late evaluation of these requirements allows helpful diagnostics to be 1749 /// composed that tells the user what need to be done to vectorize the loop. For 1750 /// example, by specifying #pragma clang loop vectorize or -ffast-math. Late 1751 /// evaluation should be used only when diagnostics can generated that can be 1752 /// followed by a non-expert user. 1753 class LoopVectorizationRequirements { 1754 public: 1755 LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE) 1756 : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr), ORE(ORE) {} 1757 1758 void addUnsafeAlgebraInst(Instruction *I) { 1759 // First unsafe algebra instruction. 1760 if (!UnsafeAlgebraInst) 1761 UnsafeAlgebraInst = I; 1762 } 1763 1764 void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; } 1765 1766 bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) { 1767 const char *Name = Hints.vectorizeAnalysisPassName(); 1768 bool Failed = false; 1769 if (UnsafeAlgebraInst && !Hints.allowReordering()) { 1770 ORE.emitOptimizationRemarkAnalysisFPCommute( 1771 Name, UnsafeAlgebraInst->getDebugLoc(), 1772 UnsafeAlgebraInst->getParent(), 1773 VectorizationReport() << "cannot prove it is safe to reorder " 1774 "floating-point operations"); 1775 Failed = true; 1776 } 1777 1778 // Test if runtime memcheck thresholds are exceeded. 1779 bool PragmaThresholdReached = 1780 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 1781 bool ThresholdReached = 1782 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 1783 if ((ThresholdReached && !Hints.allowReordering()) || 1784 PragmaThresholdReached) { 1785 ORE.emitOptimizationRemarkAnalysisAliasing( 1786 Name, L, 1787 VectorizationReport() 1788 << "cannot prove it is safe to reorder memory operations"); 1789 DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 1790 Failed = true; 1791 } 1792 1793 return Failed; 1794 } 1795 1796 private: 1797 unsigned NumRuntimePointerChecks; 1798 Instruction *UnsafeAlgebraInst; 1799 1800 /// Interface to emit optimization remarks. 1801 OptimizationRemarkEmitter &ORE; 1802 }; 1803 1804 static void addInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) { 1805 if (L.empty()) 1806 return V.push_back(&L); 1807 1808 for (Loop *InnerL : L) 1809 addInnerLoop(*InnerL, V); 1810 } 1811 1812 /// The LoopVectorize Pass. 1813 struct LoopVectorize : public FunctionPass { 1814 /// Pass identification, replacement for typeid 1815 static char ID; 1816 1817 explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true) 1818 : FunctionPass(ID) { 1819 Impl.DisableUnrolling = NoUnrolling; 1820 Impl.AlwaysVectorize = AlwaysVectorize; 1821 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1822 } 1823 1824 LoopVectorizePass Impl; 1825 1826 bool runOnFunction(Function &F) override { 1827 if (skipFunction(F)) 1828 return false; 1829 1830 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1831 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1832 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1833 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1834 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1835 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1836 auto *TLI = TLIP ? &TLIP->getTLI() : nullptr; 1837 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1838 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1839 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1840 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1841 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1842 1843 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1844 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1845 1846 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1847 GetLAA, *ORE); 1848 } 1849 1850 void getAnalysisUsage(AnalysisUsage &AU) const override { 1851 AU.addRequired<AssumptionCacheTracker>(); 1852 AU.addRequiredID(LoopSimplifyID); 1853 AU.addRequiredID(LCSSAID); 1854 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1855 AU.addRequired<DominatorTreeWrapperPass>(); 1856 AU.addRequired<LoopInfoWrapperPass>(); 1857 AU.addRequired<ScalarEvolutionWrapperPass>(); 1858 AU.addRequired<TargetTransformInfoWrapperPass>(); 1859 AU.addRequired<AAResultsWrapperPass>(); 1860 AU.addRequired<LoopAccessLegacyAnalysis>(); 1861 AU.addRequired<DemandedBitsWrapperPass>(); 1862 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1863 AU.addPreserved<LoopInfoWrapperPass>(); 1864 AU.addPreserved<DominatorTreeWrapperPass>(); 1865 AU.addPreserved<BasicAAWrapperPass>(); 1866 AU.addPreserved<GlobalsAAWrapperPass>(); 1867 } 1868 }; 1869 1870 } // end anonymous namespace 1871 1872 //===----------------------------------------------------------------------===// 1873 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 1874 // LoopVectorizationCostModel. 1875 //===----------------------------------------------------------------------===// 1876 1877 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 1878 // We need to place the broadcast of invariant variables outside the loop. 1879 Instruction *Instr = dyn_cast<Instruction>(V); 1880 bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody); 1881 bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr; 1882 1883 // Place the code for broadcasting invariant variables in the new preheader. 1884 IRBuilder<>::InsertPointGuard Guard(Builder); 1885 if (Invariant) 1886 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1887 1888 // Broadcast the scalar into all locations in the vector. 1889 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 1890 1891 return Shuf; 1892 } 1893 1894 void InnerLoopVectorizer::createVectorIntInductionPHI( 1895 const InductionDescriptor &II, VectorParts &Entry, IntegerType *TruncType) { 1896 Value *Start = II.getStartValue(); 1897 ConstantInt *Step = II.getConstIntStepValue(); 1898 assert(Step && "Can not widen an IV with a non-constant step"); 1899 1900 // Construct the initial value of the vector IV in the vector loop preheader 1901 auto CurrIP = Builder.saveIP(); 1902 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1903 if (TruncType) { 1904 Step = ConstantInt::getSigned(TruncType, Step->getSExtValue()); 1905 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 1906 } 1907 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 1908 Value *SteppedStart = getStepVector(SplatStart, 0, Step); 1909 Builder.restoreIP(CurrIP); 1910 1911 Value *SplatVF = 1912 ConstantVector::getSplat(VF, ConstantInt::getSigned(Start->getType(), 1913 VF * Step->getSExtValue())); 1914 // We may need to add the step a number of times, depending on the unroll 1915 // factor. The last of those goes into the PHI. 1916 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 1917 &*LoopVectorBody->getFirstInsertionPt()); 1918 Instruction *LastInduction = VecInd; 1919 for (unsigned Part = 0; Part < UF; ++Part) { 1920 Entry[Part] = LastInduction; 1921 LastInduction = cast<Instruction>( 1922 Builder.CreateAdd(LastInduction, SplatVF, "step.add")); 1923 } 1924 1925 // Move the last step to the end of the latch block. This ensures consistent 1926 // placement of all induction updates. 1927 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 1928 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 1929 auto *ICmp = cast<Instruction>(Br->getCondition()); 1930 LastInduction->moveBefore(ICmp); 1931 LastInduction->setName("vec.ind.next"); 1932 1933 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 1934 VecInd->addIncoming(LastInduction, LoopVectorLatch); 1935 } 1936 1937 void InnerLoopVectorizer::widenIntInduction(PHINode *IV, VectorParts &Entry, 1938 TruncInst *Trunc) { 1939 1940 auto II = Legal->getInductionVars()->find(IV); 1941 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 1942 1943 auto ID = II->second; 1944 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 1945 1946 // If a truncate instruction was provided, get the smaller type. 1947 auto *TruncType = Trunc ? cast<IntegerType>(Trunc->getType()) : nullptr; 1948 1949 // The step of the induction. 1950 Value *Step = nullptr; 1951 1952 // If the induction variable has a constant integer step value, go ahead and 1953 // get it now. 1954 if (ID.getConstIntStepValue()) 1955 Step = ID.getConstIntStepValue(); 1956 1957 // Try to create a new independent vector induction variable. If we can't 1958 // create the phi node, we will splat the scalar induction variable in each 1959 // loop iteration. 1960 if (VF > 1 && IV->getType() == Induction->getType() && Step && 1961 !ValuesNotWidened->count(IV)) 1962 return createVectorIntInductionPHI(ID, Entry, TruncType); 1963 1964 // The scalar value to broadcast. This will be derived from the canonical 1965 // induction variable. 1966 Value *ScalarIV = nullptr; 1967 1968 // Define the scalar induction variable and step values. If we were given a 1969 // truncation type, truncate the canonical induction variable and constant 1970 // step. Otherwise, derive these values from the induction descriptor. 1971 if (TruncType) { 1972 assert(Step && "Truncation requires constant integer step"); 1973 auto StepInt = cast<ConstantInt>(Step)->getSExtValue(); 1974 ScalarIV = Builder.CreateCast(Instruction::Trunc, Induction, TruncType); 1975 Step = ConstantInt::getSigned(TruncType, StepInt); 1976 } else { 1977 ScalarIV = Induction; 1978 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 1979 if (IV != OldInduction) { 1980 ScalarIV = Builder.CreateSExtOrTrunc(ScalarIV, IV->getType()); 1981 ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL); 1982 ScalarIV->setName("offset.idx"); 1983 } 1984 if (!Step) { 1985 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 1986 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 1987 &*Builder.GetInsertPoint()); 1988 } 1989 } 1990 1991 // Splat the scalar induction variable, and build the necessary step vectors. 1992 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 1993 for (unsigned Part = 0; Part < UF; ++Part) 1994 Entry[Part] = getStepVector(Broadcasted, VF * Part, Step); 1995 1996 // If an induction variable is only used for counting loop iterations or 1997 // calculating addresses, it doesn't need to be widened. Create scalar steps 1998 // that can be used by instructions we will later scalarize. Note that the 1999 // addition of the scalar steps will not increase the number of instructions 2000 // in the loop in the common case prior to InstCombine. We will be trading 2001 // one vector extract for each scalar step. 2002 if (VF > 1 && ValuesNotWidened->count(IV)) { 2003 auto *EntryVal = Trunc ? cast<Value>(Trunc) : IV; 2004 buildScalarSteps(ScalarIV, Step, EntryVal); 2005 } 2006 } 2007 2008 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2009 Instruction::BinaryOps BinOp) { 2010 // Create and check the types. 2011 assert(Val->getType()->isVectorTy() && "Must be a vector"); 2012 int VLen = Val->getType()->getVectorNumElements(); 2013 2014 Type *STy = Val->getType()->getScalarType(); 2015 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2016 "Induction Step must be an integer or FP"); 2017 assert(Step->getType() == STy && "Step has wrong type"); 2018 2019 SmallVector<Constant *, 8> Indices; 2020 2021 if (STy->isIntegerTy()) { 2022 // Create a vector of consecutive numbers from zero to VF. 2023 for (int i = 0; i < VLen; ++i) 2024 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 2025 2026 // Add the consecutive indices to the vector value. 2027 Constant *Cv = ConstantVector::get(Indices); 2028 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 2029 Step = Builder.CreateVectorSplat(VLen, Step); 2030 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2031 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2032 // which can be found from the original scalar operations. 2033 Step = Builder.CreateMul(Cv, Step); 2034 return Builder.CreateAdd(Val, Step, "induction"); 2035 } 2036 2037 // Floating point induction. 2038 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2039 "Binary Opcode should be specified for FP induction"); 2040 // Create a vector of consecutive numbers from zero to VF. 2041 for (int i = 0; i < VLen; ++i) 2042 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 2043 2044 // Add the consecutive indices to the vector value. 2045 Constant *Cv = ConstantVector::get(Indices); 2046 2047 Step = Builder.CreateVectorSplat(VLen, Step); 2048 2049 // Floating point operations had to be 'fast' to enable the induction. 2050 FastMathFlags Flags; 2051 Flags.setUnsafeAlgebra(); 2052 2053 Value *MulOp = Builder.CreateFMul(Cv, Step); 2054 if (isa<Instruction>(MulOp)) 2055 // Have to check, MulOp may be a constant 2056 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 2057 2058 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2059 if (isa<Instruction>(BOp)) 2060 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2061 return BOp; 2062 } 2063 2064 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2065 Value *EntryVal) { 2066 2067 // We shouldn't have to build scalar steps if we aren't vectorizing. 2068 assert(VF > 1 && "VF should be greater than one"); 2069 2070 // Get the value type and ensure it and the step have the same integer type. 2071 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2072 assert(ScalarIVTy->isIntegerTy() && ScalarIVTy == Step->getType() && 2073 "Val and Step should have the same integer type"); 2074 2075 // Compute the scalar steps and save the results in ScalarIVMap. 2076 for (unsigned Part = 0; Part < UF; ++Part) 2077 for (unsigned I = 0; I < VF; ++I) { 2078 auto *StartIdx = ConstantInt::get(ScalarIVTy, VF * Part + I); 2079 auto *Mul = Builder.CreateMul(StartIdx, Step); 2080 auto *Add = Builder.CreateAdd(ScalarIV, Mul); 2081 ScalarIVMap[EntryVal].push_back(Add); 2082 } 2083 } 2084 2085 int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) { 2086 assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr"); 2087 auto *SE = PSE.getSE(); 2088 // Make sure that the pointer does not point to structs. 2089 if (Ptr->getType()->getPointerElementType()->isAggregateType()) 2090 return 0; 2091 2092 // If this value is a pointer induction variable, we know it is consecutive. 2093 PHINode *Phi = dyn_cast_or_null<PHINode>(Ptr); 2094 if (Phi && Inductions.count(Phi)) { 2095 InductionDescriptor II = Inductions[Phi]; 2096 return II.getConsecutiveDirection(); 2097 } 2098 2099 GetElementPtrInst *Gep = getGEPInstruction(Ptr); 2100 if (!Gep) 2101 return 0; 2102 2103 unsigned NumOperands = Gep->getNumOperands(); 2104 Value *GpPtr = Gep->getPointerOperand(); 2105 // If this GEP value is a consecutive pointer induction variable and all of 2106 // the indices are constant, then we know it is consecutive. 2107 Phi = dyn_cast<PHINode>(GpPtr); 2108 if (Phi && Inductions.count(Phi)) { 2109 2110 // Make sure that the pointer does not point to structs. 2111 PointerType *GepPtrType = cast<PointerType>(GpPtr->getType()); 2112 if (GepPtrType->getElementType()->isAggregateType()) 2113 return 0; 2114 2115 // Make sure that all of the index operands are loop invariant. 2116 for (unsigned i = 1; i < NumOperands; ++i) 2117 if (!SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop)) 2118 return 0; 2119 2120 InductionDescriptor II = Inductions[Phi]; 2121 return II.getConsecutiveDirection(); 2122 } 2123 2124 unsigned InductionOperand = getGEPInductionOperand(Gep); 2125 2126 // Check that all of the gep indices are uniform except for our induction 2127 // operand. 2128 for (unsigned i = 0; i != NumOperands; ++i) 2129 if (i != InductionOperand && 2130 !SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop)) 2131 return 0; 2132 2133 // We can emit wide load/stores only if the last non-zero index is the 2134 // induction variable. 2135 const SCEV *Last = nullptr; 2136 if (!getSymbolicStrides() || !getSymbolicStrides()->count(Gep)) 2137 Last = PSE.getSCEV(Gep->getOperand(InductionOperand)); 2138 else { 2139 // Because of the multiplication by a stride we can have a s/zext cast. 2140 // We are going to replace this stride by 1 so the cast is safe to ignore. 2141 // 2142 // %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] 2143 // %0 = trunc i64 %indvars.iv to i32 2144 // %mul = mul i32 %0, %Stride1 2145 // %idxprom = zext i32 %mul to i64 << Safe cast. 2146 // %arrayidx = getelementptr inbounds i32* %B, i64 %idxprom 2147 // 2148 Last = replaceSymbolicStrideSCEV(PSE, *getSymbolicStrides(), 2149 Gep->getOperand(InductionOperand), Gep); 2150 if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(Last)) 2151 Last = 2152 (C->getSCEVType() == scSignExtend || C->getSCEVType() == scZeroExtend) 2153 ? C->getOperand() 2154 : Last; 2155 } 2156 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Last)) { 2157 const SCEV *Step = AR->getStepRecurrence(*SE); 2158 2159 // The memory is consecutive because the last index is consecutive 2160 // and all other indices are loop invariant. 2161 if (Step->isOne()) 2162 return 1; 2163 if (Step->isAllOnesValue()) 2164 return -1; 2165 } 2166 2167 return 0; 2168 } 2169 2170 bool LoopVectorizationLegality::isUniform(Value *V) { 2171 return LAI->isUniform(V); 2172 } 2173 2174 InnerLoopVectorizer::VectorParts & 2175 InnerLoopVectorizer::getVectorValue(Value *V) { 2176 assert(V != Induction && "The new induction variable should not be used."); 2177 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2178 2179 // If we have a stride that is replaced by one, do it here. 2180 if (Legal->hasStride(V)) 2181 V = ConstantInt::get(V->getType(), 1); 2182 2183 // If we have this scalar in the map, return it. 2184 if (WidenMap.has(V)) 2185 return WidenMap.get(V); 2186 2187 // If this scalar is unknown, assume that it is a constant or that it is 2188 // loop invariant. Broadcast V and save the value for future uses. 2189 Value *B = getBroadcastInstrs(V); 2190 return WidenMap.splat(V, B); 2191 } 2192 2193 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2194 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2195 SmallVector<Constant *, 8> ShuffleMask; 2196 for (unsigned i = 0; i < VF; ++i) 2197 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 2198 2199 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 2200 ConstantVector::get(ShuffleMask), 2201 "reverse"); 2202 } 2203 2204 // Get a mask to interleave \p NumVec vectors into a wide vector. 2205 // I.e. <0, VF, VF*2, ..., VF*(NumVec-1), 1, VF+1, VF*2+1, ...> 2206 // E.g. For 2 interleaved vectors, if VF is 4, the mask is: 2207 // <0, 4, 1, 5, 2, 6, 3, 7> 2208 static Constant *getInterleavedMask(IRBuilder<> &Builder, unsigned VF, 2209 unsigned NumVec) { 2210 SmallVector<Constant *, 16> Mask; 2211 for (unsigned i = 0; i < VF; i++) 2212 for (unsigned j = 0; j < NumVec; j++) 2213 Mask.push_back(Builder.getInt32(j * VF + i)); 2214 2215 return ConstantVector::get(Mask); 2216 } 2217 2218 // Get the strided mask starting from index \p Start. 2219 // I.e. <Start, Start + Stride, ..., Start + Stride*(VF-1)> 2220 static Constant *getStridedMask(IRBuilder<> &Builder, unsigned Start, 2221 unsigned Stride, unsigned VF) { 2222 SmallVector<Constant *, 16> Mask; 2223 for (unsigned i = 0; i < VF; i++) 2224 Mask.push_back(Builder.getInt32(Start + i * Stride)); 2225 2226 return ConstantVector::get(Mask); 2227 } 2228 2229 // Get a mask of two parts: The first part consists of sequential integers 2230 // starting from 0, The second part consists of UNDEFs. 2231 // I.e. <0, 1, 2, ..., NumInt - 1, undef, ..., undef> 2232 static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned NumInt, 2233 unsigned NumUndef) { 2234 SmallVector<Constant *, 16> Mask; 2235 for (unsigned i = 0; i < NumInt; i++) 2236 Mask.push_back(Builder.getInt32(i)); 2237 2238 Constant *Undef = UndefValue::get(Builder.getInt32Ty()); 2239 for (unsigned i = 0; i < NumUndef; i++) 2240 Mask.push_back(Undef); 2241 2242 return ConstantVector::get(Mask); 2243 } 2244 2245 // Concatenate two vectors with the same element type. The 2nd vector should 2246 // not have more elements than the 1st vector. If the 2nd vector has less 2247 // elements, extend it with UNDEFs. 2248 static Value *ConcatenateTwoVectors(IRBuilder<> &Builder, Value *V1, 2249 Value *V2) { 2250 VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType()); 2251 VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType()); 2252 assert(VecTy1 && VecTy2 && 2253 VecTy1->getScalarType() == VecTy2->getScalarType() && 2254 "Expect two vectors with the same element type"); 2255 2256 unsigned NumElts1 = VecTy1->getNumElements(); 2257 unsigned NumElts2 = VecTy2->getNumElements(); 2258 assert(NumElts1 >= NumElts2 && "Unexpect the first vector has less elements"); 2259 2260 if (NumElts1 > NumElts2) { 2261 // Extend with UNDEFs. 2262 Constant *ExtMask = 2263 getSequentialMask(Builder, NumElts2, NumElts1 - NumElts2); 2264 V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask); 2265 } 2266 2267 Constant *Mask = getSequentialMask(Builder, NumElts1 + NumElts2, 0); 2268 return Builder.CreateShuffleVector(V1, V2, Mask); 2269 } 2270 2271 // Concatenate vectors in the given list. All vectors have the same type. 2272 static Value *ConcatenateVectors(IRBuilder<> &Builder, 2273 ArrayRef<Value *> InputList) { 2274 unsigned NumVec = InputList.size(); 2275 assert(NumVec > 1 && "Should be at least two vectors"); 2276 2277 SmallVector<Value *, 8> ResList; 2278 ResList.append(InputList.begin(), InputList.end()); 2279 do { 2280 SmallVector<Value *, 8> TmpList; 2281 for (unsigned i = 0; i < NumVec - 1; i += 2) { 2282 Value *V0 = ResList[i], *V1 = ResList[i + 1]; 2283 assert((V0->getType() == V1->getType() || i == NumVec - 2) && 2284 "Only the last vector may have a different type"); 2285 2286 TmpList.push_back(ConcatenateTwoVectors(Builder, V0, V1)); 2287 } 2288 2289 // Push the last vector if the total number of vectors is odd. 2290 if (NumVec % 2 != 0) 2291 TmpList.push_back(ResList[NumVec - 1]); 2292 2293 ResList = TmpList; 2294 NumVec = ResList.size(); 2295 } while (NumVec > 1); 2296 2297 return ResList[0]; 2298 } 2299 2300 // Try to vectorize the interleave group that \p Instr belongs to. 2301 // 2302 // E.g. Translate following interleaved load group (factor = 3): 2303 // for (i = 0; i < N; i+=3) { 2304 // R = Pic[i]; // Member of index 0 2305 // G = Pic[i+1]; // Member of index 1 2306 // B = Pic[i+2]; // Member of index 2 2307 // ... // do something to R, G, B 2308 // } 2309 // To: 2310 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2311 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 2312 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 2313 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 2314 // 2315 // Or translate following interleaved store group (factor = 3): 2316 // for (i = 0; i < N; i+=3) { 2317 // ... do something to R, G, B 2318 // Pic[i] = R; // Member of index 0 2319 // Pic[i+1] = G; // Member of index 1 2320 // Pic[i+2] = B; // Member of index 2 2321 // } 2322 // To: 2323 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2324 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 2325 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2326 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2327 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2328 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) { 2329 const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr); 2330 assert(Group && "Fail to get an interleaved access group."); 2331 2332 // Skip if current instruction is not the insert position. 2333 if (Instr != Group->getInsertPos()) 2334 return; 2335 2336 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2337 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2338 Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand(); 2339 2340 // Prepare for the vector type of the interleaved load/store. 2341 Type *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 2342 unsigned InterleaveFactor = Group->getFactor(); 2343 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF); 2344 Type *PtrTy = VecTy->getPointerTo(Ptr->getType()->getPointerAddressSpace()); 2345 2346 // Prepare for the new pointers. 2347 setDebugLocFromInst(Builder, Ptr); 2348 VectorParts &PtrParts = getVectorValue(Ptr); 2349 SmallVector<Value *, 2> NewPtrs; 2350 unsigned Index = Group->getIndex(Instr); 2351 for (unsigned Part = 0; Part < UF; Part++) { 2352 // Extract the pointer for current instruction from the pointer vector. A 2353 // reverse access uses the pointer in the last lane. 2354 Value *NewPtr = Builder.CreateExtractElement( 2355 PtrParts[Part], 2356 Group->isReverse() ? Builder.getInt32(VF - 1) : Builder.getInt32(0)); 2357 2358 // Notice current instruction could be any index. Need to adjust the address 2359 // to the member of index 0. 2360 // 2361 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2362 // b = A[i]; // Member of index 0 2363 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2364 // 2365 // E.g. A[i+1] = a; // Member of index 1 2366 // A[i] = b; // Member of index 0 2367 // A[i+2] = c; // Member of index 2 (Current instruction) 2368 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2369 NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index)); 2370 2371 // Cast to the vector pointer type. 2372 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy)); 2373 } 2374 2375 setDebugLocFromInst(Builder, Instr); 2376 Value *UndefVec = UndefValue::get(VecTy); 2377 2378 // Vectorize the interleaved load group. 2379 if (LI) { 2380 for (unsigned Part = 0; Part < UF; Part++) { 2381 Instruction *NewLoadInstr = Builder.CreateAlignedLoad( 2382 NewPtrs[Part], Group->getAlignment(), "wide.vec"); 2383 2384 for (unsigned i = 0; i < InterleaveFactor; i++) { 2385 Instruction *Member = Group->getMember(i); 2386 2387 // Skip the gaps in the group. 2388 if (!Member) 2389 continue; 2390 2391 Constant *StrideMask = getStridedMask(Builder, i, InterleaveFactor, VF); 2392 Value *StridedVec = Builder.CreateShuffleVector( 2393 NewLoadInstr, UndefVec, StrideMask, "strided.vec"); 2394 2395 // If this member has different type, cast the result type. 2396 if (Member->getType() != ScalarTy) { 2397 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2398 StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy); 2399 } 2400 2401 VectorParts &Entry = WidenMap.get(Member); 2402 Entry[Part] = 2403 Group->isReverse() ? reverseVector(StridedVec) : StridedVec; 2404 } 2405 2406 addMetadata(NewLoadInstr, Instr); 2407 } 2408 return; 2409 } 2410 2411 // The sub vector type for current instruction. 2412 VectorType *SubVT = VectorType::get(ScalarTy, VF); 2413 2414 // Vectorize the interleaved store group. 2415 for (unsigned Part = 0; Part < UF; Part++) { 2416 // Collect the stored vector from each member. 2417 SmallVector<Value *, 4> StoredVecs; 2418 for (unsigned i = 0; i < InterleaveFactor; i++) { 2419 // Interleaved store group doesn't allow a gap, so each index has a member 2420 Instruction *Member = Group->getMember(i); 2421 assert(Member && "Fail to get a member from an interleaved store group"); 2422 2423 Value *StoredVec = 2424 getVectorValue(cast<StoreInst>(Member)->getValueOperand())[Part]; 2425 if (Group->isReverse()) 2426 StoredVec = reverseVector(StoredVec); 2427 2428 // If this member has different type, cast it to an unified type. 2429 if (StoredVec->getType() != SubVT) 2430 StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT); 2431 2432 StoredVecs.push_back(StoredVec); 2433 } 2434 2435 // Concatenate all vectors into a wide vector. 2436 Value *WideVec = ConcatenateVectors(Builder, StoredVecs); 2437 2438 // Interleave the elements in the wide vector. 2439 Constant *IMask = getInterleavedMask(Builder, VF, InterleaveFactor); 2440 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask, 2441 "interleaved.vec"); 2442 2443 Instruction *NewStoreInstr = 2444 Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment()); 2445 addMetadata(NewStoreInstr, Instr); 2446 } 2447 } 2448 2449 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) { 2450 // Attempt to issue a wide load. 2451 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2452 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2453 2454 assert((LI || SI) && "Invalid Load/Store instruction"); 2455 2456 // Try to vectorize the interleave group if this access is interleaved. 2457 if (Legal->isAccessInterleaved(Instr)) 2458 return vectorizeInterleaveGroup(Instr); 2459 2460 Type *ScalarDataTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 2461 Type *DataTy = VectorType::get(ScalarDataTy, VF); 2462 Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand(); 2463 unsigned Alignment = LI ? LI->getAlignment() : SI->getAlignment(); 2464 // An alignment of 0 means target abi alignment. We need to use the scalar's 2465 // target abi alignment in such a case. 2466 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2467 if (!Alignment) 2468 Alignment = DL.getABITypeAlignment(ScalarDataTy); 2469 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2470 uint64_t ScalarAllocatedSize = DL.getTypeAllocSize(ScalarDataTy); 2471 uint64_t VectorElementSize = DL.getTypeStoreSize(DataTy) / VF; 2472 2473 if (SI && Legal->blockNeedsPredication(SI->getParent()) && 2474 !Legal->isMaskRequired(SI)) 2475 return scalarizeInstruction(Instr, true); 2476 2477 if (ScalarAllocatedSize != VectorElementSize) 2478 return scalarizeInstruction(Instr); 2479 2480 // If the pointer is loop invariant scalarize the load. 2481 if (LI && Legal->isUniform(Ptr)) 2482 return scalarizeInstruction(Instr); 2483 2484 // If the pointer is non-consecutive and gather/scatter is not supported 2485 // scalarize the instruction. 2486 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 2487 bool Reverse = ConsecutiveStride < 0; 2488 bool CreateGatherScatter = 2489 !ConsecutiveStride && ((LI && Legal->isLegalMaskedGather(ScalarDataTy)) || 2490 (SI && Legal->isLegalMaskedScatter(ScalarDataTy))); 2491 2492 if (!ConsecutiveStride && !CreateGatherScatter) 2493 return scalarizeInstruction(Instr); 2494 2495 Constant *Zero = Builder.getInt32(0); 2496 VectorParts &Entry = WidenMap.get(Instr); 2497 VectorParts VectorGep; 2498 2499 // Handle consecutive loads/stores. 2500 GetElementPtrInst *Gep = getGEPInstruction(Ptr); 2501 if (ConsecutiveStride) { 2502 if (Gep && Legal->isInductionVariable(Gep->getPointerOperand())) { 2503 setDebugLocFromInst(Builder, Gep); 2504 Value *PtrOperand = Gep->getPointerOperand(); 2505 Value *FirstBasePtr = getVectorValue(PtrOperand)[0]; 2506 FirstBasePtr = Builder.CreateExtractElement(FirstBasePtr, Zero); 2507 2508 // Create the new GEP with the new induction variable. 2509 GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone()); 2510 Gep2->setOperand(0, FirstBasePtr); 2511 Gep2->setName("gep.indvar.base"); 2512 Ptr = Builder.Insert(Gep2); 2513 } else if (Gep) { 2514 setDebugLocFromInst(Builder, Gep); 2515 assert(PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getPointerOperand()), 2516 OrigLoop) && 2517 "Base ptr must be invariant"); 2518 // The last index does not have to be the induction. It can be 2519 // consecutive and be a function of the index. For example A[I+1]; 2520 unsigned NumOperands = Gep->getNumOperands(); 2521 unsigned InductionOperand = getGEPInductionOperand(Gep); 2522 // Create the new GEP with the new induction variable. 2523 GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone()); 2524 2525 for (unsigned i = 0; i < NumOperands; ++i) { 2526 Value *GepOperand = Gep->getOperand(i); 2527 Instruction *GepOperandInst = dyn_cast<Instruction>(GepOperand); 2528 2529 // Update last index or loop invariant instruction anchored in loop. 2530 if (i == InductionOperand || 2531 (GepOperandInst && OrigLoop->contains(GepOperandInst))) { 2532 assert((i == InductionOperand || 2533 PSE.getSE()->isLoopInvariant(PSE.getSCEV(GepOperandInst), 2534 OrigLoop)) && 2535 "Must be last index or loop invariant"); 2536 2537 VectorParts &GEPParts = getVectorValue(GepOperand); 2538 2539 // If GepOperand is an induction variable, and there's a scalarized 2540 // version of it available, use it. Otherwise, we will need to create 2541 // an extractelement instruction. 2542 Value *Index = ScalarIVMap.count(GepOperand) 2543 ? ScalarIVMap[GepOperand][0] 2544 : Builder.CreateExtractElement(GEPParts[0], Zero); 2545 2546 Gep2->setOperand(i, Index); 2547 Gep2->setName("gep.indvar.idx"); 2548 } 2549 } 2550 Ptr = Builder.Insert(Gep2); 2551 } else { // No GEP 2552 // Use the induction element ptr. 2553 assert(isa<PHINode>(Ptr) && "Invalid induction ptr"); 2554 setDebugLocFromInst(Builder, Ptr); 2555 VectorParts &PtrVal = getVectorValue(Ptr); 2556 Ptr = Builder.CreateExtractElement(PtrVal[0], Zero); 2557 } 2558 } else { 2559 // At this point we should vector version of GEP for Gather or Scatter 2560 assert(CreateGatherScatter && "The instruction should be scalarized"); 2561 if (Gep) { 2562 // Vectorizing GEP, across UF parts. We want to get a vector value for base 2563 // and each index that's defined inside the loop, even if it is 2564 // loop-invariant but wasn't hoisted out. Otherwise we want to keep them 2565 // scalar. 2566 SmallVector<VectorParts, 4> OpsV; 2567 for (Value *Op : Gep->operands()) { 2568 Instruction *SrcInst = dyn_cast<Instruction>(Op); 2569 if (SrcInst && OrigLoop->contains(SrcInst)) 2570 OpsV.push_back(getVectorValue(Op)); 2571 else 2572 OpsV.push_back(VectorParts(UF, Op)); 2573 } 2574 for (unsigned Part = 0; Part < UF; ++Part) { 2575 SmallVector<Value *, 4> Ops; 2576 Value *GEPBasePtr = OpsV[0][Part]; 2577 for (unsigned i = 1; i < Gep->getNumOperands(); i++) 2578 Ops.push_back(OpsV[i][Part]); 2579 Value *NewGep = Builder.CreateGEP(GEPBasePtr, Ops, "VectorGep"); 2580 cast<GetElementPtrInst>(NewGep)->setIsInBounds(Gep->isInBounds()); 2581 assert(NewGep->getType()->isVectorTy() && "Expected vector GEP"); 2582 2583 NewGep = 2584 Builder.CreateBitCast(NewGep, VectorType::get(Ptr->getType(), VF)); 2585 VectorGep.push_back(NewGep); 2586 } 2587 } else 2588 VectorGep = getVectorValue(Ptr); 2589 } 2590 2591 VectorParts Mask = createBlockInMask(Instr->getParent()); 2592 // Handle Stores: 2593 if (SI) { 2594 assert(!Legal->isUniform(SI->getPointerOperand()) && 2595 "We do not allow storing to uniform addresses"); 2596 setDebugLocFromInst(Builder, SI); 2597 // We don't want to update the value in the map as it might be used in 2598 // another expression. So don't use a reference type for "StoredVal". 2599 VectorParts StoredVal = getVectorValue(SI->getValueOperand()); 2600 2601 for (unsigned Part = 0; Part < UF; ++Part) { 2602 Instruction *NewSI = nullptr; 2603 if (CreateGatherScatter) { 2604 Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr; 2605 NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part], 2606 Alignment, MaskPart); 2607 } else { 2608 // Calculate the pointer for the specific unroll-part. 2609 Value *PartPtr = 2610 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 2611 2612 if (Reverse) { 2613 // If we store to reverse consecutive memory locations, then we need 2614 // to reverse the order of elements in the stored value. 2615 StoredVal[Part] = reverseVector(StoredVal[Part]); 2616 // If the address is consecutive but reversed, then the 2617 // wide store needs to start at the last vector element. 2618 PartPtr = 2619 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 2620 PartPtr = 2621 Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 2622 Mask[Part] = reverseVector(Mask[Part]); 2623 } 2624 2625 Value *VecPtr = 2626 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2627 2628 if (Legal->isMaskRequired(SI)) 2629 NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment, 2630 Mask[Part]); 2631 else 2632 NewSI = 2633 Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment); 2634 } 2635 addMetadata(NewSI, SI); 2636 } 2637 return; 2638 } 2639 2640 // Handle loads. 2641 assert(LI && "Must have a load instruction"); 2642 setDebugLocFromInst(Builder, LI); 2643 for (unsigned Part = 0; Part < UF; ++Part) { 2644 Instruction *NewLI; 2645 if (CreateGatherScatter) { 2646 Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr; 2647 NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, MaskPart, 2648 0, "wide.masked.gather"); 2649 Entry[Part] = NewLI; 2650 } else { 2651 // Calculate the pointer for the specific unroll-part. 2652 Value *PartPtr = 2653 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 2654 2655 if (Reverse) { 2656 // If the address is consecutive but reversed, then the 2657 // wide load needs to start at the last vector element. 2658 PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 2659 PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 2660 Mask[Part] = reverseVector(Mask[Part]); 2661 } 2662 2663 Value *VecPtr = 2664 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2665 if (Legal->isMaskRequired(LI)) 2666 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 2667 UndefValue::get(DataTy), 2668 "wide.masked.load"); 2669 else 2670 NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load"); 2671 Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI; 2672 } 2673 addMetadata(NewLI, LI); 2674 } 2675 } 2676 2677 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2678 bool IfPredicateStore) { 2679 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2680 // Holds vector parameters or scalars, in case of uniform vals. 2681 SmallVector<VectorParts, 4> Params; 2682 2683 setDebugLocFromInst(Builder, Instr); 2684 2685 // Find all of the vectorized parameters. 2686 for (Value *SrcOp : Instr->operands()) { 2687 // If we are accessing the old induction variable, use the new one. 2688 if (SrcOp == OldInduction) { 2689 Params.push_back(getVectorValue(SrcOp)); 2690 continue; 2691 } 2692 2693 // Try using previously calculated values. 2694 auto *SrcInst = dyn_cast<Instruction>(SrcOp); 2695 2696 // If the src is an instruction that appeared earlier in the basic block, 2697 // then it should already be vectorized. 2698 if (SrcInst && OrigLoop->contains(SrcInst)) { 2699 assert(WidenMap.has(SrcInst) && "Source operand is unavailable"); 2700 // The parameter is a vector value from earlier. 2701 Params.push_back(WidenMap.get(SrcInst)); 2702 } else { 2703 // The parameter is a scalar from outside the loop. Maybe even a constant. 2704 VectorParts Scalars; 2705 Scalars.append(UF, SrcOp); 2706 Params.push_back(Scalars); 2707 } 2708 } 2709 2710 assert(Params.size() == Instr->getNumOperands() && 2711 "Invalid number of operands"); 2712 2713 // Does this instruction return a value ? 2714 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2715 2716 Value *UndefVec = 2717 IsVoidRetTy ? nullptr 2718 : UndefValue::get(VectorType::get(Instr->getType(), VF)); 2719 // Create a new entry in the WidenMap and initialize it to Undef or Null. 2720 VectorParts &VecResults = WidenMap.splat(Instr, UndefVec); 2721 2722 VectorParts Cond; 2723 if (IfPredicateStore) { 2724 assert(Instr->getParent()->getSinglePredecessor() && 2725 "Only support single predecessor blocks"); 2726 Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(), 2727 Instr->getParent()); 2728 } 2729 2730 // For each vector unroll 'part': 2731 for (unsigned Part = 0; Part < UF; ++Part) { 2732 // For each scalar that we create: 2733 for (unsigned Width = 0; Width < VF; ++Width) { 2734 2735 // Start if-block. 2736 Value *Cmp = nullptr; 2737 if (IfPredicateStore) { 2738 Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Width)); 2739 Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp, 2740 ConstantInt::get(Cmp->getType(), 1)); 2741 } 2742 2743 Instruction *Cloned = Instr->clone(); 2744 if (!IsVoidRetTy) 2745 Cloned->setName(Instr->getName() + ".cloned"); 2746 // Replace the operands of the cloned instructions with extracted scalars. 2747 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 2748 2749 // If the operand is an induction variable, and there's a scalarized 2750 // version of it available, use it. Otherwise, we will need to create 2751 // an extractelement instruction if vectorizing. 2752 auto *NewOp = Params[op][Part]; 2753 auto *ScalarOp = Instr->getOperand(op); 2754 if (ScalarIVMap.count(ScalarOp)) 2755 NewOp = ScalarIVMap[ScalarOp][VF * Part + Width]; 2756 else if (NewOp->getType()->isVectorTy()) 2757 NewOp = Builder.CreateExtractElement(NewOp, Builder.getInt32(Width)); 2758 Cloned->setOperand(op, NewOp); 2759 } 2760 addNewMetadata(Cloned, Instr); 2761 2762 // Place the cloned scalar in the new loop. 2763 Builder.Insert(Cloned); 2764 2765 // If we just cloned a new assumption, add it the assumption cache. 2766 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 2767 if (II->getIntrinsicID() == Intrinsic::assume) 2768 AC->registerAssumption(II); 2769 2770 // If the original scalar returns a value we need to place it in a vector 2771 // so that future users will be able to use it. 2772 if (!IsVoidRetTy) 2773 VecResults[Part] = Builder.CreateInsertElement(VecResults[Part], Cloned, 2774 Builder.getInt32(Width)); 2775 // End if-block. 2776 if (IfPredicateStore) 2777 PredicatedStores.push_back( 2778 std::make_pair(cast<StoreInst>(Cloned), Cmp)); 2779 } 2780 } 2781 } 2782 2783 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 2784 Value *End, Value *Step, 2785 Instruction *DL) { 2786 BasicBlock *Header = L->getHeader(); 2787 BasicBlock *Latch = L->getLoopLatch(); 2788 // As we're just creating this loop, it's possible no latch exists 2789 // yet. If so, use the header as this will be a single block loop. 2790 if (!Latch) 2791 Latch = Header; 2792 2793 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 2794 setDebugLocFromInst(Builder, getDebugLocFromInstOrOperands(OldInduction)); 2795 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 2796 2797 Builder.SetInsertPoint(Latch->getTerminator()); 2798 2799 // Create i+1 and fill the PHINode. 2800 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 2801 Induction->addIncoming(Start, L->getLoopPreheader()); 2802 Induction->addIncoming(Next, Latch); 2803 // Create the compare. 2804 Value *ICmp = Builder.CreateICmpEQ(Next, End); 2805 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 2806 2807 // Now we have two terminators. Remove the old one from the block. 2808 Latch->getTerminator()->eraseFromParent(); 2809 2810 return Induction; 2811 } 2812 2813 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 2814 if (TripCount) 2815 return TripCount; 2816 2817 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2818 // Find the loop boundaries. 2819 ScalarEvolution *SE = PSE.getSE(); 2820 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2821 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 2822 "Invalid loop count"); 2823 2824 Type *IdxTy = Legal->getWidestInductionType(); 2825 2826 // The exit count might have the type of i64 while the phi is i32. This can 2827 // happen if we have an induction variable that is sign extended before the 2828 // compare. The only way that we get a backedge taken count is that the 2829 // induction variable was signed and as such will not overflow. In such a case 2830 // truncation is legal. 2831 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() > 2832 IdxTy->getPrimitiveSizeInBits()) 2833 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2834 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2835 2836 // Get the total trip count from the count by adding 1. 2837 const SCEV *ExitCount = SE->getAddExpr( 2838 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2839 2840 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2841 2842 // Expand the trip count and place the new instructions in the preheader. 2843 // Notice that the pre-header does not change, only the loop body. 2844 SCEVExpander Exp(*SE, DL, "induction"); 2845 2846 // Count holds the overall loop count (N). 2847 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2848 L->getLoopPreheader()->getTerminator()); 2849 2850 if (TripCount->getType()->isPointerTy()) 2851 TripCount = 2852 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2853 L->getLoopPreheader()->getTerminator()); 2854 2855 return TripCount; 2856 } 2857 2858 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 2859 if (VectorTripCount) 2860 return VectorTripCount; 2861 2862 Value *TC = getOrCreateTripCount(L); 2863 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2864 2865 // Now we need to generate the expression for the part of the loop that the 2866 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2867 // iterations are not required for correctness, or N - Step, otherwise. Step 2868 // is equal to the vectorization factor (number of SIMD elements) times the 2869 // unroll factor (number of SIMD instructions). 2870 Constant *Step = ConstantInt::get(TC->getType(), VF * UF); 2871 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2872 2873 // If there is a non-reversed interleaved group that may speculatively access 2874 // memory out-of-bounds, we need to ensure that there will be at least one 2875 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 2876 // the trip count, we set the remainder to be equal to the step. If the step 2877 // does not evenly divide the trip count, no adjustment is necessary since 2878 // there will already be scalar iterations. Note that the minimum iterations 2879 // check ensures that N >= Step. 2880 if (VF > 1 && Legal->requiresScalarEpilogue()) { 2881 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2882 R = Builder.CreateSelect(IsZero, Step, R); 2883 } 2884 2885 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2886 2887 return VectorTripCount; 2888 } 2889 2890 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 2891 BasicBlock *Bypass) { 2892 Value *Count = getOrCreateTripCount(L); 2893 BasicBlock *BB = L->getLoopPreheader(); 2894 IRBuilder<> Builder(BB->getTerminator()); 2895 2896 // Generate code to check that the loop's trip count that we computed by 2897 // adding one to the backedge-taken count will not overflow. 2898 Value *CheckMinIters = Builder.CreateICmpULT( 2899 Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check"); 2900 2901 BasicBlock *NewBB = 2902 BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked"); 2903 // Update dominator tree immediately if the generated block is a 2904 // LoopBypassBlock because SCEV expansions to generate loop bypass 2905 // checks may query it before the current function is finished. 2906 DT->addNewBlock(NewBB, BB); 2907 if (L->getParentLoop()) 2908 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2909 ReplaceInstWithInst(BB->getTerminator(), 2910 BranchInst::Create(Bypass, NewBB, CheckMinIters)); 2911 LoopBypassBlocks.push_back(BB); 2912 } 2913 2914 void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L, 2915 BasicBlock *Bypass) { 2916 Value *TC = getOrCreateVectorTripCount(L); 2917 BasicBlock *BB = L->getLoopPreheader(); 2918 IRBuilder<> Builder(BB->getTerminator()); 2919 2920 // Now, compare the new count to zero. If it is zero skip the vector loop and 2921 // jump to the scalar loop. 2922 Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()), 2923 "cmp.zero"); 2924 2925 // Generate code to check that the loop's trip count that we computed by 2926 // adding one to the backedge-taken count will not overflow. 2927 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2928 // Update dominator tree immediately if the generated block is a 2929 // LoopBypassBlock because SCEV expansions to generate loop bypass 2930 // checks may query it before the current function is finished. 2931 DT->addNewBlock(NewBB, BB); 2932 if (L->getParentLoop()) 2933 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2934 ReplaceInstWithInst(BB->getTerminator(), 2935 BranchInst::Create(Bypass, NewBB, Cmp)); 2936 LoopBypassBlocks.push_back(BB); 2937 } 2938 2939 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 2940 BasicBlock *BB = L->getLoopPreheader(); 2941 2942 // Generate the code to check that the SCEV assumptions that we made. 2943 // We want the new basic block to start at the first instruction in a 2944 // sequence of instructions that form a check. 2945 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(), 2946 "scev.check"); 2947 Value *SCEVCheck = 2948 Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator()); 2949 2950 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck)) 2951 if (C->isZero()) 2952 return; 2953 2954 // Create a new block containing the stride check. 2955 BB->setName("vector.scevcheck"); 2956 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2957 // Update dominator tree immediately if the generated block is a 2958 // LoopBypassBlock because SCEV expansions to generate loop bypass 2959 // checks may query it before the current function is finished. 2960 DT->addNewBlock(NewBB, BB); 2961 if (L->getParentLoop()) 2962 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2963 ReplaceInstWithInst(BB->getTerminator(), 2964 BranchInst::Create(Bypass, NewBB, SCEVCheck)); 2965 LoopBypassBlocks.push_back(BB); 2966 AddedSafetyChecks = true; 2967 } 2968 2969 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) { 2970 BasicBlock *BB = L->getLoopPreheader(); 2971 2972 // Generate the code that checks in runtime if arrays overlap. We put the 2973 // checks into a separate block to make the more common case of few elements 2974 // faster. 2975 Instruction *FirstCheckInst; 2976 Instruction *MemRuntimeCheck; 2977 std::tie(FirstCheckInst, MemRuntimeCheck) = 2978 Legal->getLAI()->addRuntimeChecks(BB->getTerminator()); 2979 if (!MemRuntimeCheck) 2980 return; 2981 2982 // Create a new block containing the memory check. 2983 BB->setName("vector.memcheck"); 2984 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2985 // Update dominator tree immediately if the generated block is a 2986 // LoopBypassBlock because SCEV expansions to generate loop bypass 2987 // checks may query it before the current function is finished. 2988 DT->addNewBlock(NewBB, BB); 2989 if (L->getParentLoop()) 2990 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2991 ReplaceInstWithInst(BB->getTerminator(), 2992 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck)); 2993 LoopBypassBlocks.push_back(BB); 2994 AddedSafetyChecks = true; 2995 2996 // We currently don't use LoopVersioning for the actual loop cloning but we 2997 // still use it to add the noalias metadata. 2998 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 2999 PSE.getSE()); 3000 LVer->prepareNoAliasMetadata(); 3001 } 3002 3003 void InnerLoopVectorizer::createEmptyLoop() { 3004 /* 3005 In this function we generate a new loop. The new loop will contain 3006 the vectorized instructions while the old loop will continue to run the 3007 scalar remainder. 3008 3009 [ ] <-- loop iteration number check. 3010 / | 3011 / v 3012 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3013 | / | 3014 | / v 3015 || [ ] <-- vector pre header. 3016 |/ | 3017 | v 3018 | [ ] \ 3019 | [ ]_| <-- vector loop. 3020 | | 3021 | v 3022 | -[ ] <--- middle-block. 3023 | / | 3024 | / v 3025 -|- >[ ] <--- new preheader. 3026 | | 3027 | v 3028 | [ ] \ 3029 | [ ]_| <-- old scalar loop to handle remainder. 3030 \ | 3031 \ v 3032 >[ ] <-- exit block. 3033 ... 3034 */ 3035 3036 BasicBlock *OldBasicBlock = OrigLoop->getHeader(); 3037 BasicBlock *VectorPH = OrigLoop->getLoopPreheader(); 3038 BasicBlock *ExitBlock = OrigLoop->getExitBlock(); 3039 assert(VectorPH && "Invalid loop structure"); 3040 assert(ExitBlock && "Must have an exit block"); 3041 3042 // Some loops have a single integer induction variable, while other loops 3043 // don't. One example is c++ iterators that often have multiple pointer 3044 // induction variables. In the code below we also support a case where we 3045 // don't have a single induction variable. 3046 // 3047 // We try to obtain an induction variable from the original loop as hard 3048 // as possible. However if we don't find one that: 3049 // - is an integer 3050 // - counts from zero, stepping by one 3051 // - is the size of the widest induction variable type 3052 // then we create a new one. 3053 OldInduction = Legal->getInduction(); 3054 Type *IdxTy = Legal->getWidestInductionType(); 3055 3056 // Split the single block loop into the two loop structure described above. 3057 BasicBlock *VecBody = 3058 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body"); 3059 BasicBlock *MiddleBlock = 3060 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block"); 3061 BasicBlock *ScalarPH = 3062 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph"); 3063 3064 // Create and register the new vector loop. 3065 Loop *Lp = new Loop(); 3066 Loop *ParentLoop = OrigLoop->getParentLoop(); 3067 3068 // Insert the new loop into the loop nest and register the new basic blocks 3069 // before calling any utilities such as SCEV that require valid LoopInfo. 3070 if (ParentLoop) { 3071 ParentLoop->addChildLoop(Lp); 3072 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI); 3073 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI); 3074 } else { 3075 LI->addTopLevelLoop(Lp); 3076 } 3077 Lp->addBasicBlockToLoop(VecBody, *LI); 3078 3079 // Find the loop boundaries. 3080 Value *Count = getOrCreateTripCount(Lp); 3081 3082 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3083 3084 // We need to test whether the backedge-taken count is uint##_max. Adding one 3085 // to it will cause overflow and an incorrect loop trip count in the vector 3086 // body. In case of overflow we want to directly jump to the scalar remainder 3087 // loop. 3088 emitMinimumIterationCountCheck(Lp, ScalarPH); 3089 // Now, compare the new count to zero. If it is zero skip the vector loop and 3090 // jump to the scalar loop. 3091 emitVectorLoopEnteredCheck(Lp, ScalarPH); 3092 // Generate the code to check any assumptions that we've made for SCEV 3093 // expressions. 3094 emitSCEVChecks(Lp, ScalarPH); 3095 3096 // Generate the code that checks in runtime if arrays overlap. We put the 3097 // checks into a separate block to make the more common case of few elements 3098 // faster. 3099 emitMemRuntimeChecks(Lp, ScalarPH); 3100 3101 // Generate the induction variable. 3102 // The loop step is equal to the vectorization factor (num of SIMD elements) 3103 // times the unroll factor (num of SIMD instructions). 3104 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3105 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 3106 Induction = 3107 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3108 getDebugLocFromInstOrOperands(OldInduction)); 3109 3110 // We are going to resume the execution of the scalar loop. 3111 // Go over all of the induction variables that we found and fix the 3112 // PHIs that are left in the scalar version of the loop. 3113 // The starting values of PHI nodes depend on the counter of the last 3114 // iteration in the vectorized loop. 3115 // If we come from a bypass edge then we need to start from the original 3116 // start value. 3117 3118 // This variable saves the new starting index for the scalar loop. It is used 3119 // to test if there are any tail iterations left once the vector loop has 3120 // completed. 3121 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars(); 3122 for (auto &InductionEntry : *List) { 3123 PHINode *OrigPhi = InductionEntry.first; 3124 InductionDescriptor II = InductionEntry.second; 3125 3126 // Create phi nodes to merge from the backedge-taken check block. 3127 PHINode *BCResumeVal = PHINode::Create( 3128 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator()); 3129 Value *EndValue; 3130 if (OrigPhi == OldInduction) { 3131 // We know what the end value is. 3132 EndValue = CountRoundDown; 3133 } else { 3134 IRBuilder<> B(LoopBypassBlocks.back()->getTerminator()); 3135 Type *StepType = II.getStep()->getType(); 3136 Instruction::CastOps CastOp = 3137 CastInst::getCastOpcode(CountRoundDown, true, StepType, true); 3138 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd"); 3139 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 3140 EndValue = II.transform(B, CRD, PSE.getSE(), DL); 3141 EndValue->setName("ind.end"); 3142 } 3143 3144 // The new PHI merges the original incoming value, in case of a bypass, 3145 // or the value at the end of the vectorized loop. 3146 BCResumeVal->addIncoming(EndValue, MiddleBlock); 3147 3148 // Fix up external users of the induction variable. 3149 fixupIVUsers(OrigPhi, II, CountRoundDown, EndValue, MiddleBlock); 3150 3151 // Fix the scalar body counter (PHI node). 3152 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH); 3153 3154 // The old induction's phi node in the scalar body needs the truncated 3155 // value. 3156 for (BasicBlock *BB : LoopBypassBlocks) 3157 BCResumeVal->addIncoming(II.getStartValue(), BB); 3158 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal); 3159 } 3160 3161 // Add a check in the middle block to see if we have completed 3162 // all of the iterations in the first vector loop. 3163 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3164 Value *CmpN = 3165 CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 3166 CountRoundDown, "cmp.n", MiddleBlock->getTerminator()); 3167 ReplaceInstWithInst(MiddleBlock->getTerminator(), 3168 BranchInst::Create(ExitBlock, ScalarPH, CmpN)); 3169 3170 // Get ready to start creating new instructions into the vectorized body. 3171 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt()); 3172 3173 // Save the state. 3174 LoopVectorPreHeader = Lp->getLoopPreheader(); 3175 LoopScalarPreHeader = ScalarPH; 3176 LoopMiddleBlock = MiddleBlock; 3177 LoopExitBlock = ExitBlock; 3178 LoopVectorBody = VecBody; 3179 LoopScalarBody = OldBasicBlock; 3180 3181 // Keep all loop hints from the original loop on the vector loop (we'll 3182 // replace the vectorizer-specific hints below). 3183 if (MDNode *LID = OrigLoop->getLoopID()) 3184 Lp->setLoopID(LID); 3185 3186 LoopVectorizeHints Hints(Lp, true, *ORE); 3187 Hints.setAlreadyVectorized(); 3188 } 3189 3190 // Fix up external users of the induction variable. At this point, we are 3191 // in LCSSA form, with all external PHIs that use the IV having one input value, 3192 // coming from the remainder loop. We need those PHIs to also have a correct 3193 // value for the IV when arriving directly from the middle block. 3194 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3195 const InductionDescriptor &II, 3196 Value *CountRoundDown, Value *EndValue, 3197 BasicBlock *MiddleBlock) { 3198 // There are two kinds of external IV usages - those that use the value 3199 // computed in the last iteration (the PHI) and those that use the penultimate 3200 // value (the value that feeds into the phi from the loop latch). 3201 // We allow both, but they, obviously, have different values. 3202 3203 assert(OrigLoop->getExitBlock() && "Expected a single exit block"); 3204 3205 DenseMap<Value *, Value *> MissingVals; 3206 3207 // An external user of the last iteration's value should see the value that 3208 // the remainder loop uses to initialize its own IV. 3209 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3210 for (User *U : PostInc->users()) { 3211 Instruction *UI = cast<Instruction>(U); 3212 if (!OrigLoop->contains(UI)) { 3213 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3214 MissingVals[UI] = EndValue; 3215 } 3216 } 3217 3218 // An external user of the penultimate value need to see EndValue - Step. 3219 // The simplest way to get this is to recompute it from the constituent SCEVs, 3220 // that is Start + (Step * (CRD - 1)). 3221 for (User *U : OrigPhi->users()) { 3222 auto *UI = cast<Instruction>(U); 3223 if (!OrigLoop->contains(UI)) { 3224 const DataLayout &DL = 3225 OrigLoop->getHeader()->getModule()->getDataLayout(); 3226 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3227 3228 IRBuilder<> B(MiddleBlock->getTerminator()); 3229 Value *CountMinusOne = B.CreateSub( 3230 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3231 Value *CMO = B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType(), 3232 "cast.cmo"); 3233 Value *Escape = II.transform(B, CMO, PSE.getSE(), DL); 3234 Escape->setName("ind.escape"); 3235 MissingVals[UI] = Escape; 3236 } 3237 } 3238 3239 for (auto &I : MissingVals) { 3240 PHINode *PHI = cast<PHINode>(I.first); 3241 // One corner case we have to handle is two IVs "chasing" each-other, 3242 // that is %IV2 = phi [...], [ %IV1, %latch ] 3243 // In this case, if IV1 has an external use, we need to avoid adding both 3244 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3245 // don't already have an incoming value for the middle block. 3246 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3247 PHI->addIncoming(I.second, MiddleBlock); 3248 } 3249 } 3250 3251 namespace { 3252 struct CSEDenseMapInfo { 3253 static bool canHandle(Instruction *I) { 3254 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3255 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3256 } 3257 static inline Instruction *getEmptyKey() { 3258 return DenseMapInfo<Instruction *>::getEmptyKey(); 3259 } 3260 static inline Instruction *getTombstoneKey() { 3261 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3262 } 3263 static unsigned getHashValue(Instruction *I) { 3264 assert(canHandle(I) && "Unknown instruction!"); 3265 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3266 I->value_op_end())); 3267 } 3268 static bool isEqual(Instruction *LHS, Instruction *RHS) { 3269 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3270 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3271 return LHS == RHS; 3272 return LHS->isIdenticalTo(RHS); 3273 } 3274 }; 3275 } 3276 3277 ///\brief Perform cse of induction variable instructions. 3278 static void cse(BasicBlock *BB) { 3279 // Perform simple cse. 3280 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3281 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3282 Instruction *In = &*I++; 3283 3284 if (!CSEDenseMapInfo::canHandle(In)) 3285 continue; 3286 3287 // Check if we can replace this instruction with any of the 3288 // visited instructions. 3289 if (Instruction *V = CSEMap.lookup(In)) { 3290 In->replaceAllUsesWith(V); 3291 In->eraseFromParent(); 3292 continue; 3293 } 3294 3295 CSEMap[In] = In; 3296 } 3297 } 3298 3299 /// \brief Adds a 'fast' flag to floating point operations. 3300 static Value *addFastMathFlag(Value *V) { 3301 if (isa<FPMathOperator>(V)) { 3302 FastMathFlags Flags; 3303 Flags.setUnsafeAlgebra(); 3304 cast<Instruction>(V)->setFastMathFlags(Flags); 3305 } 3306 return V; 3307 } 3308 3309 /// Estimate the overhead of scalarizing a value. Insert and Extract are set if 3310 /// the result needs to be inserted and/or extracted from vectors. 3311 static unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract, 3312 const TargetTransformInfo &TTI) { 3313 if (Ty->isVoidTy()) 3314 return 0; 3315 3316 assert(Ty->isVectorTy() && "Can only scalarize vectors"); 3317 unsigned Cost = 0; 3318 3319 for (unsigned I = 0, E = Ty->getVectorNumElements(); I < E; ++I) { 3320 if (Insert) 3321 Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, I); 3322 if (Extract) 3323 Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, I); 3324 } 3325 3326 return Cost; 3327 } 3328 3329 // Estimate cost of a call instruction CI if it were vectorized with factor VF. 3330 // Return the cost of the instruction, including scalarization overhead if it's 3331 // needed. The flag NeedToScalarize shows if the call needs to be scalarized - 3332 // i.e. either vector version isn't available, or is too expensive. 3333 static unsigned getVectorCallCost(CallInst *CI, unsigned VF, 3334 const TargetTransformInfo &TTI, 3335 const TargetLibraryInfo *TLI, 3336 bool &NeedToScalarize) { 3337 Function *F = CI->getCalledFunction(); 3338 StringRef FnName = CI->getCalledFunction()->getName(); 3339 Type *ScalarRetTy = CI->getType(); 3340 SmallVector<Type *, 4> Tys, ScalarTys; 3341 for (auto &ArgOp : CI->arg_operands()) 3342 ScalarTys.push_back(ArgOp->getType()); 3343 3344 // Estimate cost of scalarized vector call. The source operands are assumed 3345 // to be vectors, so we need to extract individual elements from there, 3346 // execute VF scalar calls, and then gather the result into the vector return 3347 // value. 3348 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3349 if (VF == 1) 3350 return ScalarCallCost; 3351 3352 // Compute corresponding vector type for return value and arguments. 3353 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3354 for (Type *ScalarTy : ScalarTys) 3355 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3356 3357 // Compute costs of unpacking argument values for the scalar calls and 3358 // packing the return values to a vector. 3359 unsigned ScalarizationCost = 3360 getScalarizationOverhead(RetTy, true, false, TTI); 3361 for (Type *Ty : Tys) 3362 ScalarizationCost += getScalarizationOverhead(Ty, false, true, TTI); 3363 3364 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3365 3366 // If we can't emit a vector call for this function, then the currently found 3367 // cost is the cost we need to return. 3368 NeedToScalarize = true; 3369 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3370 return Cost; 3371 3372 // If the corresponding vector cost is cheaper, return its cost. 3373 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3374 if (VectorCallCost < Cost) { 3375 NeedToScalarize = false; 3376 return VectorCallCost; 3377 } 3378 return Cost; 3379 } 3380 3381 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3382 // factor VF. Return the cost of the instruction, including scalarization 3383 // overhead if it's needed. 3384 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3385 const TargetTransformInfo &TTI, 3386 const TargetLibraryInfo *TLI) { 3387 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3388 assert(ID && "Expected intrinsic call!"); 3389 3390 Type *RetTy = ToVectorTy(CI->getType(), VF); 3391 SmallVector<Type *, 4> Tys; 3392 for (Value *ArgOperand : CI->arg_operands()) 3393 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 3394 3395 FastMathFlags FMF; 3396 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3397 FMF = FPMO->getFastMathFlags(); 3398 3399 return TTI.getIntrinsicInstrCost(ID, RetTy, Tys, FMF); 3400 } 3401 3402 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3403 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3404 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3405 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3406 } 3407 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3408 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3409 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3410 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3411 } 3412 3413 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3414 // For every instruction `I` in MinBWs, truncate the operands, create a 3415 // truncated version of `I` and reextend its result. InstCombine runs 3416 // later and will remove any ext/trunc pairs. 3417 // 3418 SmallPtrSet<Value *, 4> Erased; 3419 for (const auto &KV : *MinBWs) { 3420 VectorParts &Parts = WidenMap.get(KV.first); 3421 for (Value *&I : Parts) { 3422 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3423 continue; 3424 Type *OriginalTy = I->getType(); 3425 Type *ScalarTruncatedTy = 3426 IntegerType::get(OriginalTy->getContext(), KV.second); 3427 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3428 OriginalTy->getVectorNumElements()); 3429 if (TruncatedTy == OriginalTy) 3430 continue; 3431 3432 IRBuilder<> B(cast<Instruction>(I)); 3433 auto ShrinkOperand = [&](Value *V) -> Value * { 3434 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3435 if (ZI->getSrcTy() == TruncatedTy) 3436 return ZI->getOperand(0); 3437 return B.CreateZExtOrTrunc(V, TruncatedTy); 3438 }; 3439 3440 // The actual instruction modification depends on the instruction type, 3441 // unfortunately. 3442 Value *NewI = nullptr; 3443 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3444 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3445 ShrinkOperand(BO->getOperand(1))); 3446 cast<BinaryOperator>(NewI)->copyIRFlags(I); 3447 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3448 NewI = 3449 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3450 ShrinkOperand(CI->getOperand(1))); 3451 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3452 NewI = B.CreateSelect(SI->getCondition(), 3453 ShrinkOperand(SI->getTrueValue()), 3454 ShrinkOperand(SI->getFalseValue())); 3455 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3456 switch (CI->getOpcode()) { 3457 default: 3458 llvm_unreachable("Unhandled cast!"); 3459 case Instruction::Trunc: 3460 NewI = ShrinkOperand(CI->getOperand(0)); 3461 break; 3462 case Instruction::SExt: 3463 NewI = B.CreateSExtOrTrunc( 3464 CI->getOperand(0), 3465 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3466 break; 3467 case Instruction::ZExt: 3468 NewI = B.CreateZExtOrTrunc( 3469 CI->getOperand(0), 3470 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3471 break; 3472 } 3473 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3474 auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements(); 3475 auto *O0 = B.CreateZExtOrTrunc( 3476 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3477 auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements(); 3478 auto *O1 = B.CreateZExtOrTrunc( 3479 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3480 3481 NewI = B.CreateShuffleVector(O0, O1, SI->getMask()); 3482 } else if (isa<LoadInst>(I)) { 3483 // Don't do anything with the operands, just extend the result. 3484 continue; 3485 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3486 auto Elements = IE->getOperand(0)->getType()->getVectorNumElements(); 3487 auto *O0 = B.CreateZExtOrTrunc( 3488 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3489 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3490 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3491 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3492 auto Elements = EE->getOperand(0)->getType()->getVectorNumElements(); 3493 auto *O0 = B.CreateZExtOrTrunc( 3494 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3495 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3496 } else { 3497 llvm_unreachable("Unhandled instruction type!"); 3498 } 3499 3500 // Lastly, extend the result. 3501 NewI->takeName(cast<Instruction>(I)); 3502 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3503 I->replaceAllUsesWith(Res); 3504 cast<Instruction>(I)->eraseFromParent(); 3505 Erased.insert(I); 3506 I = Res; 3507 } 3508 } 3509 3510 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3511 for (const auto &KV : *MinBWs) { 3512 VectorParts &Parts = WidenMap.get(KV.first); 3513 for (Value *&I : Parts) { 3514 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3515 if (Inst && Inst->use_empty()) { 3516 Value *NewI = Inst->getOperand(0); 3517 Inst->eraseFromParent(); 3518 I = NewI; 3519 } 3520 } 3521 } 3522 } 3523 3524 void InnerLoopVectorizer::vectorizeLoop() { 3525 //===------------------------------------------------===// 3526 // 3527 // Notice: any optimization or new instruction that go 3528 // into the code below should be also be implemented in 3529 // the cost-model. 3530 // 3531 //===------------------------------------------------===// 3532 Constant *Zero = Builder.getInt32(0); 3533 3534 // In order to support recurrences we need to be able to vectorize Phi nodes. 3535 // Phi nodes have cycles, so we need to vectorize them in two stages. First, 3536 // we create a new vector PHI node with no incoming edges. We use this value 3537 // when we vectorize all of the instructions that use the PHI. Next, after 3538 // all of the instructions in the block are complete we add the new incoming 3539 // edges to the PHI. At this point all of the instructions in the basic block 3540 // are vectorized, so we can use them to construct the PHI. 3541 PhiVector PHIsToFix; 3542 3543 // Scan the loop in a topological order to ensure that defs are vectorized 3544 // before users. 3545 LoopBlocksDFS DFS(OrigLoop); 3546 DFS.perform(LI); 3547 3548 // Vectorize all of the blocks in the original loop. 3549 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) 3550 vectorizeBlockInLoop(BB, &PHIsToFix); 3551 3552 // Insert truncates and extends for any truncated instructions as hints to 3553 // InstCombine. 3554 if (VF > 1) 3555 truncateToMinimalBitwidths(); 3556 3557 // At this point every instruction in the original loop is widened to a 3558 // vector form. Now we need to fix the recurrences in PHIsToFix. These PHI 3559 // nodes are currently empty because we did not want to introduce cycles. 3560 // This is the second stage of vectorizing recurrences. 3561 for (PHINode *Phi : PHIsToFix) { 3562 assert(Phi && "Unable to recover vectorized PHI"); 3563 3564 // Handle first-order recurrences that need to be fixed. 3565 if (Legal->isFirstOrderRecurrence(Phi)) { 3566 fixFirstOrderRecurrence(Phi); 3567 continue; 3568 } 3569 3570 // If the phi node is not a first-order recurrence, it must be a reduction. 3571 // Get it's reduction variable descriptor. 3572 assert(Legal->isReductionVariable(Phi) && 3573 "Unable to find the reduction variable"); 3574 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi]; 3575 3576 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind(); 3577 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue(); 3578 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr(); 3579 RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind = 3580 RdxDesc.getMinMaxRecurrenceKind(); 3581 setDebugLocFromInst(Builder, ReductionStartValue); 3582 3583 // We need to generate a reduction vector from the incoming scalar. 3584 // To do so, we need to generate the 'identity' vector and override 3585 // one of the elements with the incoming scalar reduction. We need 3586 // to do it in the vector-loop preheader. 3587 Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator()); 3588 3589 // This is the vector-clone of the value that leaves the loop. 3590 VectorParts &VectorExit = getVectorValue(LoopExitInst); 3591 Type *VecTy = VectorExit[0]->getType(); 3592 3593 // Find the reduction identity variable. Zero for addition, or, xor, 3594 // one for multiplication, -1 for And. 3595 Value *Identity; 3596 Value *VectorStart; 3597 if (RK == RecurrenceDescriptor::RK_IntegerMinMax || 3598 RK == RecurrenceDescriptor::RK_FloatMinMax) { 3599 // MinMax reduction have the start value as their identify. 3600 if (VF == 1) { 3601 VectorStart = Identity = ReductionStartValue; 3602 } else { 3603 VectorStart = Identity = 3604 Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident"); 3605 } 3606 } else { 3607 // Handle other reduction kinds: 3608 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity( 3609 RK, VecTy->getScalarType()); 3610 if (VF == 1) { 3611 Identity = Iden; 3612 // This vector is the Identity vector where the first element is the 3613 // incoming scalar reduction. 3614 VectorStart = ReductionStartValue; 3615 } else { 3616 Identity = ConstantVector::getSplat(VF, Iden); 3617 3618 // This vector is the Identity vector where the first element is the 3619 // incoming scalar reduction. 3620 VectorStart = 3621 Builder.CreateInsertElement(Identity, ReductionStartValue, Zero); 3622 } 3623 } 3624 3625 // Fix the vector-loop phi. 3626 3627 // Reductions do not have to start at zero. They can start with 3628 // any loop invariant values. 3629 VectorParts &VecRdxPhi = WidenMap.get(Phi); 3630 BasicBlock *Latch = OrigLoop->getLoopLatch(); 3631 Value *LoopVal = Phi->getIncomingValueForBlock(Latch); 3632 VectorParts &Val = getVectorValue(LoopVal); 3633 for (unsigned part = 0; part < UF; ++part) { 3634 // Make sure to add the reduction stat value only to the 3635 // first unroll part. 3636 Value *StartVal = (part == 0) ? VectorStart : Identity; 3637 cast<PHINode>(VecRdxPhi[part]) 3638 ->addIncoming(StartVal, LoopVectorPreHeader); 3639 cast<PHINode>(VecRdxPhi[part]) 3640 ->addIncoming(Val[part], LoopVectorBody); 3641 } 3642 3643 // Before each round, move the insertion point right between 3644 // the PHIs and the values we are going to write. 3645 // This allows us to write both PHINodes and the extractelement 3646 // instructions. 3647 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3648 3649 VectorParts RdxParts = getVectorValue(LoopExitInst); 3650 setDebugLocFromInst(Builder, LoopExitInst); 3651 3652 // If the vector reduction can be performed in a smaller type, we truncate 3653 // then extend the loop exit value to enable InstCombine to evaluate the 3654 // entire expression in the smaller type. 3655 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) { 3656 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 3657 Builder.SetInsertPoint(LoopVectorBody->getTerminator()); 3658 for (unsigned part = 0; part < UF; ++part) { 3659 Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy); 3660 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 3661 : Builder.CreateZExt(Trunc, VecTy); 3662 for (Value::user_iterator UI = RdxParts[part]->user_begin(); 3663 UI != RdxParts[part]->user_end();) 3664 if (*UI != Trunc) { 3665 (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd); 3666 RdxParts[part] = Extnd; 3667 } else { 3668 ++UI; 3669 } 3670 } 3671 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3672 for (unsigned part = 0; part < UF; ++part) 3673 RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy); 3674 } 3675 3676 // Reduce all of the unrolled parts into a single vector. 3677 Value *ReducedPartRdx = RdxParts[0]; 3678 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK); 3679 setDebugLocFromInst(Builder, ReducedPartRdx); 3680 for (unsigned part = 1; part < UF; ++part) { 3681 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3682 // Floating point operations had to be 'fast' to enable the reduction. 3683 ReducedPartRdx = addFastMathFlag( 3684 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part], 3685 ReducedPartRdx, "bin.rdx")); 3686 else 3687 ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp( 3688 Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]); 3689 } 3690 3691 if (VF > 1) { 3692 // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles 3693 // and vector ops, reducing the set of values being computed by half each 3694 // round. 3695 assert(isPowerOf2_32(VF) && 3696 "Reduction emission only supported for pow2 vectors!"); 3697 Value *TmpVec = ReducedPartRdx; 3698 SmallVector<Constant *, 32> ShuffleMask(VF, nullptr); 3699 for (unsigned i = VF; i != 1; i >>= 1) { 3700 // Move the upper half of the vector to the lower half. 3701 for (unsigned j = 0; j != i / 2; ++j) 3702 ShuffleMask[j] = Builder.getInt32(i / 2 + j); 3703 3704 // Fill the rest of the mask with undef. 3705 std::fill(&ShuffleMask[i / 2], ShuffleMask.end(), 3706 UndefValue::get(Builder.getInt32Ty())); 3707 3708 Value *Shuf = Builder.CreateShuffleVector( 3709 TmpVec, UndefValue::get(TmpVec->getType()), 3710 ConstantVector::get(ShuffleMask), "rdx.shuf"); 3711 3712 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3713 // Floating point operations had to be 'fast' to enable the reduction. 3714 TmpVec = addFastMathFlag(Builder.CreateBinOp( 3715 (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx")); 3716 else 3717 TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind, 3718 TmpVec, Shuf); 3719 } 3720 3721 // The result is in the first element of the vector. 3722 ReducedPartRdx = 3723 Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 3724 3725 // If the reduction can be performed in a smaller type, we need to extend 3726 // the reduction to the wider type before we branch to the original loop. 3727 if (Phi->getType() != RdxDesc.getRecurrenceType()) 3728 ReducedPartRdx = 3729 RdxDesc.isSigned() 3730 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 3731 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 3732 } 3733 3734 // Create a phi node that merges control-flow from the backedge-taken check 3735 // block and the middle block. 3736 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 3737 LoopScalarPreHeader->getTerminator()); 3738 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 3739 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 3740 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3741 3742 // Now, we need to fix the users of the reduction variable 3743 // inside and outside of the scalar remainder loop. 3744 // We know that the loop is in LCSSA form. We need to update the 3745 // PHI nodes in the exit blocks. 3746 for (BasicBlock::iterator LEI = LoopExitBlock->begin(), 3747 LEE = LoopExitBlock->end(); 3748 LEI != LEE; ++LEI) { 3749 PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI); 3750 if (!LCSSAPhi) 3751 break; 3752 3753 // All PHINodes need to have a single entry edge, or two if 3754 // we already fixed them. 3755 assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 3756 3757 // We found our reduction value exit-PHI. Update it with the 3758 // incoming bypass edge. 3759 if (LCSSAPhi->getIncomingValue(0) == LoopExitInst) { 3760 // Add an edge coming from the bypass. 3761 LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3762 break; 3763 } 3764 } // end of the LCSSA phi scan. 3765 3766 // Fix the scalar loop reduction variable with the incoming reduction sum 3767 // from the vector body and from the backedge value. 3768 int IncomingEdgeBlockIdx = 3769 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 3770 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 3771 // Pick the other block. 3772 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 3773 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 3774 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 3775 } // end of for each Phi in PHIsToFix. 3776 3777 fixLCSSAPHIs(); 3778 3779 // Make sure DomTree is updated. 3780 updateAnalysis(); 3781 3782 // Predicate any stores. 3783 for (auto KV : PredicatedStores) { 3784 BasicBlock::iterator I(KV.first); 3785 auto *BB = SplitBlock(I->getParent(), &*std::next(I), DT, LI); 3786 auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false, 3787 /*BranchWeights=*/nullptr, DT, LI); 3788 I->moveBefore(T); 3789 I->getParent()->setName("pred.store.if"); 3790 BB->setName("pred.store.continue"); 3791 } 3792 DEBUG(DT->verifyDomTree()); 3793 // Remove redundant induction instructions. 3794 cse(LoopVectorBody); 3795 } 3796 3797 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) { 3798 3799 // This is the second phase of vectorizing first-order recurrences. An 3800 // overview of the transformation is described below. Suppose we have the 3801 // following loop. 3802 // 3803 // for (int i = 0; i < n; ++i) 3804 // b[i] = a[i] - a[i - 1]; 3805 // 3806 // There is a first-order recurrence on "a". For this loop, the shorthand 3807 // scalar IR looks like: 3808 // 3809 // scalar.ph: 3810 // s_init = a[-1] 3811 // br scalar.body 3812 // 3813 // scalar.body: 3814 // i = phi [0, scalar.ph], [i+1, scalar.body] 3815 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 3816 // s2 = a[i] 3817 // b[i] = s2 - s1 3818 // br cond, scalar.body, ... 3819 // 3820 // In this example, s1 is a recurrence because it's value depends on the 3821 // previous iteration. In the first phase of vectorization, we created a 3822 // temporary value for s1. We now complete the vectorization and produce the 3823 // shorthand vector IR shown below (for VF = 4, UF = 1). 3824 // 3825 // vector.ph: 3826 // v_init = vector(..., ..., ..., a[-1]) 3827 // br vector.body 3828 // 3829 // vector.body 3830 // i = phi [0, vector.ph], [i+4, vector.body] 3831 // v1 = phi [v_init, vector.ph], [v2, vector.body] 3832 // v2 = a[i, i+1, i+2, i+3]; 3833 // v3 = vector(v1(3), v2(0, 1, 2)) 3834 // b[i, i+1, i+2, i+3] = v2 - v3 3835 // br cond, vector.body, middle.block 3836 // 3837 // middle.block: 3838 // x = v2(3) 3839 // br scalar.ph 3840 // 3841 // scalar.ph: 3842 // s_init = phi [x, middle.block], [a[-1], otherwise] 3843 // br scalar.body 3844 // 3845 // After execution completes the vector loop, we extract the next value of 3846 // the recurrence (x) to use as the initial value in the scalar loop. 3847 3848 // Get the original loop preheader and single loop latch. 3849 auto *Preheader = OrigLoop->getLoopPreheader(); 3850 auto *Latch = OrigLoop->getLoopLatch(); 3851 3852 // Get the initial and previous values of the scalar recurrence. 3853 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader); 3854 auto *Previous = Phi->getIncomingValueForBlock(Latch); 3855 3856 // Create a vector from the initial value. 3857 auto *VectorInit = ScalarInit; 3858 if (VF > 1) { 3859 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 3860 VectorInit = Builder.CreateInsertElement( 3861 UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit, 3862 Builder.getInt32(VF - 1), "vector.recur.init"); 3863 } 3864 3865 // We constructed a temporary phi node in the first phase of vectorization. 3866 // This phi node will eventually be deleted. 3867 auto &PhiParts = getVectorValue(Phi); 3868 Builder.SetInsertPoint(cast<Instruction>(PhiParts[0])); 3869 3870 // Create a phi node for the new recurrence. The current value will either be 3871 // the initial value inserted into a vector or loop-varying vector value. 3872 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur"); 3873 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader); 3874 3875 // Get the vectorized previous value. We ensured the previous values was an 3876 // instruction when detecting the recurrence. 3877 auto &PreviousParts = getVectorValue(Previous); 3878 3879 // Set the insertion point to be after this instruction. We ensured the 3880 // previous value dominated all uses of the phi when detecting the 3881 // recurrence. 3882 Builder.SetInsertPoint( 3883 &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1]))); 3884 3885 // We will construct a vector for the recurrence by combining the values for 3886 // the current and previous iterations. This is the required shuffle mask. 3887 SmallVector<Constant *, 8> ShuffleMask(VF); 3888 ShuffleMask[0] = Builder.getInt32(VF - 1); 3889 for (unsigned I = 1; I < VF; ++I) 3890 ShuffleMask[I] = Builder.getInt32(I + VF - 1); 3891 3892 // The vector from which to take the initial value for the current iteration 3893 // (actual or unrolled). Initially, this is the vector phi node. 3894 Value *Incoming = VecPhi; 3895 3896 // Shuffle the current and previous vector and update the vector parts. 3897 for (unsigned Part = 0; Part < UF; ++Part) { 3898 auto *Shuffle = 3899 VF > 1 3900 ? Builder.CreateShuffleVector(Incoming, PreviousParts[Part], 3901 ConstantVector::get(ShuffleMask)) 3902 : Incoming; 3903 PhiParts[Part]->replaceAllUsesWith(Shuffle); 3904 cast<Instruction>(PhiParts[Part])->eraseFromParent(); 3905 PhiParts[Part] = Shuffle; 3906 Incoming = PreviousParts[Part]; 3907 } 3908 3909 // Fix the latch value of the new recurrence in the vector loop. 3910 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 3911 3912 // Extract the last vector element in the middle block. This will be the 3913 // initial value for the recurrence when jumping to the scalar loop. 3914 auto *Extract = Incoming; 3915 if (VF > 1) { 3916 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3917 Extract = Builder.CreateExtractElement(Extract, Builder.getInt32(VF - 1), 3918 "vector.recur.extract"); 3919 } 3920 3921 // Fix the initial value of the original recurrence in the scalar loop. 3922 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 3923 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 3924 for (auto *BB : predecessors(LoopScalarPreHeader)) { 3925 auto *Incoming = BB == LoopMiddleBlock ? Extract : ScalarInit; 3926 Start->addIncoming(Incoming, BB); 3927 } 3928 3929 Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start); 3930 Phi->setName("scalar.recur"); 3931 3932 // Finally, fix users of the recurrence outside the loop. The users will need 3933 // either the last value of the scalar recurrence or the last value of the 3934 // vector recurrence we extracted in the middle block. Since the loop is in 3935 // LCSSA form, we just need to find the phi node for the original scalar 3936 // recurrence in the exit block, and then add an edge for the middle block. 3937 for (auto &I : *LoopExitBlock) { 3938 auto *LCSSAPhi = dyn_cast<PHINode>(&I); 3939 if (!LCSSAPhi) 3940 break; 3941 if (LCSSAPhi->getIncomingValue(0) == Phi) { 3942 LCSSAPhi->addIncoming(Extract, LoopMiddleBlock); 3943 break; 3944 } 3945 } 3946 } 3947 3948 void InnerLoopVectorizer::fixLCSSAPHIs() { 3949 for (Instruction &LEI : *LoopExitBlock) { 3950 auto *LCSSAPhi = dyn_cast<PHINode>(&LEI); 3951 if (!LCSSAPhi) 3952 break; 3953 if (LCSSAPhi->getNumIncomingValues() == 1) 3954 LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()), 3955 LoopMiddleBlock); 3956 } 3957 } 3958 3959 InnerLoopVectorizer::VectorParts 3960 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) { 3961 assert(std::find(pred_begin(Dst), pred_end(Dst), Src) != pred_end(Dst) && 3962 "Invalid edge"); 3963 3964 // Look for cached value. 3965 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 3966 EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge); 3967 if (ECEntryIt != MaskCache.end()) 3968 return ECEntryIt->second; 3969 3970 VectorParts SrcMask = createBlockInMask(Src); 3971 3972 // The terminator has to be a branch inst! 3973 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 3974 assert(BI && "Unexpected terminator found"); 3975 3976 if (BI->isConditional()) { 3977 VectorParts EdgeMask = getVectorValue(BI->getCondition()); 3978 3979 if (BI->getSuccessor(0) != Dst) 3980 for (unsigned part = 0; part < UF; ++part) 3981 EdgeMask[part] = Builder.CreateNot(EdgeMask[part]); 3982 3983 for (unsigned part = 0; part < UF; ++part) 3984 EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]); 3985 3986 MaskCache[Edge] = EdgeMask; 3987 return EdgeMask; 3988 } 3989 3990 MaskCache[Edge] = SrcMask; 3991 return SrcMask; 3992 } 3993 3994 InnerLoopVectorizer::VectorParts 3995 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) { 3996 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 3997 3998 // Loop incoming mask is all-one. 3999 if (OrigLoop->getHeader() == BB) { 4000 Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1); 4001 return getVectorValue(C); 4002 } 4003 4004 // This is the block mask. We OR all incoming edges, and with zero. 4005 Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0); 4006 VectorParts BlockMask = getVectorValue(Zero); 4007 4008 // For each pred: 4009 for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) { 4010 VectorParts EM = createEdgeMask(*it, BB); 4011 for (unsigned part = 0; part < UF; ++part) 4012 BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]); 4013 } 4014 4015 return BlockMask; 4016 } 4017 4018 void InnerLoopVectorizer::widenPHIInstruction( 4019 Instruction *PN, InnerLoopVectorizer::VectorParts &Entry, unsigned UF, 4020 unsigned VF, PhiVector *PV) { 4021 PHINode *P = cast<PHINode>(PN); 4022 // Handle recurrences. 4023 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) { 4024 for (unsigned part = 0; part < UF; ++part) { 4025 // This is phase one of vectorizing PHIs. 4026 Type *VecTy = 4027 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF); 4028 Entry[part] = PHINode::Create( 4029 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 4030 } 4031 PV->push_back(P); 4032 return; 4033 } 4034 4035 setDebugLocFromInst(Builder, P); 4036 // Check for PHI nodes that are lowered to vector selects. 4037 if (P->getParent() != OrigLoop->getHeader()) { 4038 // We know that all PHIs in non-header blocks are converted into 4039 // selects, so we don't have to worry about the insertion order and we 4040 // can just use the builder. 4041 // At this point we generate the predication tree. There may be 4042 // duplications since this is a simple recursive scan, but future 4043 // optimizations will clean it up. 4044 4045 unsigned NumIncoming = P->getNumIncomingValues(); 4046 4047 // Generate a sequence of selects of the form: 4048 // SELECT(Mask3, In3, 4049 // SELECT(Mask2, In2, 4050 // ( ...))) 4051 for (unsigned In = 0; In < NumIncoming; In++) { 4052 VectorParts Cond = 4053 createEdgeMask(P->getIncomingBlock(In), P->getParent()); 4054 VectorParts &In0 = getVectorValue(P->getIncomingValue(In)); 4055 4056 for (unsigned part = 0; part < UF; ++part) { 4057 // We might have single edge PHIs (blocks) - use an identity 4058 // 'select' for the first PHI operand. 4059 if (In == 0) 4060 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In0[part]); 4061 else 4062 // Select between the current value and the previous incoming edge 4063 // based on the incoming mask. 4064 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], Entry[part], 4065 "predphi"); 4066 } 4067 } 4068 return; 4069 } 4070 4071 // This PHINode must be an induction variable. 4072 // Make sure that we know about it. 4073 assert(Legal->getInductionVars()->count(P) && "Not an induction variable"); 4074 4075 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 4076 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4077 4078 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4079 // which can be found from the original scalar operations. 4080 switch (II.getKind()) { 4081 case InductionDescriptor::IK_NoInduction: 4082 llvm_unreachable("Unknown induction"); 4083 case InductionDescriptor::IK_IntInduction: 4084 return widenIntInduction(P, Entry); 4085 case InductionDescriptor::IK_PtrInduction: { 4086 // Handle the pointer induction variable case. 4087 assert(P->getType()->isPointerTy() && "Unexpected type."); 4088 // This is the normalized GEP that starts counting at zero. 4089 Value *PtrInd = Induction; 4090 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType()); 4091 // This is the vector of results. Notice that we don't generate 4092 // vector geps because scalar geps result in better code. 4093 for (unsigned part = 0; part < UF; ++part) { 4094 if (VF == 1) { 4095 int EltIndex = part; 4096 Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex); 4097 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4098 Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL); 4099 SclrGep->setName("next.gep"); 4100 Entry[part] = SclrGep; 4101 continue; 4102 } 4103 4104 Value *VecVal = UndefValue::get(VectorType::get(P->getType(), VF)); 4105 for (unsigned int i = 0; i < VF; ++i) { 4106 int EltIndex = i + part * VF; 4107 Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex); 4108 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4109 Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL); 4110 SclrGep->setName("next.gep"); 4111 VecVal = Builder.CreateInsertElement(VecVal, SclrGep, 4112 Builder.getInt32(i), "insert.gep"); 4113 } 4114 Entry[part] = VecVal; 4115 } 4116 return; 4117 } 4118 case InductionDescriptor::IK_FpInduction: { 4119 assert(P->getType() == II.getStartValue()->getType() && 4120 "Types must match"); 4121 // Handle other induction variables that are now based on the 4122 // canonical one. 4123 assert(P != OldInduction && "Primary induction can be integer only"); 4124 4125 Value *V = Builder.CreateCast(Instruction::SIToFP, Induction, P->getType()); 4126 V = II.transform(Builder, V, PSE.getSE(), DL); 4127 V->setName("fp.offset.idx"); 4128 4129 // Now we have scalar op: %fp.offset.idx = StartVal +/- Induction*StepVal 4130 4131 Value *Broadcasted = getBroadcastInstrs(V); 4132 // After broadcasting the induction variable we need to make the vector 4133 // consecutive by adding StepVal*0, StepVal*1, StepVal*2, etc. 4134 Value *StepVal = cast<SCEVUnknown>(II.getStep())->getValue(); 4135 for (unsigned part = 0; part < UF; ++part) 4136 Entry[part] = getStepVector(Broadcasted, VF * part, StepVal, 4137 II.getInductionOpcode()); 4138 return; 4139 } 4140 } 4141 } 4142 4143 void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) { 4144 // For each instruction in the old loop. 4145 for (Instruction &I : *BB) { 4146 VectorParts &Entry = WidenMap.get(&I); 4147 4148 switch (I.getOpcode()) { 4149 case Instruction::Br: 4150 // Nothing to do for PHIs and BR, since we already took care of the 4151 // loop control flow instructions. 4152 continue; 4153 case Instruction::PHI: { 4154 // Vectorize PHINodes. 4155 widenPHIInstruction(&I, Entry, UF, VF, PV); 4156 continue; 4157 } // End of PHI. 4158 4159 case Instruction::Add: 4160 case Instruction::FAdd: 4161 case Instruction::Sub: 4162 case Instruction::FSub: 4163 case Instruction::Mul: 4164 case Instruction::FMul: 4165 case Instruction::UDiv: 4166 case Instruction::SDiv: 4167 case Instruction::FDiv: 4168 case Instruction::URem: 4169 case Instruction::SRem: 4170 case Instruction::FRem: 4171 case Instruction::Shl: 4172 case Instruction::LShr: 4173 case Instruction::AShr: 4174 case Instruction::And: 4175 case Instruction::Or: 4176 case Instruction::Xor: { 4177 // Just widen binops. 4178 auto *BinOp = cast<BinaryOperator>(&I); 4179 setDebugLocFromInst(Builder, BinOp); 4180 VectorParts &A = getVectorValue(BinOp->getOperand(0)); 4181 VectorParts &B = getVectorValue(BinOp->getOperand(1)); 4182 4183 // Use this vector value for all users of the original instruction. 4184 for (unsigned Part = 0; Part < UF; ++Part) { 4185 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]); 4186 4187 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V)) 4188 VecOp->copyIRFlags(BinOp); 4189 4190 Entry[Part] = V; 4191 } 4192 4193 addMetadata(Entry, BinOp); 4194 break; 4195 } 4196 case Instruction::Select: { 4197 // Widen selects. 4198 // If the selector is loop invariant we can create a select 4199 // instruction with a scalar condition. Otherwise, use vector-select. 4200 auto *SE = PSE.getSE(); 4201 bool InvariantCond = 4202 SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop); 4203 setDebugLocFromInst(Builder, &I); 4204 4205 // The condition can be loop invariant but still defined inside the 4206 // loop. This means that we can't just use the original 'cond' value. 4207 // We have to take the 'vectorized' value and pick the first lane. 4208 // Instcombine will make this a no-op. 4209 VectorParts &Cond = getVectorValue(I.getOperand(0)); 4210 VectorParts &Op0 = getVectorValue(I.getOperand(1)); 4211 VectorParts &Op1 = getVectorValue(I.getOperand(2)); 4212 4213 Value *ScalarCond = 4214 (VF == 1) 4215 ? Cond[0] 4216 : Builder.CreateExtractElement(Cond[0], Builder.getInt32(0)); 4217 4218 for (unsigned Part = 0; Part < UF; ++Part) { 4219 Entry[Part] = Builder.CreateSelect( 4220 InvariantCond ? ScalarCond : Cond[Part], Op0[Part], Op1[Part]); 4221 } 4222 4223 addMetadata(Entry, &I); 4224 break; 4225 } 4226 4227 case Instruction::ICmp: 4228 case Instruction::FCmp: { 4229 // Widen compares. Generate vector compares. 4230 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4231 auto *Cmp = dyn_cast<CmpInst>(&I); 4232 setDebugLocFromInst(Builder, Cmp); 4233 VectorParts &A = getVectorValue(Cmp->getOperand(0)); 4234 VectorParts &B = getVectorValue(Cmp->getOperand(1)); 4235 for (unsigned Part = 0; Part < UF; ++Part) { 4236 Value *C = nullptr; 4237 if (FCmp) { 4238 C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]); 4239 cast<FCmpInst>(C)->copyFastMathFlags(Cmp); 4240 } else { 4241 C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]); 4242 } 4243 Entry[Part] = C; 4244 } 4245 4246 addMetadata(Entry, &I); 4247 break; 4248 } 4249 4250 case Instruction::Store: 4251 case Instruction::Load: 4252 vectorizeMemoryInstruction(&I); 4253 break; 4254 case Instruction::ZExt: 4255 case Instruction::SExt: 4256 case Instruction::FPToUI: 4257 case Instruction::FPToSI: 4258 case Instruction::FPExt: 4259 case Instruction::PtrToInt: 4260 case Instruction::IntToPtr: 4261 case Instruction::SIToFP: 4262 case Instruction::UIToFP: 4263 case Instruction::Trunc: 4264 case Instruction::FPTrunc: 4265 case Instruction::BitCast: { 4266 auto *CI = dyn_cast<CastInst>(&I); 4267 setDebugLocFromInst(Builder, CI); 4268 4269 // Optimize the special case where the source is a constant integer 4270 // induction variable. Notice that we can only optimize the 'trunc' case 4271 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 4272 // (c) other casts depend on pointer size. 4273 auto ID = Legal->getInductionVars()->lookup(OldInduction); 4274 if (isa<TruncInst>(CI) && CI->getOperand(0) == OldInduction && 4275 ID.getConstIntStepValue()) { 4276 widenIntInduction(OldInduction, Entry, cast<TruncInst>(CI)); 4277 addMetadata(Entry, &I); 4278 break; 4279 } 4280 4281 /// Vectorize casts. 4282 Type *DestTy = 4283 (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF); 4284 4285 VectorParts &A = getVectorValue(CI->getOperand(0)); 4286 for (unsigned Part = 0; Part < UF; ++Part) 4287 Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy); 4288 addMetadata(Entry, &I); 4289 break; 4290 } 4291 4292 case Instruction::Call: { 4293 // Ignore dbg intrinsics. 4294 if (isa<DbgInfoIntrinsic>(I)) 4295 break; 4296 setDebugLocFromInst(Builder, &I); 4297 4298 Module *M = BB->getParent()->getParent(); 4299 auto *CI = cast<CallInst>(&I); 4300 4301 StringRef FnName = CI->getCalledFunction()->getName(); 4302 Function *F = CI->getCalledFunction(); 4303 Type *RetTy = ToVectorTy(CI->getType(), VF); 4304 SmallVector<Type *, 4> Tys; 4305 for (Value *ArgOperand : CI->arg_operands()) 4306 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 4307 4308 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4309 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 4310 ID == Intrinsic::lifetime_start)) { 4311 scalarizeInstruction(&I); 4312 break; 4313 } 4314 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4315 // version of the instruction. 4316 // Is it beneficial to perform intrinsic call compared to lib call? 4317 bool NeedToScalarize; 4318 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 4319 bool UseVectorIntrinsic = 4320 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 4321 if (!UseVectorIntrinsic && NeedToScalarize) { 4322 scalarizeInstruction(&I); 4323 break; 4324 } 4325 4326 for (unsigned Part = 0; Part < UF; ++Part) { 4327 SmallVector<Value *, 4> Args; 4328 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) { 4329 Value *Arg = CI->getArgOperand(i); 4330 // Some intrinsics have a scalar argument - don't replace it with a 4331 // vector. 4332 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) { 4333 VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i)); 4334 Arg = VectorArg[Part]; 4335 } 4336 Args.push_back(Arg); 4337 } 4338 4339 Function *VectorF; 4340 if (UseVectorIntrinsic) { 4341 // Use vector version of the intrinsic. 4342 Type *TysForDecl[] = {CI->getType()}; 4343 if (VF > 1) 4344 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4345 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4346 } else { 4347 // Use vector version of the library call. 4348 StringRef VFnName = TLI->getVectorizedFunction(FnName, VF); 4349 assert(!VFnName.empty() && "Vector function name is empty."); 4350 VectorF = M->getFunction(VFnName); 4351 if (!VectorF) { 4352 // Generate a declaration 4353 FunctionType *FTy = FunctionType::get(RetTy, Tys, false); 4354 VectorF = 4355 Function::Create(FTy, Function::ExternalLinkage, VFnName, M); 4356 VectorF->copyAttributesFrom(F); 4357 } 4358 } 4359 assert(VectorF && "Can't create vector function."); 4360 4361 SmallVector<OperandBundleDef, 1> OpBundles; 4362 CI->getOperandBundlesAsDefs(OpBundles); 4363 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4364 4365 if (isa<FPMathOperator>(V)) 4366 V->copyFastMathFlags(CI); 4367 4368 Entry[Part] = V; 4369 } 4370 4371 addMetadata(Entry, &I); 4372 break; 4373 } 4374 4375 default: 4376 // All other instructions are unsupported. Scalarize them. 4377 scalarizeInstruction(&I); 4378 break; 4379 } // end of switch. 4380 } // end of for_each instr. 4381 } 4382 4383 void InnerLoopVectorizer::updateAnalysis() { 4384 // Forget the original basic block. 4385 PSE.getSE()->forgetLoop(OrigLoop); 4386 4387 // Update the dominator tree information. 4388 assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) && 4389 "Entry does not dominate exit."); 4390 4391 // We don't predicate stores by this point, so the vector body should be a 4392 // single loop. 4393 DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader); 4394 4395 DT->addNewBlock(LoopMiddleBlock, LoopVectorBody); 4396 DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]); 4397 DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader); 4398 DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]); 4399 4400 DEBUG(DT->verifyDomTree()); 4401 } 4402 4403 /// \brief Check whether it is safe to if-convert this phi node. 4404 /// 4405 /// Phi nodes with constant expressions that can trap are not safe to if 4406 /// convert. 4407 static bool canIfConvertPHINodes(BasicBlock *BB) { 4408 for (Instruction &I : *BB) { 4409 auto *Phi = dyn_cast<PHINode>(&I); 4410 if (!Phi) 4411 return true; 4412 for (Value *V : Phi->incoming_values()) 4413 if (auto *C = dyn_cast<Constant>(V)) 4414 if (C->canTrap()) 4415 return false; 4416 } 4417 return true; 4418 } 4419 4420 bool LoopVectorizationLegality::canVectorizeWithIfConvert() { 4421 if (!EnableIfConversion) { 4422 emitAnalysis(VectorizationReport() << "if-conversion is disabled"); 4423 return false; 4424 } 4425 4426 assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable"); 4427 4428 // A list of pointers that we can safely read and write to. 4429 SmallPtrSet<Value *, 8> SafePointes; 4430 4431 // Collect safe addresses. 4432 for (BasicBlock *BB : TheLoop->blocks()) { 4433 if (blockNeedsPredication(BB)) 4434 continue; 4435 4436 for (Instruction &I : *BB) { 4437 if (auto *LI = dyn_cast<LoadInst>(&I)) 4438 SafePointes.insert(LI->getPointerOperand()); 4439 else if (auto *SI = dyn_cast<StoreInst>(&I)) 4440 SafePointes.insert(SI->getPointerOperand()); 4441 } 4442 } 4443 4444 // Collect the blocks that need predication. 4445 BasicBlock *Header = TheLoop->getHeader(); 4446 for (BasicBlock *BB : TheLoop->blocks()) { 4447 // We don't support switch statements inside loops. 4448 if (!isa<BranchInst>(BB->getTerminator())) { 4449 emitAnalysis(VectorizationReport(BB->getTerminator()) 4450 << "loop contains a switch statement"); 4451 return false; 4452 } 4453 4454 // We must be able to predicate all blocks that need to be predicated. 4455 if (blockNeedsPredication(BB)) { 4456 if (!blockCanBePredicated(BB, SafePointes)) { 4457 emitAnalysis(VectorizationReport(BB->getTerminator()) 4458 << "control flow cannot be substituted for a select"); 4459 return false; 4460 } 4461 } else if (BB != Header && !canIfConvertPHINodes(BB)) { 4462 emitAnalysis(VectorizationReport(BB->getTerminator()) 4463 << "control flow cannot be substituted for a select"); 4464 return false; 4465 } 4466 } 4467 4468 // We can if-convert this loop. 4469 return true; 4470 } 4471 4472 bool LoopVectorizationLegality::canVectorize() { 4473 // We must have a loop in canonical form. Loops with indirectbr in them cannot 4474 // be canonicalized. 4475 if (!TheLoop->getLoopPreheader()) { 4476 emitAnalysis(VectorizationReport() 4477 << "loop control flow is not understood by vectorizer"); 4478 return false; 4479 } 4480 4481 // We can only vectorize innermost loops. 4482 if (!TheLoop->empty()) { 4483 emitAnalysis(VectorizationReport() << "loop is not the innermost loop"); 4484 return false; 4485 } 4486 4487 // We must have a single backedge. 4488 if (TheLoop->getNumBackEdges() != 1) { 4489 emitAnalysis(VectorizationReport() 4490 << "loop control flow is not understood by vectorizer"); 4491 return false; 4492 } 4493 4494 // We must have a single exiting block. 4495 if (!TheLoop->getExitingBlock()) { 4496 emitAnalysis(VectorizationReport() 4497 << "loop control flow is not understood by vectorizer"); 4498 return false; 4499 } 4500 4501 // We only handle bottom-tested loops, i.e. loop in which the condition is 4502 // checked at the end of each iteration. With that we can assume that all 4503 // instructions in the loop are executed the same number of times. 4504 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 4505 emitAnalysis(VectorizationReport() 4506 << "loop control flow is not understood by vectorizer"); 4507 return false; 4508 } 4509 4510 // We need to have a loop header. 4511 DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName() 4512 << '\n'); 4513 4514 // Check if we can if-convert non-single-bb loops. 4515 unsigned NumBlocks = TheLoop->getNumBlocks(); 4516 if (NumBlocks != 1 && !canVectorizeWithIfConvert()) { 4517 DEBUG(dbgs() << "LV: Can't if-convert the loop.\n"); 4518 return false; 4519 } 4520 4521 // ScalarEvolution needs to be able to find the exit count. 4522 const SCEV *ExitCount = PSE.getBackedgeTakenCount(); 4523 if (ExitCount == PSE.getSE()->getCouldNotCompute()) { 4524 emitAnalysis(VectorizationReport() 4525 << "could not determine number of loop iterations"); 4526 DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n"); 4527 return false; 4528 } 4529 4530 // Check if we can vectorize the instructions and CFG in this loop. 4531 if (!canVectorizeInstrs()) { 4532 DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n"); 4533 return false; 4534 } 4535 4536 // Go over each instruction and look at memory deps. 4537 if (!canVectorizeMemory()) { 4538 DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n"); 4539 return false; 4540 } 4541 4542 // Collect all of the variables that remain uniform after vectorization. 4543 collectLoopUniforms(); 4544 4545 DEBUG(dbgs() << "LV: We can vectorize this loop" 4546 << (LAI->getRuntimePointerChecking()->Need 4547 ? " (with a runtime bound check)" 4548 : "") 4549 << "!\n"); 4550 4551 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 4552 4553 // If an override option has been passed in for interleaved accesses, use it. 4554 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 4555 UseInterleaved = EnableInterleavedMemAccesses; 4556 4557 // Analyze interleaved memory accesses. 4558 if (UseInterleaved) 4559 InterleaveInfo.analyzeInterleaving(*getSymbolicStrides()); 4560 4561 unsigned SCEVThreshold = VectorizeSCEVCheckThreshold; 4562 if (Hints->getForce() == LoopVectorizeHints::FK_Enabled) 4563 SCEVThreshold = PragmaVectorizeSCEVCheckThreshold; 4564 4565 if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) { 4566 emitAnalysis(VectorizationReport() 4567 << "Too many SCEV assumptions need to be made and checked " 4568 << "at runtime"); 4569 DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n"); 4570 return false; 4571 } 4572 4573 // Okay! We can vectorize. At this point we don't have any other mem analysis 4574 // which may limit our maximum vectorization factor, so just return true with 4575 // no restrictions. 4576 return true; 4577 } 4578 4579 static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) { 4580 if (Ty->isPointerTy()) 4581 return DL.getIntPtrType(Ty); 4582 4583 // It is possible that char's or short's overflow when we ask for the loop's 4584 // trip count, work around this by changing the type size. 4585 if (Ty->getScalarSizeInBits() < 32) 4586 return Type::getInt32Ty(Ty->getContext()); 4587 4588 return Ty; 4589 } 4590 4591 static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) { 4592 Ty0 = convertPointerToIntegerType(DL, Ty0); 4593 Ty1 = convertPointerToIntegerType(DL, Ty1); 4594 if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits()) 4595 return Ty0; 4596 return Ty1; 4597 } 4598 4599 /// \brief Check that the instruction has outside loop users and is not an 4600 /// identified reduction variable. 4601 static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst, 4602 SmallPtrSetImpl<Value *> &AllowedExit) { 4603 // Reduction and Induction instructions are allowed to have exit users. All 4604 // other instructions must not have external users. 4605 if (!AllowedExit.count(Inst)) 4606 // Check that all of the users of the loop are inside the BB. 4607 for (User *U : Inst->users()) { 4608 Instruction *UI = cast<Instruction>(U); 4609 // This user may be a reduction exit value. 4610 if (!TheLoop->contains(UI)) { 4611 DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n'); 4612 return true; 4613 } 4614 } 4615 return false; 4616 } 4617 4618 void LoopVectorizationLegality::addInductionPhi( 4619 PHINode *Phi, const InductionDescriptor &ID, 4620 SmallPtrSetImpl<Value *> &AllowedExit) { 4621 Inductions[Phi] = ID; 4622 Type *PhiTy = Phi->getType(); 4623 const DataLayout &DL = Phi->getModule()->getDataLayout(); 4624 4625 // Get the widest type. 4626 if (!PhiTy->isFloatingPointTy()) { 4627 if (!WidestIndTy) 4628 WidestIndTy = convertPointerToIntegerType(DL, PhiTy); 4629 else 4630 WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy); 4631 } 4632 4633 // Int inductions are special because we only allow one IV. 4634 if (ID.getKind() == InductionDescriptor::IK_IntInduction && 4635 ID.getConstIntStepValue() && 4636 ID.getConstIntStepValue()->isOne() && 4637 isa<Constant>(ID.getStartValue()) && 4638 cast<Constant>(ID.getStartValue())->isNullValue()) { 4639 4640 // Use the phi node with the widest type as induction. Use the last 4641 // one if there are multiple (no good reason for doing this other 4642 // than it is expedient). We've checked that it begins at zero and 4643 // steps by one, so this is a canonical induction variable. 4644 if (!Induction || PhiTy == WidestIndTy) 4645 Induction = Phi; 4646 } 4647 4648 // Both the PHI node itself, and the "post-increment" value feeding 4649 // back into the PHI node may have external users. 4650 AllowedExit.insert(Phi); 4651 AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch())); 4652 4653 DEBUG(dbgs() << "LV: Found an induction variable.\n"); 4654 return; 4655 } 4656 4657 bool LoopVectorizationLegality::canVectorizeInstrs() { 4658 BasicBlock *Header = TheLoop->getHeader(); 4659 4660 // Look for the attribute signaling the absence of NaNs. 4661 Function &F = *Header->getParent(); 4662 HasFunNoNaNAttr = 4663 F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true"; 4664 4665 // For each block in the loop. 4666 for (BasicBlock *BB : TheLoop->blocks()) { 4667 // Scan the instructions in the block and look for hazards. 4668 for (Instruction &I : *BB) { 4669 if (auto *Phi = dyn_cast<PHINode>(&I)) { 4670 Type *PhiTy = Phi->getType(); 4671 // Check that this PHI type is allowed. 4672 if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() && 4673 !PhiTy->isPointerTy()) { 4674 emitAnalysis(VectorizationReport(Phi) 4675 << "loop control flow is not understood by vectorizer"); 4676 DEBUG(dbgs() << "LV: Found an non-int non-pointer PHI.\n"); 4677 return false; 4678 } 4679 4680 // If this PHINode is not in the header block, then we know that we 4681 // can convert it to select during if-conversion. No need to check if 4682 // the PHIs in this block are induction or reduction variables. 4683 if (BB != Header) { 4684 // Check that this instruction has no outside users or is an 4685 // identified reduction value with an outside user. 4686 if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit)) 4687 continue; 4688 emitAnalysis(VectorizationReport(Phi) 4689 << "value could not be identified as " 4690 "an induction or reduction variable"); 4691 return false; 4692 } 4693 4694 // We only allow if-converted PHIs with exactly two incoming values. 4695 if (Phi->getNumIncomingValues() != 2) { 4696 emitAnalysis(VectorizationReport(Phi) 4697 << "control flow not understood by vectorizer"); 4698 DEBUG(dbgs() << "LV: Found an invalid PHI.\n"); 4699 return false; 4700 } 4701 4702 RecurrenceDescriptor RedDes; 4703 if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) { 4704 if (RedDes.hasUnsafeAlgebra()) 4705 Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst()); 4706 AllowedExit.insert(RedDes.getLoopExitInstr()); 4707 Reductions[Phi] = RedDes; 4708 continue; 4709 } 4710 4711 InductionDescriptor ID; 4712 if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) { 4713 addInductionPhi(Phi, ID, AllowedExit); 4714 if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr) 4715 Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst()); 4716 continue; 4717 } 4718 4719 if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) { 4720 FirstOrderRecurrences.insert(Phi); 4721 continue; 4722 } 4723 4724 // As a last resort, coerce the PHI to a AddRec expression 4725 // and re-try classifying it a an induction PHI. 4726 if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) { 4727 addInductionPhi(Phi, ID, AllowedExit); 4728 continue; 4729 } 4730 4731 emitAnalysis(VectorizationReport(Phi) 4732 << "value that could not be identified as " 4733 "reduction is used outside the loop"); 4734 DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n"); 4735 return false; 4736 } // end of PHI handling 4737 4738 // We handle calls that: 4739 // * Are debug info intrinsics. 4740 // * Have a mapping to an IR intrinsic. 4741 // * Have a vector version available. 4742 auto *CI = dyn_cast<CallInst>(&I); 4743 if (CI && !getVectorIntrinsicIDForCall(CI, TLI) && 4744 !isa<DbgInfoIntrinsic>(CI) && 4745 !(CI->getCalledFunction() && TLI && 4746 TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) { 4747 emitAnalysis(VectorizationReport(CI) 4748 << "call instruction cannot be vectorized"); 4749 DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n"); 4750 return false; 4751 } 4752 4753 // Intrinsics such as powi,cttz and ctlz are legal to vectorize if the 4754 // second argument is the same (i.e. loop invariant) 4755 if (CI && hasVectorInstrinsicScalarOpd( 4756 getVectorIntrinsicIDForCall(CI, TLI), 1)) { 4757 auto *SE = PSE.getSE(); 4758 if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) { 4759 emitAnalysis(VectorizationReport(CI) 4760 << "intrinsic instruction cannot be vectorized"); 4761 DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n"); 4762 return false; 4763 } 4764 } 4765 4766 // Check that the instruction return type is vectorizable. 4767 // Also, we can't vectorize extractelement instructions. 4768 if ((!VectorType::isValidElementType(I.getType()) && 4769 !I.getType()->isVoidTy()) || 4770 isa<ExtractElementInst>(I)) { 4771 emitAnalysis(VectorizationReport(&I) 4772 << "instruction return type cannot be vectorized"); 4773 DEBUG(dbgs() << "LV: Found unvectorizable type.\n"); 4774 return false; 4775 } 4776 4777 // Check that the stored type is vectorizable. 4778 if (auto *ST = dyn_cast<StoreInst>(&I)) { 4779 Type *T = ST->getValueOperand()->getType(); 4780 if (!VectorType::isValidElementType(T)) { 4781 emitAnalysis(VectorizationReport(ST) 4782 << "store instruction cannot be vectorized"); 4783 return false; 4784 } 4785 4786 // FP instructions can allow unsafe algebra, thus vectorizable by 4787 // non-IEEE-754 compliant SIMD units. 4788 // This applies to floating-point math operations and calls, not memory 4789 // operations, shuffles, or casts, as they don't change precision or 4790 // semantics. 4791 } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) && 4792 !I.hasUnsafeAlgebra()) { 4793 DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n"); 4794 Hints->setPotentiallyUnsafe(); 4795 } 4796 4797 // Reduction instructions are allowed to have exit users. 4798 // All other instructions must not have external users. 4799 if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) { 4800 emitAnalysis(VectorizationReport(&I) 4801 << "value cannot be used outside the loop"); 4802 return false; 4803 } 4804 4805 } // next instr. 4806 } 4807 4808 if (!Induction) { 4809 DEBUG(dbgs() << "LV: Did not find one integer induction var.\n"); 4810 if (Inductions.empty()) { 4811 emitAnalysis(VectorizationReport() 4812 << "loop induction variable could not be identified"); 4813 return false; 4814 } 4815 } 4816 4817 // Now we know the widest induction type, check if our found induction 4818 // is the same size. If it's not, unset it here and InnerLoopVectorizer 4819 // will create another. 4820 if (Induction && WidestIndTy != Induction->getType()) 4821 Induction = nullptr; 4822 4823 return true; 4824 } 4825 4826 void LoopVectorizationLegality::collectLoopUniforms() { 4827 // We now know that the loop is vectorizable! 4828 // Collect variables that will remain uniform after vectorization. 4829 4830 // If V is not an instruction inside the current loop, it is a Value 4831 // outside of the scope which we are interesting in. 4832 auto isOutOfScope = [&](Value *V) -> bool { 4833 Instruction *I = dyn_cast<Instruction>(V); 4834 return (!I || !TheLoop->contains(I)); 4835 }; 4836 4837 SetVector<Instruction *> Worklist; 4838 BasicBlock *Latch = TheLoop->getLoopLatch(); 4839 // Start with the conditional branch. 4840 if (!isOutOfScope(Latch->getTerminator()->getOperand(0))) { 4841 Instruction *Cmp = cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4842 Worklist.insert(Cmp); 4843 DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n"); 4844 } 4845 4846 // Also add all consecutive pointer values; these values will be uniform 4847 // after vectorization (and subsequent cleanup). 4848 for (auto *BB : TheLoop->blocks()) { 4849 for (auto &I : *BB) { 4850 if (I.getType()->isPointerTy() && isConsecutivePtr(&I)) { 4851 Worklist.insert(&I); 4852 DEBUG(dbgs() << "LV: Found uniform instruction: " << I << "\n"); 4853 } 4854 } 4855 } 4856 4857 // Expand Worklist in topological order: whenever a new instruction 4858 // is added , its users should be either already inside Worklist, or 4859 // out of scope. It ensures a uniform instruction will only be used 4860 // by uniform instructions or out of scope instructions. 4861 unsigned idx = 0; 4862 do { 4863 Instruction *I = Worklist[idx++]; 4864 4865 for (auto OV : I->operand_values()) { 4866 if (isOutOfScope(OV)) 4867 continue; 4868 auto *OI = cast<Instruction>(OV); 4869 if (all_of(OI->users(), [&](User *U) -> bool { 4870 return isOutOfScope(U) || Worklist.count(cast<Instruction>(U)); 4871 })) { 4872 Worklist.insert(OI); 4873 DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n"); 4874 } 4875 } 4876 } while (idx != Worklist.size()); 4877 4878 // For an instruction to be added into Worklist above, all its users inside 4879 // the current loop should be already added into Worklist. This condition 4880 // cannot be true for phi instructions which is always in a dependence loop. 4881 // Because any instruction in the dependence cycle always depends on others 4882 // in the cycle to be added into Worklist first, the result is no ones in 4883 // the cycle will be added into Worklist in the end. 4884 // That is why we process PHI separately. 4885 for (auto &Induction : *getInductionVars()) { 4886 auto *PN = Induction.first; 4887 auto *UpdateV = PN->getIncomingValueForBlock(TheLoop->getLoopLatch()); 4888 if (all_of(PN->users(), 4889 [&](User *U) -> bool { 4890 return U == UpdateV || isOutOfScope(U) || 4891 Worklist.count(cast<Instruction>(U)); 4892 }) && 4893 all_of(UpdateV->users(), [&](User *U) -> bool { 4894 return U == PN || isOutOfScope(U) || 4895 Worklist.count(cast<Instruction>(U)); 4896 })) { 4897 Worklist.insert(cast<Instruction>(PN)); 4898 Worklist.insert(cast<Instruction>(UpdateV)); 4899 DEBUG(dbgs() << "LV: Found uniform instruction: " << *PN << "\n"); 4900 DEBUG(dbgs() << "LV: Found uniform instruction: " << *UpdateV << "\n"); 4901 } 4902 } 4903 4904 Uniforms.insert(Worklist.begin(), Worklist.end()); 4905 } 4906 4907 bool LoopVectorizationLegality::canVectorizeMemory() { 4908 LAI = &(*GetLAA)(*TheLoop); 4909 InterleaveInfo.setLAI(LAI); 4910 auto &OptionalReport = LAI->getReport(); 4911 if (OptionalReport) 4912 emitAnalysis(VectorizationReport(*OptionalReport)); 4913 if (!LAI->canVectorizeMemory()) 4914 return false; 4915 4916 if (LAI->hasStoreToLoopInvariantAddress()) { 4917 emitAnalysis( 4918 VectorizationReport() 4919 << "write to a loop invariant address could not be vectorized"); 4920 DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n"); 4921 return false; 4922 } 4923 4924 Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks()); 4925 PSE.addPredicate(LAI->getPSE().getUnionPredicate()); 4926 4927 return true; 4928 } 4929 4930 bool LoopVectorizationLegality::isInductionVariable(const Value *V) { 4931 Value *In0 = const_cast<Value *>(V); 4932 PHINode *PN = dyn_cast_or_null<PHINode>(In0); 4933 if (!PN) 4934 return false; 4935 4936 return Inductions.count(PN); 4937 } 4938 4939 bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) { 4940 return FirstOrderRecurrences.count(Phi); 4941 } 4942 4943 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) { 4944 return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT); 4945 } 4946 4947 bool LoopVectorizationLegality::blockCanBePredicated( 4948 BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) { 4949 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel(); 4950 4951 for (Instruction &I : *BB) { 4952 // Check that we don't have a constant expression that can trap as operand. 4953 for (Value *Operand : I.operands()) { 4954 if (auto *C = dyn_cast<Constant>(Operand)) 4955 if (C->canTrap()) 4956 return false; 4957 } 4958 // We might be able to hoist the load. 4959 if (I.mayReadFromMemory()) { 4960 auto *LI = dyn_cast<LoadInst>(&I); 4961 if (!LI) 4962 return false; 4963 if (!SafePtrs.count(LI->getPointerOperand())) { 4964 if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) || 4965 isLegalMaskedGather(LI->getType())) { 4966 MaskedOp.insert(LI); 4967 continue; 4968 } 4969 // !llvm.mem.parallel_loop_access implies if-conversion safety. 4970 if (IsAnnotatedParallel) 4971 continue; 4972 return false; 4973 } 4974 } 4975 4976 // We don't predicate stores at the moment. 4977 if (I.mayWriteToMemory()) { 4978 auto *SI = dyn_cast<StoreInst>(&I); 4979 // We only support predication of stores in basic blocks with one 4980 // predecessor. 4981 if (!SI) 4982 return false; 4983 4984 // Build a masked store if it is legal for the target. 4985 if (isLegalMaskedStore(SI->getValueOperand()->getType(), 4986 SI->getPointerOperand()) || 4987 isLegalMaskedScatter(SI->getValueOperand()->getType())) { 4988 MaskedOp.insert(SI); 4989 continue; 4990 } 4991 4992 bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0); 4993 bool isSinglePredecessor = SI->getParent()->getSinglePredecessor(); 4994 4995 if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr || 4996 !isSinglePredecessor) 4997 return false; 4998 } 4999 if (I.mayThrow()) 5000 return false; 5001 5002 // The instructions below can trap. 5003 switch (I.getOpcode()) { 5004 default: 5005 continue; 5006 case Instruction::UDiv: 5007 case Instruction::SDiv: 5008 case Instruction::URem: 5009 case Instruction::SRem: 5010 return false; 5011 } 5012 } 5013 5014 return true; 5015 } 5016 5017 void InterleavedAccessInfo::collectConstStrideAccesses( 5018 MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo, 5019 const ValueToValueMap &Strides) { 5020 5021 auto &DL = TheLoop->getHeader()->getModule()->getDataLayout(); 5022 5023 // Since it's desired that the load/store instructions be maintained in 5024 // "program order" for the interleaved access analysis, we have to visit the 5025 // blocks in the loop in reverse postorder (i.e., in a topological order). 5026 // Such an ordering will ensure that any load/store that may be executed 5027 // before a second load/store will precede the second load/store in 5028 // AccessStrideInfo. 5029 LoopBlocksDFS DFS(TheLoop); 5030 DFS.perform(LI); 5031 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) 5032 for (auto &I : *BB) { 5033 auto *LI = dyn_cast<LoadInst>(&I); 5034 auto *SI = dyn_cast<StoreInst>(&I); 5035 if (!LI && !SI) 5036 continue; 5037 5038 Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand(); 5039 int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides); 5040 5041 const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 5042 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); 5043 uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType()); 5044 5045 // An alignment of 0 means target ABI alignment. 5046 unsigned Align = LI ? LI->getAlignment() : SI->getAlignment(); 5047 if (!Align) 5048 Align = DL.getABITypeAlignment(PtrTy->getElementType()); 5049 5050 AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align); 5051 } 5052 } 5053 5054 // Analyze interleaved accesses and collect them into interleaved load and 5055 // store groups. 5056 // 5057 // When generating code for an interleaved load group, we effectively hoist all 5058 // loads in the group to the location of the first load in program order. When 5059 // generating code for an interleaved store group, we sink all stores to the 5060 // location of the last store. This code motion can change the order of load 5061 // and store instructions and may break dependences. 5062 // 5063 // The code generation strategy mentioned above ensures that we won't violate 5064 // any write-after-read (WAR) dependences. 5065 // 5066 // E.g., for the WAR dependence: a = A[i]; // (1) 5067 // A[i] = b; // (2) 5068 // 5069 // The store group of (2) is always inserted at or below (2), and the load 5070 // group of (1) is always inserted at or above (1). Thus, the instructions will 5071 // never be reordered. All other dependences are checked to ensure the 5072 // correctness of the instruction reordering. 5073 // 5074 // The algorithm visits all memory accesses in the loop in bottom-up program 5075 // order. Program order is established by traversing the blocks in the loop in 5076 // reverse postorder when collecting the accesses. 5077 // 5078 // We visit the memory accesses in bottom-up order because it can simplify the 5079 // construction of store groups in the presence of write-after-write (WAW) 5080 // dependences. 5081 // 5082 // E.g., for the WAW dependence: A[i] = a; // (1) 5083 // A[i] = b; // (2) 5084 // A[i + 1] = c; // (3) 5085 // 5086 // We will first create a store group with (3) and (2). (1) can't be added to 5087 // this group because it and (2) are dependent. However, (1) can be grouped 5088 // with other accesses that may precede it in program order. Note that a 5089 // bottom-up order does not imply that WAW dependences should not be checked. 5090 void InterleavedAccessInfo::analyzeInterleaving( 5091 const ValueToValueMap &Strides) { 5092 DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n"); 5093 5094 // Holds all accesses with a constant stride. 5095 MapVector<Instruction *, StrideDescriptor> AccessStrideInfo; 5096 collectConstStrideAccesses(AccessStrideInfo, Strides); 5097 5098 if (AccessStrideInfo.empty()) 5099 return; 5100 5101 // Collect the dependences in the loop. 5102 collectDependences(); 5103 5104 // Holds all interleaved store groups temporarily. 5105 SmallSetVector<InterleaveGroup *, 4> StoreGroups; 5106 // Holds all interleaved load groups temporarily. 5107 SmallSetVector<InterleaveGroup *, 4> LoadGroups; 5108 5109 // Search in bottom-up program order for pairs of accesses (A and B) that can 5110 // form interleaved load or store groups. In the algorithm below, access A 5111 // precedes access B in program order. We initialize a group for B in the 5112 // outer loop of the algorithm, and then in the inner loop, we attempt to 5113 // insert each A into B's group if: 5114 // 5115 // 1. A and B have the same stride, 5116 // 2. A and B have the same memory object size, and 5117 // 3. A belongs in B's group according to its distance from B. 5118 // 5119 // Special care is taken to ensure group formation will not break any 5120 // dependences. 5121 for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend(); 5122 BI != E; ++BI) { 5123 Instruction *B = BI->first; 5124 StrideDescriptor DesB = BI->second; 5125 5126 // Initialize a group for B if it has an allowable stride. Even if we don't 5127 // create a group for B, we continue with the bottom-up algorithm to ensure 5128 // we don't break any of B's dependences. 5129 InterleaveGroup *Group = nullptr; 5130 if (isStrided(DesB.Stride)) { 5131 Group = getInterleaveGroup(B); 5132 if (!Group) { 5133 DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n'); 5134 Group = createInterleaveGroup(B, DesB.Stride, DesB.Align); 5135 } 5136 if (B->mayWriteToMemory()) 5137 StoreGroups.insert(Group); 5138 else 5139 LoadGroups.insert(Group); 5140 } 5141 5142 for (auto AI = std::next(BI); AI != E; ++AI) { 5143 Instruction *A = AI->first; 5144 StrideDescriptor DesA = AI->second; 5145 5146 // Our code motion strategy implies that we can't have dependences 5147 // between accesses in an interleaved group and other accesses located 5148 // between the first and last member of the group. Note that this also 5149 // means that a group can't have more than one member at a given offset. 5150 // The accesses in a group can have dependences with other accesses, but 5151 // we must ensure we don't extend the boundaries of the group such that 5152 // we encompass those dependent accesses. 5153 // 5154 // For example, assume we have the sequence of accesses shown below in a 5155 // stride-2 loop: 5156 // 5157 // (1, 2) is a group | A[i] = a; // (1) 5158 // | A[i-1] = b; // (2) | 5159 // A[i-3] = c; // (3) 5160 // A[i] = d; // (4) | (2, 4) is not a group 5161 // 5162 // Because accesses (2) and (3) are dependent, we can group (2) with (1) 5163 // but not with (4). If we did, the dependent access (3) would be within 5164 // the boundaries of the (2, 4) group. 5165 if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) { 5166 5167 // If a dependence exists and A is already in a group, we know that A 5168 // must be a store since A precedes B and WAR dependences are allowed. 5169 // Thus, A would be sunk below B. We release A's group to prevent this 5170 // illegal code motion. A will then be free to form another group with 5171 // instructions that precede it. 5172 if (isInterleaved(A)) { 5173 InterleaveGroup *StoreGroup = getInterleaveGroup(A); 5174 StoreGroups.remove(StoreGroup); 5175 releaseGroup(StoreGroup); 5176 } 5177 5178 // If a dependence exists and A is not already in a group (or it was 5179 // and we just released it), B might be hoisted above A (if B is a 5180 // load) or another store might be sunk below A (if B is a store). In 5181 // either case, we can't add additional instructions to B's group. B 5182 // will only form a group with instructions that it precedes. 5183 break; 5184 } 5185 5186 // At this point, we've checked for illegal code motion. If either A or B 5187 // isn't strided, there's nothing left to do. 5188 if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride)) 5189 continue; 5190 5191 // Ignore A if it's already in a group or isn't the same kind of memory 5192 // operation as B. 5193 if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory()) 5194 continue; 5195 5196 // Check rules 1 and 2. Ignore A if its stride or size is different from 5197 // that of B. 5198 if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size) 5199 continue; 5200 5201 // Calculate the distance from A to B. 5202 const SCEVConstant *DistToB = dyn_cast<SCEVConstant>( 5203 PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev)); 5204 if (!DistToB) 5205 continue; 5206 int64_t DistanceToB = DistToB->getAPInt().getSExtValue(); 5207 5208 // Check rule 3. Ignore A if its distance to B is not a multiple of the 5209 // size. 5210 if (DistanceToB % static_cast<int64_t>(DesB.Size)) 5211 continue; 5212 5213 // Ignore A if either A or B is in a predicated block. Although we 5214 // currently prevent group formation for predicated accesses, we may be 5215 // able to relax this limitation in the future once we handle more 5216 // complicated blocks. 5217 if (isPredicated(A->getParent()) || isPredicated(B->getParent())) 5218 continue; 5219 5220 // The index of A is the index of B plus A's distance to B in multiples 5221 // of the size. 5222 int IndexA = 5223 Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size); 5224 5225 // Try to insert A into B's group. 5226 if (Group->insertMember(A, IndexA, DesA.Align)) { 5227 DEBUG(dbgs() << "LV: Inserted:" << *A << '\n' 5228 << " into the interleave group with" << *B << '\n'); 5229 InterleaveGroupMap[A] = Group; 5230 5231 // Set the first load in program order as the insert position. 5232 if (A->mayReadFromMemory()) 5233 Group->setInsertPos(A); 5234 } 5235 } // Iteration over A accesses. 5236 } // Iteration over B accesses. 5237 5238 // Remove interleaved store groups with gaps. 5239 for (InterleaveGroup *Group : StoreGroups) 5240 if (Group->getNumMembers() != Group->getFactor()) 5241 releaseGroup(Group); 5242 5243 // If there is a non-reversed interleaved load group with gaps, we will need 5244 // to execute at least one scalar epilogue iteration. This will ensure that 5245 // we don't speculatively access memory out-of-bounds. Note that we only need 5246 // to look for a member at index factor - 1, since every group must have a 5247 // member at index zero. 5248 for (InterleaveGroup *Group : LoadGroups) 5249 if (!Group->getMember(Group->getFactor() - 1)) { 5250 if (Group->isReverse()) { 5251 releaseGroup(Group); 5252 } else { 5253 DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n"); 5254 RequiresScalarEpilogue = true; 5255 } 5256 } 5257 } 5258 5259 LoopVectorizationCostModel::VectorizationFactor 5260 LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) { 5261 // Width 1 means no vectorize 5262 VectorizationFactor Factor = {1U, 0U}; 5263 if (OptForSize && Legal->getRuntimePointerChecking()->Need) { 5264 emitAnalysis( 5265 VectorizationReport() 5266 << "runtime pointer checks needed. Enable vectorization of this " 5267 "loop with '#pragma clang loop vectorize(enable)' when " 5268 "compiling with -Os/-Oz"); 5269 DEBUG(dbgs() 5270 << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); 5271 return Factor; 5272 } 5273 5274 if (!EnableCondStoresVectorization && Legal->getNumPredStores()) { 5275 emitAnalysis( 5276 VectorizationReport() 5277 << "store that is conditionally executed prevents vectorization"); 5278 DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n"); 5279 return Factor; 5280 } 5281 5282 // Find the trip count. 5283 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5284 DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 5285 5286 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5287 unsigned SmallestType, WidestType; 5288 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5289 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 5290 unsigned MaxSafeDepDist = -1U; 5291 5292 // Get the maximum safe dependence distance in bits computed by LAA. If the 5293 // loop contains any interleaved accesses, we divide the dependence distance 5294 // by the maximum interleave factor of all interleaved groups. Note that 5295 // although the division ensures correctness, this is a fairly conservative 5296 // computation because the maximum distance computed by LAA may not involve 5297 // any of the interleaved accesses. 5298 if (Legal->getMaxSafeDepDistBytes() != -1U) 5299 MaxSafeDepDist = 5300 Legal->getMaxSafeDepDistBytes() * 8 / Legal->getMaxInterleaveFactor(); 5301 5302 WidestRegister = 5303 ((WidestRegister < MaxSafeDepDist) ? WidestRegister : MaxSafeDepDist); 5304 unsigned MaxVectorSize = WidestRegister / WidestType; 5305 5306 DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / " 5307 << WidestType << " bits.\n"); 5308 DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister 5309 << " bits.\n"); 5310 5311 if (MaxVectorSize == 0) { 5312 DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 5313 MaxVectorSize = 1; 5314 } 5315 5316 assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements" 5317 " into one vector!"); 5318 5319 unsigned VF = MaxVectorSize; 5320 if (MaximizeBandwidth && !OptForSize) { 5321 // Collect all viable vectorization factors. 5322 SmallVector<unsigned, 8> VFs; 5323 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 5324 for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2) 5325 VFs.push_back(VS); 5326 5327 // For each VF calculate its register usage. 5328 auto RUs = calculateRegisterUsage(VFs); 5329 5330 // Select the largest VF which doesn't require more registers than existing 5331 // ones. 5332 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true); 5333 for (int i = RUs.size() - 1; i >= 0; --i) { 5334 if (RUs[i].MaxLocalUsers <= TargetNumRegisters) { 5335 VF = VFs[i]; 5336 break; 5337 } 5338 } 5339 } 5340 5341 // If we optimize the program for size, avoid creating the tail loop. 5342 if (OptForSize) { 5343 // If we are unable to calculate the trip count then don't try to vectorize. 5344 if (TC < 2) { 5345 emitAnalysis( 5346 VectorizationReport() 5347 << "unable to calculate the loop count due to complex control flow"); 5348 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); 5349 return Factor; 5350 } 5351 5352 // Find the maximum SIMD width that can fit within the trip count. 5353 VF = TC % MaxVectorSize; 5354 5355 if (VF == 0) 5356 VF = MaxVectorSize; 5357 else { 5358 // If the trip count that we found modulo the vectorization factor is not 5359 // zero then we require a tail. 5360 emitAnalysis(VectorizationReport() 5361 << "cannot optimize for size and vectorize at the " 5362 "same time. Enable vectorization of this loop " 5363 "with '#pragma clang loop vectorize(enable)' " 5364 "when compiling with -Os/-Oz"); 5365 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); 5366 return Factor; 5367 } 5368 } 5369 5370 int UserVF = Hints->getWidth(); 5371 if (UserVF != 0) { 5372 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 5373 DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 5374 5375 Factor.Width = UserVF; 5376 return Factor; 5377 } 5378 5379 float Cost = expectedCost(1).first; 5380 #ifndef NDEBUG 5381 const float ScalarCost = Cost; 5382 #endif /* NDEBUG */ 5383 unsigned Width = 1; 5384 DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 5385 5386 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5387 // Ignore scalar width, because the user explicitly wants vectorization. 5388 if (ForceVectorization && VF > 1) { 5389 Width = 2; 5390 Cost = expectedCost(Width).first / (float)Width; 5391 } 5392 5393 for (unsigned i = 2; i <= VF; i *= 2) { 5394 // Notice that the vector loop needs to be executed less times, so 5395 // we need to divide the cost of the vector loops by the width of 5396 // the vector elements. 5397 VectorizationCostTy C = expectedCost(i); 5398 float VectorCost = C.first / (float)i; 5399 DEBUG(dbgs() << "LV: Vector loop of width " << i 5400 << " costs: " << (int)VectorCost << ".\n"); 5401 if (!C.second && !ForceVectorization) { 5402 DEBUG( 5403 dbgs() << "LV: Not considering vector loop of width " << i 5404 << " because it will not generate any vector instructions.\n"); 5405 continue; 5406 } 5407 if (VectorCost < Cost) { 5408 Cost = VectorCost; 5409 Width = i; 5410 } 5411 } 5412 5413 DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 5414 << "LV: Vectorization seems to be not beneficial, " 5415 << "but was forced by a user.\n"); 5416 DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 5417 Factor.Width = Width; 5418 Factor.Cost = Width * Cost; 5419 return Factor; 5420 } 5421 5422 std::pair<unsigned, unsigned> 5423 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5424 unsigned MinWidth = -1U; 5425 unsigned MaxWidth = 8; 5426 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5427 5428 // For each block. 5429 for (BasicBlock *BB : TheLoop->blocks()) { 5430 // For each instruction in the loop. 5431 for (Instruction &I : *BB) { 5432 Type *T = I.getType(); 5433 5434 // Skip ignored values. 5435 if (ValuesToIgnore.count(&I)) 5436 continue; 5437 5438 // Only examine Loads, Stores and PHINodes. 5439 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5440 continue; 5441 5442 // Examine PHI nodes that are reduction variables. Update the type to 5443 // account for the recurrence type. 5444 if (auto *PN = dyn_cast<PHINode>(&I)) { 5445 if (!Legal->isReductionVariable(PN)) 5446 continue; 5447 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN]; 5448 T = RdxDesc.getRecurrenceType(); 5449 } 5450 5451 // Examine the stored values. 5452 if (auto *ST = dyn_cast<StoreInst>(&I)) 5453 T = ST->getValueOperand()->getType(); 5454 5455 // Ignore loaded pointer types and stored pointer types that are not 5456 // consecutive. However, we do want to take consecutive stores/loads of 5457 // pointer vectors into account. 5458 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I)) 5459 continue; 5460 5461 MinWidth = std::min(MinWidth, 5462 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5463 MaxWidth = std::max(MaxWidth, 5464 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5465 } 5466 } 5467 5468 return {MinWidth, MaxWidth}; 5469 } 5470 5471 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, 5472 unsigned VF, 5473 unsigned LoopCost) { 5474 5475 // -- The interleave heuristics -- 5476 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5477 // There are many micro-architectural considerations that we can't predict 5478 // at this level. For example, frontend pressure (on decode or fetch) due to 5479 // code size, or the number and capabilities of the execution ports. 5480 // 5481 // We use the following heuristics to select the interleave count: 5482 // 1. If the code has reductions, then we interleave to break the cross 5483 // iteration dependency. 5484 // 2. If the loop is really small, then we interleave to reduce the loop 5485 // overhead. 5486 // 3. We don't interleave if we think that we will spill registers to memory 5487 // due to the increased register pressure. 5488 5489 // When we optimize for size, we don't interleave. 5490 if (OptForSize) 5491 return 1; 5492 5493 // We used the distance for the interleave count. 5494 if (Legal->getMaxSafeDepDistBytes() != -1U) 5495 return 1; 5496 5497 // Do not interleave loops with a relatively small trip count. 5498 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5499 if (TC > 1 && TC < TinyTripCountInterleaveThreshold) 5500 return 1; 5501 5502 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1); 5503 DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 5504 << " registers\n"); 5505 5506 if (VF == 1) { 5507 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 5508 TargetNumRegisters = ForceTargetNumScalarRegs; 5509 } else { 5510 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 5511 TargetNumRegisters = ForceTargetNumVectorRegs; 5512 } 5513 5514 RegisterUsage R = calculateRegisterUsage({VF})[0]; 5515 // We divide by these constants so assume that we have at least one 5516 // instruction that uses at least one register. 5517 R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U); 5518 R.NumInstructions = std::max(R.NumInstructions, 1U); 5519 5520 // We calculate the interleave count using the following formula. 5521 // Subtract the number of loop invariants from the number of available 5522 // registers. These registers are used by all of the interleaved instances. 5523 // Next, divide the remaining registers by the number of registers that is 5524 // required by the loop, in order to estimate how many parallel instances 5525 // fit without causing spills. All of this is rounded down if necessary to be 5526 // a power of two. We want power of two interleave count to simplify any 5527 // addressing operations or alignment considerations. 5528 unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) / 5529 R.MaxLocalUsers); 5530 5531 // Don't count the induction variable as interleaved. 5532 if (EnableIndVarRegisterHeur) 5533 IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) / 5534 std::max(1U, (R.MaxLocalUsers - 1))); 5535 5536 // Clamp the interleave ranges to reasonable counts. 5537 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF); 5538 5539 // Check if the user has overridden the max. 5540 if (VF == 1) { 5541 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5542 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5543 } else { 5544 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5545 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5546 } 5547 5548 // If we did not calculate the cost for VF (because the user selected the VF) 5549 // then we calculate the cost of VF here. 5550 if (LoopCost == 0) 5551 LoopCost = expectedCost(VF).first; 5552 5553 // Clamp the calculated IC to be between the 1 and the max interleave count 5554 // that the target allows. 5555 if (IC > MaxInterleaveCount) 5556 IC = MaxInterleaveCount; 5557 else if (IC < 1) 5558 IC = 1; 5559 5560 // Interleave if we vectorized this loop and there is a reduction that could 5561 // benefit from interleaving. 5562 if (VF > 1 && Legal->getReductionVars()->size()) { 5563 DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5564 return IC; 5565 } 5566 5567 // Note that if we've already vectorized the loop we will have done the 5568 // runtime check and so interleaving won't require further checks. 5569 bool InterleavingRequiresRuntimePointerCheck = 5570 (VF == 1 && Legal->getRuntimePointerChecking()->Need); 5571 5572 // We want to interleave small loops in order to reduce the loop overhead and 5573 // potentially expose ILP opportunities. 5574 DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'); 5575 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 5576 // We assume that the cost overhead is 1 and we use the cost model 5577 // to estimate the cost of the loop and interleave until the cost of the 5578 // loop overhead is about 5% of the cost of the loop. 5579 unsigned SmallIC = 5580 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5581 5582 // Interleave until store/load ports (estimated by max interleave count) are 5583 // saturated. 5584 unsigned NumStores = Legal->getNumStores(); 5585 unsigned NumLoads = Legal->getNumLoads(); 5586 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5587 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5588 5589 // If we have a scalar reduction (vector reductions are already dealt with 5590 // by this point), we can increase the critical path length if the loop 5591 // we're interleaving is inside another loop. Limit, by default to 2, so the 5592 // critical path only gets increased by one reduction operation. 5593 if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) { 5594 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 5595 SmallIC = std::min(SmallIC, F); 5596 StoresIC = std::min(StoresIC, F); 5597 LoadsIC = std::min(LoadsIC, F); 5598 } 5599 5600 if (EnableLoadStoreRuntimeInterleave && 5601 std::max(StoresIC, LoadsIC) > SmallIC) { 5602 DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 5603 return std::max(StoresIC, LoadsIC); 5604 } 5605 5606 DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 5607 return SmallIC; 5608 } 5609 5610 // Interleave if this is a large loop (small loops are already dealt with by 5611 // this point) that could benefit from interleaving. 5612 bool HasReductions = (Legal->getReductionVars()->size() > 0); 5613 if (TTI.enableAggressiveInterleaving(HasReductions)) { 5614 DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 5615 return IC; 5616 } 5617 5618 DEBUG(dbgs() << "LV: Not Interleaving.\n"); 5619 return 1; 5620 } 5621 5622 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 5623 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) { 5624 // This function calculates the register usage by measuring the highest number 5625 // of values that are alive at a single location. Obviously, this is a very 5626 // rough estimation. We scan the loop in a topological order in order and 5627 // assign a number to each instruction. We use RPO to ensure that defs are 5628 // met before their users. We assume that each instruction that has in-loop 5629 // users starts an interval. We record every time that an in-loop value is 5630 // used, so we have a list of the first and last occurrences of each 5631 // instruction. Next, we transpose this data structure into a multi map that 5632 // holds the list of intervals that *end* at a specific location. This multi 5633 // map allows us to perform a linear search. We scan the instructions linearly 5634 // and record each time that a new interval starts, by placing it in a set. 5635 // If we find this value in the multi-map then we remove it from the set. 5636 // The max register usage is the maximum size of the set. 5637 // We also search for instructions that are defined outside the loop, but are 5638 // used inside the loop. We need this number separately from the max-interval 5639 // usage number because when we unroll, loop-invariant values do not take 5640 // more register. 5641 LoopBlocksDFS DFS(TheLoop); 5642 DFS.perform(LI); 5643 5644 RegisterUsage RU; 5645 RU.NumInstructions = 0; 5646 5647 // Each 'key' in the map opens a new interval. The values 5648 // of the map are the index of the 'last seen' usage of the 5649 // instruction that is the key. 5650 typedef DenseMap<Instruction *, unsigned> IntervalMap; 5651 // Maps instruction to its index. 5652 DenseMap<unsigned, Instruction *> IdxToInstr; 5653 // Marks the end of each interval. 5654 IntervalMap EndPoint; 5655 // Saves the list of instruction indices that are used in the loop. 5656 SmallSet<Instruction *, 8> Ends; 5657 // Saves the list of values that are used in the loop but are 5658 // defined outside the loop, such as arguments and constants. 5659 SmallPtrSet<Value *, 8> LoopInvariants; 5660 5661 unsigned Index = 0; 5662 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 5663 RU.NumInstructions += BB->size(); 5664 for (Instruction &I : *BB) { 5665 IdxToInstr[Index++] = &I; 5666 5667 // Save the end location of each USE. 5668 for (Value *U : I.operands()) { 5669 auto *Instr = dyn_cast<Instruction>(U); 5670 5671 // Ignore non-instruction values such as arguments, constants, etc. 5672 if (!Instr) 5673 continue; 5674 5675 // If this instruction is outside the loop then record it and continue. 5676 if (!TheLoop->contains(Instr)) { 5677 LoopInvariants.insert(Instr); 5678 continue; 5679 } 5680 5681 // Overwrite previous end points. 5682 EndPoint[Instr] = Index; 5683 Ends.insert(Instr); 5684 } 5685 } 5686 } 5687 5688 // Saves the list of intervals that end with the index in 'key'. 5689 typedef SmallVector<Instruction *, 2> InstrList; 5690 DenseMap<unsigned, InstrList> TransposeEnds; 5691 5692 // Transpose the EndPoints to a list of values that end at each index. 5693 for (auto &Interval : EndPoint) 5694 TransposeEnds[Interval.second].push_back(Interval.first); 5695 5696 SmallSet<Instruction *, 8> OpenIntervals; 5697 5698 // Get the size of the widest register. 5699 unsigned MaxSafeDepDist = -1U; 5700 if (Legal->getMaxSafeDepDistBytes() != -1U) 5701 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 5702 unsigned WidestRegister = 5703 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist); 5704 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5705 5706 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5707 SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0); 5708 5709 DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5710 5711 // A lambda that gets the register usage for the given type and VF. 5712 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 5713 if (Ty->isTokenTy()) 5714 return 0U; 5715 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 5716 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 5717 }; 5718 5719 for (unsigned int i = 0; i < Index; ++i) { 5720 Instruction *I = IdxToInstr[i]; 5721 // Ignore instructions that are never used within the loop. 5722 if (!Ends.count(I)) 5723 continue; 5724 5725 // Remove all of the instructions that end at this location. 5726 InstrList &List = TransposeEnds[i]; 5727 for (Instruction *ToRemove : List) 5728 OpenIntervals.erase(ToRemove); 5729 5730 // Skip ignored values. 5731 if (ValuesToIgnore.count(I)) 5732 continue; 5733 5734 // For each VF find the maximum usage of registers. 5735 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 5736 if (VFs[j] == 1) { 5737 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 5738 continue; 5739 } 5740 5741 // Count the number of live intervals. 5742 unsigned RegUsage = 0; 5743 for (auto Inst : OpenIntervals) { 5744 // Skip ignored values for VF > 1. 5745 if (VecValuesToIgnore.count(Inst)) 5746 continue; 5747 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 5748 } 5749 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 5750 } 5751 5752 DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 5753 << OpenIntervals.size() << '\n'); 5754 5755 // Add the current instruction to the list of open intervals. 5756 OpenIntervals.insert(I); 5757 } 5758 5759 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 5760 unsigned Invariant = 0; 5761 if (VFs[i] == 1) 5762 Invariant = LoopInvariants.size(); 5763 else { 5764 for (auto Inst : LoopInvariants) 5765 Invariant += GetRegUsage(Inst->getType(), VFs[i]); 5766 } 5767 5768 DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); 5769 DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); 5770 DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n'); 5771 DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n'); 5772 5773 RU.LoopInvariantRegs = Invariant; 5774 RU.MaxLocalUsers = MaxUsages[i]; 5775 RUs[i] = RU; 5776 } 5777 5778 return RUs; 5779 } 5780 5781 LoopVectorizationCostModel::VectorizationCostTy 5782 LoopVectorizationCostModel::expectedCost(unsigned VF) { 5783 VectorizationCostTy Cost; 5784 5785 // For each block. 5786 for (BasicBlock *BB : TheLoop->blocks()) { 5787 VectorizationCostTy BlockCost; 5788 5789 // For each instruction in the old loop. 5790 for (Instruction &I : *BB) { 5791 // Skip dbg intrinsics. 5792 if (isa<DbgInfoIntrinsic>(I)) 5793 continue; 5794 5795 // Skip ignored values. 5796 if (ValuesToIgnore.count(&I)) 5797 continue; 5798 5799 VectorizationCostTy C = getInstructionCost(&I, VF); 5800 5801 // Check if we should override the cost. 5802 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 5803 C.first = ForceTargetInstructionCost; 5804 5805 BlockCost.first += C.first; 5806 BlockCost.second |= C.second; 5807 DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF " 5808 << VF << " For instruction: " << I << '\n'); 5809 } 5810 5811 // We assume that if-converted blocks have a 50% chance of being executed. 5812 // When the code is scalar then some of the blocks are avoided due to CF. 5813 // When the code is vectorized we execute all code paths. 5814 if (VF == 1 && Legal->blockNeedsPredication(BB)) 5815 BlockCost.first /= 2; 5816 5817 Cost.first += BlockCost.first; 5818 Cost.second |= BlockCost.second; 5819 } 5820 5821 return Cost; 5822 } 5823 5824 /// \brief Check if the load/store instruction \p I may be translated into 5825 /// gather/scatter during vectorization. 5826 /// 5827 /// Pointer \p Ptr specifies address in memory for the given scalar memory 5828 /// instruction. We need it to retrieve data type. 5829 /// Using gather/scatter is possible when it is supported by target. 5830 static bool isGatherOrScatterLegal(Instruction *I, Value *Ptr, 5831 LoopVectorizationLegality *Legal) { 5832 auto *DataTy = cast<PointerType>(Ptr->getType())->getElementType(); 5833 return (isa<LoadInst>(I) && Legal->isLegalMaskedGather(DataTy)) || 5834 (isa<StoreInst>(I) && Legal->isLegalMaskedScatter(DataTy)); 5835 } 5836 5837 /// \brief Check whether the address computation for a non-consecutive memory 5838 /// access looks like an unlikely candidate for being merged into the indexing 5839 /// mode. 5840 /// 5841 /// We look for a GEP which has one index that is an induction variable and all 5842 /// other indices are loop invariant. If the stride of this access is also 5843 /// within a small bound we decide that this address computation can likely be 5844 /// merged into the addressing mode. 5845 /// In all other cases, we identify the address computation as complex. 5846 static bool isLikelyComplexAddressComputation(Value *Ptr, 5847 LoopVectorizationLegality *Legal, 5848 ScalarEvolution *SE, 5849 const Loop *TheLoop) { 5850 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 5851 if (!Gep) 5852 return true; 5853 5854 // We are looking for a gep with all loop invariant indices except for one 5855 // which should be an induction variable. 5856 unsigned NumOperands = Gep->getNumOperands(); 5857 for (unsigned i = 1; i < NumOperands; ++i) { 5858 Value *Opd = Gep->getOperand(i); 5859 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 5860 !Legal->isInductionVariable(Opd)) 5861 return true; 5862 } 5863 5864 // Now we know we have a GEP ptr, %inv, %ind, %inv. Make sure that the step 5865 // can likely be merged into the address computation. 5866 unsigned MaxMergeDistance = 64; 5867 5868 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Ptr)); 5869 if (!AddRec) 5870 return true; 5871 5872 // Check the step is constant. 5873 const SCEV *Step = AddRec->getStepRecurrence(*SE); 5874 // Calculate the pointer stride and check if it is consecutive. 5875 const auto *C = dyn_cast<SCEVConstant>(Step); 5876 if (!C) 5877 return true; 5878 5879 const APInt &APStepVal = C->getAPInt(); 5880 5881 // Huge step value - give up. 5882 if (APStepVal.getBitWidth() > 64) 5883 return true; 5884 5885 int64_t StepVal = APStepVal.getSExtValue(); 5886 5887 return StepVal > MaxMergeDistance; 5888 } 5889 5890 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 5891 return Legal->hasStride(I->getOperand(0)) || 5892 Legal->hasStride(I->getOperand(1)); 5893 } 5894 5895 LoopVectorizationCostModel::VectorizationCostTy 5896 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 5897 // If we know that this instruction will remain uniform, check the cost of 5898 // the scalar version. 5899 if (Legal->isUniformAfterVectorization(I)) 5900 VF = 1; 5901 5902 Type *VectorTy; 5903 unsigned C = getInstructionCost(I, VF, VectorTy); 5904 5905 bool TypeNotScalarized = 5906 VF > 1 && !VectorTy->isVoidTy() && TTI.getNumberOfParts(VectorTy) < VF; 5907 return VectorizationCostTy(C, TypeNotScalarized); 5908 } 5909 5910 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 5911 unsigned VF, 5912 Type *&VectorTy) { 5913 Type *RetTy = I->getType(); 5914 if (VF > 1 && MinBWs.count(I)) 5915 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 5916 VectorTy = ToVectorTy(RetTy, VF); 5917 auto SE = PSE.getSE(); 5918 5919 // TODO: We need to estimate the cost of intrinsic calls. 5920 switch (I->getOpcode()) { 5921 case Instruction::GetElementPtr: 5922 // We mark this instruction as zero-cost because the cost of GEPs in 5923 // vectorized code depends on whether the corresponding memory instruction 5924 // is scalarized or not. Therefore, we handle GEPs with the memory 5925 // instruction cost. 5926 return 0; 5927 case Instruction::Br: { 5928 return TTI.getCFInstrCost(I->getOpcode()); 5929 } 5930 case Instruction::PHI: { 5931 auto *Phi = cast<PHINode>(I); 5932 5933 // First-order recurrences are replaced by vector shuffles inside the loop. 5934 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 5935 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 5936 VectorTy, VF - 1, VectorTy); 5937 5938 // TODO: IF-converted IFs become selects. 5939 return 0; 5940 } 5941 case Instruction::Add: 5942 case Instruction::FAdd: 5943 case Instruction::Sub: 5944 case Instruction::FSub: 5945 case Instruction::Mul: 5946 case Instruction::FMul: 5947 case Instruction::UDiv: 5948 case Instruction::SDiv: 5949 case Instruction::FDiv: 5950 case Instruction::URem: 5951 case Instruction::SRem: 5952 case Instruction::FRem: 5953 case Instruction::Shl: 5954 case Instruction::LShr: 5955 case Instruction::AShr: 5956 case Instruction::And: 5957 case Instruction::Or: 5958 case Instruction::Xor: { 5959 // Since we will replace the stride by 1 the multiplication should go away. 5960 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 5961 return 0; 5962 // Certain instructions can be cheaper to vectorize if they have a constant 5963 // second vector operand. One example of this are shifts on x86. 5964 TargetTransformInfo::OperandValueKind Op1VK = 5965 TargetTransformInfo::OK_AnyValue; 5966 TargetTransformInfo::OperandValueKind Op2VK = 5967 TargetTransformInfo::OK_AnyValue; 5968 TargetTransformInfo::OperandValueProperties Op1VP = 5969 TargetTransformInfo::OP_None; 5970 TargetTransformInfo::OperandValueProperties Op2VP = 5971 TargetTransformInfo::OP_None; 5972 Value *Op2 = I->getOperand(1); 5973 5974 // Check for a splat of a constant or for a non uniform vector of constants. 5975 if (isa<ConstantInt>(Op2)) { 5976 ConstantInt *CInt = cast<ConstantInt>(Op2); 5977 if (CInt && CInt->getValue().isPowerOf2()) 5978 Op2VP = TargetTransformInfo::OP_PowerOf2; 5979 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 5980 } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) { 5981 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 5982 Constant *SplatValue = cast<Constant>(Op2)->getSplatValue(); 5983 if (SplatValue) { 5984 ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue); 5985 if (CInt && CInt->getValue().isPowerOf2()) 5986 Op2VP = TargetTransformInfo::OP_PowerOf2; 5987 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 5988 } 5989 } 5990 5991 return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, Op2VK, 5992 Op1VP, Op2VP); 5993 } 5994 case Instruction::Select: { 5995 SelectInst *SI = cast<SelectInst>(I); 5996 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 5997 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 5998 Type *CondTy = SI->getCondition()->getType(); 5999 if (!ScalarCond) 6000 CondTy = VectorType::get(CondTy, VF); 6001 6002 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy); 6003 } 6004 case Instruction::ICmp: 6005 case Instruction::FCmp: { 6006 Type *ValTy = I->getOperand(0)->getType(); 6007 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 6008 auto It = MinBWs.find(Op0AsInstruction); 6009 if (VF > 1 && It != MinBWs.end()) 6010 ValTy = IntegerType::get(ValTy->getContext(), It->second); 6011 VectorTy = ToVectorTy(ValTy, VF); 6012 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy); 6013 } 6014 case Instruction::Store: 6015 case Instruction::Load: { 6016 StoreInst *SI = dyn_cast<StoreInst>(I); 6017 LoadInst *LI = dyn_cast<LoadInst>(I); 6018 Type *ValTy = (SI ? SI->getValueOperand()->getType() : LI->getType()); 6019 VectorTy = ToVectorTy(ValTy, VF); 6020 6021 unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment(); 6022 unsigned AS = 6023 SI ? SI->getPointerAddressSpace() : LI->getPointerAddressSpace(); 6024 Value *Ptr = SI ? SI->getPointerOperand() : LI->getPointerOperand(); 6025 // We add the cost of address computation here instead of with the gep 6026 // instruction because only here we know whether the operation is 6027 // scalarized. 6028 if (VF == 1) 6029 return TTI.getAddressComputationCost(VectorTy) + 6030 TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 6031 6032 if (LI && Legal->isUniform(Ptr)) { 6033 // Scalar load + broadcast 6034 unsigned Cost = TTI.getAddressComputationCost(ValTy->getScalarType()); 6035 Cost += TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), 6036 Alignment, AS); 6037 return Cost + 6038 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, ValTy); 6039 } 6040 6041 // For an interleaved access, calculate the total cost of the whole 6042 // interleave group. 6043 if (Legal->isAccessInterleaved(I)) { 6044 auto Group = Legal->getInterleavedAccessGroup(I); 6045 assert(Group && "Fail to get an interleaved access group."); 6046 6047 // Only calculate the cost once at the insert position. 6048 if (Group->getInsertPos() != I) 6049 return 0; 6050 6051 unsigned InterleaveFactor = Group->getFactor(); 6052 Type *WideVecTy = 6053 VectorType::get(VectorTy->getVectorElementType(), 6054 VectorTy->getVectorNumElements() * InterleaveFactor); 6055 6056 // Holds the indices of existing members in an interleaved load group. 6057 // An interleaved store group doesn't need this as it doesn't allow gaps. 6058 SmallVector<unsigned, 4> Indices; 6059 if (LI) { 6060 for (unsigned i = 0; i < InterleaveFactor; i++) 6061 if (Group->getMember(i)) 6062 Indices.push_back(i); 6063 } 6064 6065 // Calculate the cost of the whole interleaved group. 6066 unsigned Cost = TTI.getInterleavedMemoryOpCost( 6067 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, 6068 Group->getAlignment(), AS); 6069 6070 if (Group->isReverse()) 6071 Cost += 6072 Group->getNumMembers() * 6073 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6074 6075 // FIXME: The interleaved load group with a huge gap could be even more 6076 // expensive than scalar operations. Then we could ignore such group and 6077 // use scalar operations instead. 6078 return Cost; 6079 } 6080 6081 // Scalarized loads/stores. 6082 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 6083 bool UseGatherOrScatter = 6084 (ConsecutiveStride == 0) && isGatherOrScatterLegal(I, Ptr, Legal); 6085 6086 bool Reverse = ConsecutiveStride < 0; 6087 const DataLayout &DL = I->getModule()->getDataLayout(); 6088 uint64_t ScalarAllocatedSize = DL.getTypeAllocSize(ValTy); 6089 uint64_t VectorElementSize = DL.getTypeStoreSize(VectorTy) / VF; 6090 if ((!ConsecutiveStride && !UseGatherOrScatter) || 6091 ScalarAllocatedSize != VectorElementSize) { 6092 bool IsComplexComputation = 6093 isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop); 6094 unsigned Cost = 0; 6095 // The cost of extracting from the value vector and pointer vector. 6096 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6097 for (unsigned i = 0; i < VF; ++i) { 6098 // The cost of extracting the pointer operand. 6099 Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, PtrTy, i); 6100 // In case of STORE, the cost of ExtractElement from the vector. 6101 // In case of LOAD, the cost of InsertElement into the returned 6102 // vector. 6103 Cost += TTI.getVectorInstrCost(SI ? Instruction::ExtractElement 6104 : Instruction::InsertElement, 6105 VectorTy, i); 6106 } 6107 6108 // The cost of the scalar loads/stores. 6109 Cost += VF * TTI.getAddressComputationCost(PtrTy, IsComplexComputation); 6110 Cost += VF * 6111 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), 6112 Alignment, AS); 6113 return Cost; 6114 } 6115 6116 unsigned Cost = TTI.getAddressComputationCost(VectorTy); 6117 if (UseGatherOrScatter) { 6118 assert(ConsecutiveStride == 0 && 6119 "Gather/Scatter are not used for consecutive stride"); 6120 return Cost + 6121 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 6122 Legal->isMaskRequired(I), Alignment); 6123 } 6124 // Wide load/stores. 6125 if (Legal->isMaskRequired(I)) 6126 Cost += 6127 TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 6128 else 6129 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 6130 6131 if (Reverse) 6132 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6133 return Cost; 6134 } 6135 case Instruction::ZExt: 6136 case Instruction::SExt: 6137 case Instruction::FPToUI: 6138 case Instruction::FPToSI: 6139 case Instruction::FPExt: 6140 case Instruction::PtrToInt: 6141 case Instruction::IntToPtr: 6142 case Instruction::SIToFP: 6143 case Instruction::UIToFP: 6144 case Instruction::Trunc: 6145 case Instruction::FPTrunc: 6146 case Instruction::BitCast: { 6147 // We optimize the truncation of induction variable. 6148 // The cost of these is the same as the scalar operation. 6149 if (I->getOpcode() == Instruction::Trunc && 6150 Legal->isInductionVariable(I->getOperand(0))) 6151 return TTI.getCastInstrCost(I->getOpcode(), I->getType(), 6152 I->getOperand(0)->getType()); 6153 6154 Type *SrcScalarTy = I->getOperand(0)->getType(); 6155 Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF); 6156 if (VF > 1 && MinBWs.count(I)) { 6157 // This cast is going to be shrunk. This may remove the cast or it might 6158 // turn it into slightly different cast. For example, if MinBW == 16, 6159 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 6160 // 6161 // Calculate the modified src and dest types. 6162 Type *MinVecTy = VectorTy; 6163 if (I->getOpcode() == Instruction::Trunc) { 6164 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 6165 VectorTy = 6166 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6167 } else if (I->getOpcode() == Instruction::ZExt || 6168 I->getOpcode() == Instruction::SExt) { 6169 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 6170 VectorTy = 6171 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6172 } 6173 } 6174 6175 return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy); 6176 } 6177 case Instruction::Call: { 6178 bool NeedToScalarize; 6179 CallInst *CI = cast<CallInst>(I); 6180 unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize); 6181 if (getVectorIntrinsicIDForCall(CI, TLI)) 6182 return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI)); 6183 return CallCost; 6184 } 6185 default: { 6186 // We are scalarizing the instruction. Return the cost of the scalar 6187 // instruction, plus the cost of insert and extract into vector 6188 // elements, times the vector width. 6189 unsigned Cost = 0; 6190 6191 if (!RetTy->isVoidTy() && VF != 1) { 6192 unsigned InsCost = 6193 TTI.getVectorInstrCost(Instruction::InsertElement, VectorTy); 6194 unsigned ExtCost = 6195 TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy); 6196 6197 // The cost of inserting the results plus extracting each one of the 6198 // operands. 6199 Cost += VF * (InsCost + ExtCost * I->getNumOperands()); 6200 } 6201 6202 // The cost of executing VF copies of the scalar instruction. This opcode 6203 // is unknown. Assume that it is the same as 'mul'. 6204 Cost += VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy); 6205 return Cost; 6206 } 6207 } // end of switch. 6208 } 6209 6210 char LoopVectorize::ID = 0; 6211 static const char lv_name[] = "Loop Vectorization"; 6212 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 6213 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 6214 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 6215 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 6216 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 6217 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 6218 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 6219 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 6220 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 6221 INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass) 6222 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 6223 INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 6224 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 6225 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 6226 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 6227 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 6228 6229 namespace llvm { 6230 Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) { 6231 return new LoopVectorize(NoUnrolling, AlwaysVectorize); 6232 } 6233 } 6234 6235 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 6236 // Check for a store. 6237 if (auto *ST = dyn_cast<StoreInst>(Inst)) 6238 return Legal->isConsecutivePtr(ST->getPointerOperand()) != 0; 6239 6240 // Check for a load. 6241 if (auto *LI = dyn_cast<LoadInst>(Inst)) 6242 return Legal->isConsecutivePtr(LI->getPointerOperand()) != 0; 6243 6244 return false; 6245 } 6246 6247 /// Take the pointer operand from the Load/Store instruction. 6248 /// Returns NULL if this is not a valid Load/Store instruction. 6249 static Value *getPointerOperand(Value *I) { 6250 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 6251 return LI->getPointerOperand(); 6252 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 6253 return SI->getPointerOperand(); 6254 return nullptr; 6255 } 6256 6257 void LoopVectorizationCostModel::collectValuesToIgnore() { 6258 // Ignore ephemeral values. 6259 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 6260 6261 // Ignore type-promoting instructions we identified during reduction 6262 // detection. 6263 for (auto &Reduction : *Legal->getReductionVars()) { 6264 RecurrenceDescriptor &RedDes = Reduction.second; 6265 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 6266 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6267 } 6268 6269 // Insert uniform instruction into VecValuesToIgnore. 6270 // Collect non-gather/scatter and non-consecutive ptr in NonConsecutivePtr. 6271 SmallPtrSet<Instruction *, 8> NonConsecutivePtr; 6272 for (auto *BB : TheLoop->getBlocks()) { 6273 for (auto &I : *BB) { 6274 if (Legal->isUniformAfterVectorization(&I)) 6275 VecValuesToIgnore.insert(&I); 6276 Instruction *PI = dyn_cast_or_null<Instruction>(getPointerOperand(&I)); 6277 if (PI && !Legal->isConsecutivePtr(PI) && 6278 !isGatherOrScatterLegal(&I, PI, Legal)) 6279 NonConsecutivePtr.insert(PI); 6280 } 6281 } 6282 6283 // Ignore induction phis that are either used in uniform instructions or 6284 // NonConsecutivePtr. 6285 for (auto &Induction : *Legal->getInductionVars()) { 6286 auto *PN = Induction.first; 6287 auto *UpdateV = PN->getIncomingValueForBlock(TheLoop->getLoopLatch()); 6288 6289 if (std::all_of(PN->user_begin(), PN->user_end(), 6290 [&](User *U) -> bool { 6291 Instruction *UI = dyn_cast<Instruction>(U); 6292 return U == UpdateV || !TheLoop->contains(UI) || 6293 Legal->isUniformAfterVectorization(UI) || 6294 NonConsecutivePtr.count(UI); 6295 }) && 6296 std::all_of(UpdateV->user_begin(), UpdateV->user_end(), 6297 [&](User *U) -> bool { 6298 Instruction *UI = dyn_cast<Instruction>(U); 6299 return U == PN || !TheLoop->contains(UI) || 6300 Legal->isUniformAfterVectorization(UI) || 6301 NonConsecutivePtr.count(UI); 6302 })) { 6303 VecValuesToIgnore.insert(PN); 6304 VecValuesToIgnore.insert(UpdateV); 6305 } 6306 } 6307 } 6308 6309 void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr, 6310 bool IfPredicateStore) { 6311 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 6312 // Holds vector parameters or scalars, in case of uniform vals. 6313 SmallVector<VectorParts, 4> Params; 6314 6315 setDebugLocFromInst(Builder, Instr); 6316 6317 // Find all of the vectorized parameters. 6318 for (Value *SrcOp : Instr->operands()) { 6319 // If we are accessing the old induction variable, use the new one. 6320 if (SrcOp == OldInduction) { 6321 Params.push_back(getVectorValue(SrcOp)); 6322 continue; 6323 } 6324 6325 // Try using previously calculated values. 6326 Instruction *SrcInst = dyn_cast<Instruction>(SrcOp); 6327 6328 // If the src is an instruction that appeared earlier in the basic block 6329 // then it should already be vectorized. 6330 if (SrcInst && OrigLoop->contains(SrcInst)) { 6331 assert(WidenMap.has(SrcInst) && "Source operand is unavailable"); 6332 // The parameter is a vector value from earlier. 6333 Params.push_back(WidenMap.get(SrcInst)); 6334 } else { 6335 // The parameter is a scalar from outside the loop. Maybe even a constant. 6336 VectorParts Scalars; 6337 Scalars.append(UF, SrcOp); 6338 Params.push_back(Scalars); 6339 } 6340 } 6341 6342 assert(Params.size() == Instr->getNumOperands() && 6343 "Invalid number of operands"); 6344 6345 // Does this instruction return a value ? 6346 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 6347 6348 Value *UndefVec = IsVoidRetTy ? nullptr : UndefValue::get(Instr->getType()); 6349 // Create a new entry in the WidenMap and initialize it to Undef or Null. 6350 VectorParts &VecResults = WidenMap.splat(Instr, UndefVec); 6351 6352 VectorParts Cond; 6353 if (IfPredicateStore) { 6354 assert(Instr->getParent()->getSinglePredecessor() && 6355 "Only support single predecessor blocks"); 6356 Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(), 6357 Instr->getParent()); 6358 } 6359 6360 // For each vector unroll 'part': 6361 for (unsigned Part = 0; Part < UF; ++Part) { 6362 // For each scalar that we create: 6363 6364 // Start an "if (pred) a[i] = ..." block. 6365 Value *Cmp = nullptr; 6366 if (IfPredicateStore) { 6367 if (Cond[Part]->getType()->isVectorTy()) 6368 Cond[Part] = 6369 Builder.CreateExtractElement(Cond[Part], Builder.getInt32(0)); 6370 Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cond[Part], 6371 ConstantInt::get(Cond[Part]->getType(), 1)); 6372 } 6373 6374 Instruction *Cloned = Instr->clone(); 6375 if (!IsVoidRetTy) 6376 Cloned->setName(Instr->getName() + ".cloned"); 6377 // Replace the operands of the cloned instructions with extracted scalars. 6378 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 6379 Value *Op = Params[op][Part]; 6380 Cloned->setOperand(op, Op); 6381 } 6382 6383 // Place the cloned scalar in the new loop. 6384 Builder.Insert(Cloned); 6385 6386 // If we just cloned a new assumption, add it the assumption cache. 6387 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 6388 if (II->getIntrinsicID() == Intrinsic::assume) 6389 AC->registerAssumption(II); 6390 6391 // If the original scalar returns a value we need to place it in a vector 6392 // so that future users will be able to use it. 6393 if (!IsVoidRetTy) 6394 VecResults[Part] = Cloned; 6395 6396 // End if-block. 6397 if (IfPredicateStore) 6398 PredicatedStores.push_back(std::make_pair(cast<StoreInst>(Cloned), Cmp)); 6399 } 6400 } 6401 6402 void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) { 6403 auto *SI = dyn_cast<StoreInst>(Instr); 6404 bool IfPredicateStore = (SI && Legal->blockNeedsPredication(SI->getParent())); 6405 6406 return scalarizeInstruction(Instr, IfPredicateStore); 6407 } 6408 6409 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 6410 6411 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 6412 6413 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 6414 Instruction::BinaryOps BinOp) { 6415 // When unrolling and the VF is 1, we only need to add a simple scalar. 6416 Type *Ty = Val->getType(); 6417 assert(!Ty->isVectorTy() && "Val must be a scalar"); 6418 6419 if (Ty->isFloatingPointTy()) { 6420 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 6421 6422 // Floating point operations had to be 'fast' to enable the unrolling. 6423 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 6424 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 6425 } 6426 Constant *C = ConstantInt::get(Ty, StartIdx); 6427 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 6428 } 6429 6430 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 6431 SmallVector<Metadata *, 4> MDs; 6432 // Reserve first location for self reference to the LoopID metadata node. 6433 MDs.push_back(nullptr); 6434 bool IsUnrollMetadata = false; 6435 MDNode *LoopID = L->getLoopID(); 6436 if (LoopID) { 6437 // First find existing loop unrolling disable metadata. 6438 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 6439 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 6440 if (MD) { 6441 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 6442 IsUnrollMetadata = 6443 S && S->getString().startswith("llvm.loop.unroll.disable"); 6444 } 6445 MDs.push_back(LoopID->getOperand(i)); 6446 } 6447 } 6448 6449 if (!IsUnrollMetadata) { 6450 // Add runtime unroll disable metadata. 6451 LLVMContext &Context = L->getHeader()->getContext(); 6452 SmallVector<Metadata *, 1> DisableOperands; 6453 DisableOperands.push_back( 6454 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 6455 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 6456 MDs.push_back(DisableNode); 6457 MDNode *NewLoopID = MDNode::get(Context, MDs); 6458 // Set operand 0 to refer to the loop id itself. 6459 NewLoopID->replaceOperandWith(0, NewLoopID); 6460 L->setLoopID(NewLoopID); 6461 } 6462 } 6463 6464 bool LoopVectorizePass::processLoop(Loop *L) { 6465 assert(L->empty() && "Only process inner loops."); 6466 6467 #ifndef NDEBUG 6468 const std::string DebugLocStr = getDebugLocString(L); 6469 #endif /* NDEBUG */ 6470 6471 DEBUG(dbgs() << "\nLV: Checking a loop in \"" 6472 << L->getHeader()->getParent()->getName() << "\" from " 6473 << DebugLocStr << "\n"); 6474 6475 LoopVectorizeHints Hints(L, DisableUnrolling, *ORE); 6476 6477 DEBUG(dbgs() << "LV: Loop hints:" 6478 << " force=" 6479 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 6480 ? "disabled" 6481 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 6482 ? "enabled" 6483 : "?")) 6484 << " width=" << Hints.getWidth() 6485 << " unroll=" << Hints.getInterleave() << "\n"); 6486 6487 // Function containing loop 6488 Function *F = L->getHeader()->getParent(); 6489 6490 // Looking at the diagnostic output is the only way to determine if a loop 6491 // was vectorized (other than looking at the IR or machine code), so it 6492 // is important to generate an optimization remark for each loop. Most of 6493 // these messages are generated by emitOptimizationRemarkAnalysis. Remarks 6494 // generated by emitOptimizationRemark and emitOptimizationRemarkMissed are 6495 // less verbose reporting vectorized loops and unvectorized loops that may 6496 // benefit from vectorization, respectively. 6497 6498 if (!Hints.allowVectorization(F, L, AlwaysVectorize)) { 6499 DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 6500 return false; 6501 } 6502 6503 // Check the loop for a trip count threshold: 6504 // do not vectorize loops with a tiny trip count. 6505 const unsigned TC = SE->getSmallConstantTripCount(L); 6506 if (TC > 0u && TC < TinyTripCountVectorThreshold) { 6507 DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 6508 << "This loop is not worth vectorizing."); 6509 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 6510 DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 6511 else { 6512 DEBUG(dbgs() << "\n"); 6513 emitAnalysisDiag(L, Hints, *ORE, VectorizationReport() 6514 << "vectorization is not beneficial " 6515 "and is not explicitly forced"); 6516 return false; 6517 } 6518 } 6519 6520 PredicatedScalarEvolution PSE(*SE, *L); 6521 6522 // Check if it is legal to vectorize the loop. 6523 LoopVectorizationRequirements Requirements(*ORE); 6524 LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE, 6525 &Requirements, &Hints); 6526 if (!LVL.canVectorize()) { 6527 DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 6528 emitMissedWarning(F, L, Hints, ORE); 6529 return false; 6530 } 6531 6532 // Use the cost model. 6533 LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F, 6534 &Hints); 6535 CM.collectValuesToIgnore(); 6536 6537 // Check the function attributes to find out if this function should be 6538 // optimized for size. 6539 bool OptForSize = 6540 Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize(); 6541 6542 // Compute the weighted frequency of this loop being executed and see if it 6543 // is less than 20% of the function entry baseline frequency. Note that we 6544 // always have a canonical loop here because we think we *can* vectorize. 6545 // FIXME: This is hidden behind a flag due to pervasive problems with 6546 // exactly what block frequency models. 6547 if (LoopVectorizeWithBlockFrequency) { 6548 BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader()); 6549 if (Hints.getForce() != LoopVectorizeHints::FK_Enabled && 6550 LoopEntryFreq < ColdEntryFreq) 6551 OptForSize = true; 6552 } 6553 6554 // Check the function attributes to see if implicit floats are allowed. 6555 // FIXME: This check doesn't seem possibly correct -- what if the loop is 6556 // an integer loop and the vector instructions selected are purely integer 6557 // vector instructions? 6558 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 6559 DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat" 6560 "attribute is used.\n"); 6561 emitAnalysisDiag( 6562 L, Hints, *ORE, 6563 VectorizationReport() 6564 << "loop not vectorized due to NoImplicitFloat attribute"); 6565 emitMissedWarning(F, L, Hints, ORE); 6566 return false; 6567 } 6568 6569 // Check if the target supports potentially unsafe FP vectorization. 6570 // FIXME: Add a check for the type of safety issue (denormal, signaling) 6571 // for the target we're vectorizing for, to make sure none of the 6572 // additional fp-math flags can help. 6573 if (Hints.isPotentiallyUnsafe() && 6574 TTI->isFPVectorizationPotentiallyUnsafe()) { 6575 DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n"); 6576 emitAnalysisDiag(L, Hints, *ORE, 6577 VectorizationReport() 6578 << "loop not vectorized due to unsafe FP support."); 6579 emitMissedWarning(F, L, Hints, ORE); 6580 return false; 6581 } 6582 6583 // Select the optimal vectorization factor. 6584 const LoopVectorizationCostModel::VectorizationFactor VF = 6585 CM.selectVectorizationFactor(OptForSize); 6586 6587 // Select the interleave count. 6588 unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost); 6589 6590 // Get user interleave count. 6591 unsigned UserIC = Hints.getInterleave(); 6592 6593 // Identify the diagnostic messages that should be produced. 6594 std::string VecDiagMsg, IntDiagMsg; 6595 bool VectorizeLoop = true, InterleaveLoop = true; 6596 if (Requirements.doesNotMeet(F, L, Hints)) { 6597 DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization " 6598 "requirements.\n"); 6599 emitMissedWarning(F, L, Hints, ORE); 6600 return false; 6601 } 6602 6603 if (VF.Width == 1) { 6604 DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); 6605 VecDiagMsg = 6606 "the cost-model indicates that vectorization is not beneficial"; 6607 VectorizeLoop = false; 6608 } 6609 6610 if (IC == 1 && UserIC <= 1) { 6611 // Tell the user interleaving is not beneficial. 6612 DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n"); 6613 IntDiagMsg = 6614 "the cost-model indicates that interleaving is not beneficial"; 6615 InterleaveLoop = false; 6616 if (UserIC == 1) 6617 IntDiagMsg += 6618 " and is explicitly disabled or interleave count is set to 1"; 6619 } else if (IC > 1 && UserIC == 1) { 6620 // Tell the user interleaving is beneficial, but it explicitly disabled. 6621 DEBUG(dbgs() 6622 << "LV: Interleaving is beneficial but is explicitly disabled."); 6623 IntDiagMsg = "the cost-model indicates that interleaving is beneficial " 6624 "but is explicitly disabled or interleave count is set to 1"; 6625 InterleaveLoop = false; 6626 } 6627 6628 // Override IC if user provided an interleave count. 6629 IC = UserIC > 0 ? UserIC : IC; 6630 6631 // Emit diagnostic messages, if any. 6632 const char *VAPassName = Hints.vectorizeAnalysisPassName(); 6633 if (!VectorizeLoop && !InterleaveLoop) { 6634 // Do not vectorize or interleaving the loop. 6635 ORE->emitOptimizationRemarkAnalysis(VAPassName, L, VecDiagMsg); 6636 ORE->emitOptimizationRemarkAnalysis(LV_NAME, L, IntDiagMsg); 6637 return false; 6638 } else if (!VectorizeLoop && InterleaveLoop) { 6639 DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 6640 ORE->emitOptimizationRemarkAnalysis(VAPassName, L, VecDiagMsg); 6641 } else if (VectorizeLoop && !InterleaveLoop) { 6642 DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in " 6643 << DebugLocStr << '\n'); 6644 ORE->emitOptimizationRemarkAnalysis(LV_NAME, L, IntDiagMsg); 6645 } else if (VectorizeLoop && InterleaveLoop) { 6646 DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in " 6647 << DebugLocStr << '\n'); 6648 DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 6649 } 6650 6651 if (!VectorizeLoop) { 6652 assert(IC > 1 && "interleave count should not be 1 or 0"); 6653 // If we decided that it is not legal to vectorize the loop, then 6654 // interleave it. 6655 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC); 6656 Unroller.vectorize(&LVL, CM.MinBWs, CM.VecValuesToIgnore); 6657 6658 ORE->emitOptimizationRemark(LV_NAME, L, 6659 Twine("interleaved loop (interleaved count: ") + 6660 Twine(IC) + ")"); 6661 } else { 6662 // If we decided that it is *legal* to vectorize the loop, then do it. 6663 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC); 6664 LB.vectorize(&LVL, CM.MinBWs, CM.VecValuesToIgnore); 6665 ++LoopsVectorized; 6666 6667 // Add metadata to disable runtime unrolling a scalar loop when there are 6668 // no runtime checks about strides and memory. A scalar loop that is 6669 // rarely used is not worth unrolling. 6670 if (!LB.areSafetyChecksAdded()) 6671 AddRuntimeUnrollDisableMetaData(L); 6672 6673 // Report the vectorization decision. 6674 ORE->emitOptimizationRemark( 6675 LV_NAME, L, Twine("vectorized loop (vectorization width: ") + 6676 Twine(VF.Width) + ", interleaved count: " + Twine(IC) + 6677 ")"); 6678 } 6679 6680 // Mark the loop as already vectorized to avoid vectorizing again. 6681 Hints.setAlreadyVectorized(); 6682 6683 DEBUG(verifyFunction(*L->getHeader()->getParent())); 6684 return true; 6685 } 6686 6687 bool LoopVectorizePass::runImpl( 6688 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 6689 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 6690 DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_, 6691 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 6692 OptimizationRemarkEmitter &ORE_) { 6693 6694 SE = &SE_; 6695 LI = &LI_; 6696 TTI = &TTI_; 6697 DT = &DT_; 6698 BFI = &BFI_; 6699 TLI = TLI_; 6700 AA = &AA_; 6701 AC = &AC_; 6702 GetLAA = &GetLAA_; 6703 DB = &DB_; 6704 ORE = &ORE_; 6705 6706 // Compute some weights outside of the loop over the loops. Compute this 6707 // using a BranchProbability to re-use its scaling math. 6708 const BranchProbability ColdProb(1, 5); // 20% 6709 ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb; 6710 6711 // Don't attempt if 6712 // 1. the target claims to have no vector registers, and 6713 // 2. interleaving won't help ILP. 6714 // 6715 // The second condition is necessary because, even if the target has no 6716 // vector registers, loop vectorization may still enable scalar 6717 // interleaving. 6718 if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2) 6719 return false; 6720 6721 // Build up a worklist of inner-loops to vectorize. This is necessary as 6722 // the act of vectorizing or partially unrolling a loop creates new loops 6723 // and can invalidate iterators across the loops. 6724 SmallVector<Loop *, 8> Worklist; 6725 6726 for (Loop *L : *LI) 6727 addInnerLoop(*L, Worklist); 6728 6729 LoopsAnalyzed += Worklist.size(); 6730 6731 // Now walk the identified inner loops. 6732 bool Changed = false; 6733 while (!Worklist.empty()) 6734 Changed |= processLoop(Worklist.pop_back_val()); 6735 6736 // Process each loop nest in the function. 6737 return Changed; 6738 6739 } 6740 6741 6742 PreservedAnalyses LoopVectorizePass::run(Function &F, 6743 FunctionAnalysisManager &AM) { 6744 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 6745 auto &LI = AM.getResult<LoopAnalysis>(F); 6746 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 6747 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 6748 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 6749 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); 6750 auto &AA = AM.getResult<AAManager>(F); 6751 auto &AC = AM.getResult<AssumptionAnalysis>(F); 6752 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 6753 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 6754 6755 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 6756 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 6757 [&](Loop &L) -> const LoopAccessInfo & { 6758 return LAM.getResult<LoopAccessAnalysis>(L); 6759 }; 6760 bool Changed = 6761 runImpl(F, SE, LI, TTI, DT, BFI, TLI, DB, AA, AC, GetLAA, ORE); 6762 if (!Changed) 6763 return PreservedAnalyses::all(); 6764 PreservedAnalyses PA; 6765 PA.preserve<LoopAnalysis>(); 6766 PA.preserve<DominatorTreeAnalysis>(); 6767 PA.preserve<BasicAA>(); 6768 PA.preserve<GlobalsAA>(); 6769 return PA; 6770 } 6771