//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
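// For illustration (a sketch, not part of the upstream header; VF = 4 is an
// assumed vectorization factor):
//
//   for (i = 0; i < n; ++i)        // scalar loop
//     A[i] = B[i] + 42;
//
// becomes, conceptually,
//
//   for (i = 0; i < n4; i += 4)    // widened loop, stepping by VF
//     A[i:i+3] = B[i:i+3] + <42, 42, 42, 42>;
//   for (; i < n; ++i)             // scalar epilogue for the remainder
//     A[i] = B[i] + 42;
//
// (n4 is n rounded down to a multiple of VF; A[i:i+3] is vector-lane
// notation, not C.)
//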
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <map>
#include <tuple>

using namespace llvm;
using namespace llvm::PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

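// For illustration (a hypothetical invocation; the flag spellings are as
// declared above, but the exact pipeline is an assumption):
//
//   opt -loop-vectorize -enable-interleaved-mem-accesses \
//       -vectorizer-min-trip-count=4 -S in.ll -o out.ll
//
// exercises these knobs from the command line when running the pass under
// 'opt'.
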
/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed, it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis
createMissedAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                     Instruction *I = nullptr) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // Use the instruction's debug location if it has one; if it doesn't, keep
    // using the loop's start location.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

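// For illustration (a hypothetical use of the helper above, mirroring the
// streaming pattern it is designed for; "NoPrimaryInduction" is a made-up
// remark name):
//
//   ORE->emit(createMissedAnalysis(LV_NAME, "NoPrimaryInduction", TheLoop)
//             << "could not identify a primary induction variable");
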
namespace {

// Forward declarations.
class LoopVectorizeHints;
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;

/// Returns true if the given loop body has a cycle, excluding the loop
/// itself.
static bool hasCyclesInLoopBody(const Loop &L) {
  if (!L.empty())
    return true;

  for (const auto &SCC :
       make_range(scc_iterator<Loop, LoopBodyTraits>::begin(L),
                  scc_iterator<Loop, LoopBodyTraits>::end(L))) {
    if (SCC.size() > 1) {
      DEBUG(dbgs() << "LVL: Detected a cycle in the loop body:\n");
      DEBUG(L.dump());
      return true;
    }
  }
  return false;
}

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}

// FIXME: The following helper functions have multiple implementations
// in the project. They can be effectively organized in a common Load/Store
// utilities unit.

/// A helper function that returns the pointer operand of a load or store
/// instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// A helper function that returns the type of a loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns the alignment of a load or store
/// instruction.
static unsigned getMemInstAlignment(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlignment();
  return cast<StoreInst>(I)->getAlignment();
}

/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
static unsigned getMemInstAddressSpace(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {

  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V)) {
    FastMathFlags Flags;
    Flags.setUnsafeAlgebra();
    cast<Instruction>(V)->setFastMathFlags(Flags);
  }
  return V;
}

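// For illustration (a sketch, assuming an IRBuilder<> named Builder and two
// floating-point Values A and B in scope):
//
//   Value *Sum = addFastMathFlag(Builder.CreateFAdd(A, B));
//
// tags the newly created fadd with unsafe-algebra fast-math flags so that
// later transforms may reassociate it.
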
/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Induction(nullptr),
        OldInduction(nullptr), VectorLoopValueMap(UnrollFactor, VecWidth),
        TripCount(nullptr), VectorTripCount(nullptr), Legal(LVL), Cost(CM),
        AddedSafetyChecks(false) {}

  /// Create a new empty loop. Unlink the old loop and connect the new one.
  void createVectorizedLoopSkeleton();

  /// Vectorize a single instruction within the innermost loop.
  void vectorizeInstruction(Instruction &I);

  /// Fix the vectorized code, taking care of header phi's, live-outs, and
  /// more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  virtual ~InnerLoopVectorizer() {}

protected:
  /// A small list of PHINodes.
  typedef SmallVector<PHINode *, 4> PhiVector;

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  typedef SmallVector<Value *, 2> VectorParts;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  typedef SmallVector<SmallVector<Value *, 4>, 2> ScalarParts;

  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
      EdgeMaskCacheTy;
  typedef DenseMap<BasicBlock *, VectorParts> BlockMaskCacheTy;

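  // For illustration (a sketch; UF = 2 and VF = 4 are assumed factors): a
  // vectorized value of type 'float' is held as a VectorParts of 2 entries,
  // each of type <4 x float>, while a scalarized value is held as a
  // ScalarParts of 2 x 4 individual 'float' Values, indexed by (Part, Lane).
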
  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// \brief The Loop exit block may have single value PHI nodes with some
  /// incoming value. While vectorizing we only handled real values
  /// that were defined inside the loop, and we should have one value for
  /// each predecessor of its parent basic block. See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Predicate conditional instructions that require predication on their
  /// respective conditions.
  void predicateInstructions();

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);
  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars. If \p IfPredicateInstr is true we need to 'hide' each
  /// scalarized instruction behind an if block predicated on the control
  /// dependence of the instruction.
  void scalarizeInstruction(Instruction *Instr, bool IfPredicateInstr = false);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop-invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// This is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

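  // For illustration (assumed values, not part of the original comments):
  // with VF = 4, Val = <i, i, i, i> (a broadcast of the induction value),
  // StartIdx = 0 and Step = 1, getStepVector produces <i, i+1, i+2, i+3>,
  // one lane per consecutive scalar iteration.
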
  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it
  /// can be a truncate instruction).
  void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal,
                        const InductionDescriptor &ID);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Instruction *EntryVal);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate
  /// a vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part and vector index \p Lane. If the value has
  /// been vectorized but not scalarized, the necessary extractelement
  /// instruction will be generated.
  Value *getOrCreateScalarValue(Value *V, unsigned Part, unsigned Lane);

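  // For illustration (a hypothetical IR sketch of the on-demand rule above;
  // VF = 4, Part = 0, and the value names are made up): if %x was only
  // scalarized as %x0..%x3 and a vector use appears, getOrCreateVectorValue
  // emits
  //
  //   %v0 = insertelement <4 x i32> undef, i32 %x0, i32 0
  //   %v1 = insertelement <4 x i32> %v0, i32 %x1, i32 1
  //   %v2 = insertelement <4 x i32> %v1, i32 %x2, i32 2
  //   %v3 = insertelement <4 x i32> %v2, i32 %x3, i32 3
  //
  // and records %v3 as the vector value for (%x, Part 0).
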
  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// \brief Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// This is a helper class for maintaining vectorization state. It's used for
  /// mapping values from the original loop to their corresponding values in
  /// the new loop. Two mappings are maintained: one for vectorized values and
  /// one for scalarized values. Vectorized values are represented with UF
  /// vector values in the new loop, and scalarized values are represented with
  /// UF x VF scalar values in the new loop. UF and VF are the unroll and
  /// vectorization factors, respectively.
  ///
  /// Entries can be added to either map with setVectorValue and
  /// setScalarValue, which assert that an entry was not already added before.
  /// If an entry is to replace an existing one, call resetVectorValue. This is
  /// currently needed to modify the mapped values during "fix-up" operations
  /// that occur once the first phase of widening is complete. These operations
  /// include type truncation and the second phase of recurrence widening.
  ///
  /// Entries from either map can be retrieved using the getVectorValue and
  /// getScalarValue functions, which assert that the desired value exists.
  struct ValueMap {

    /// Construct an empty map with the given unroll and vectorization factors.
    ValueMap(unsigned UF, unsigned VF) : UF(UF), VF(VF) {}

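    // For illustration (a hypothetical use of this map; UF = 2, VF = 4, and
    // the Value names X, WideX0 and ScalarX are made up):
    //
    //   ValueMap VM(/*UF=*/2, /*VF=*/4);
    //   VM.setVectorValue(X, /*Part=*/0, WideX0);        // a <4 x Ty> value
    //   VM.setScalarValue(X, /*Part=*/1, /*Lane=*/2, ScalarX);
    //   Value *V = VM.getVectorValue(X, 0);               // returns WideX0
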
    /// \return True if the map has any vector entry for \p Key.
    bool hasAnyVectorValue(Value *Key) const {
      return VectorMapStorage.count(Key);
    }

    /// \return True if the map has a vector entry for \p Key and \p Part.
    bool hasVectorValue(Value *Key, unsigned Part) const {
      assert(Part < UF && "Queried Vector Part is too large.");
      if (!hasAnyVectorValue(Key))
        return false;
      const VectorParts &Entry = VectorMapStorage.find(Key)->second;
      assert(Entry.size() == UF && "VectorParts has wrong dimensions.");
      return Entry[Part] != nullptr;
    }

    /// \return True if the map has any scalar entry for \p Key.
    bool hasAnyScalarValue(Value *Key) const {
      return ScalarMapStorage.count(Key);
    }

    /// \return True if the map has a scalar entry for \p Key, \p Part and
    /// \p Lane.
    bool hasScalarValue(Value *Key, unsigned Part, unsigned Lane) const {
      assert(Part < UF && "Queried Scalar Part is too large.");
      assert(Lane < VF && "Queried Scalar Lane is too large.");
      if (!hasAnyScalarValue(Key))
        return false;
      const ScalarParts &Entry = ScalarMapStorage.find(Key)->second;
      assert(Entry.size() == UF && "ScalarParts has wrong dimensions.");
      assert(Entry[Part].size() == VF && "ScalarParts has wrong dimensions.");
      return Entry[Part][Lane] != nullptr;
    }

    /// Retrieve the existing vector value that corresponds to \p Key and
    /// \p Part.
    Value *getVectorValue(Value *Key, unsigned Part) {
      assert(hasVectorValue(Key, Part) && "Getting non-existent value.");
      return VectorMapStorage[Key][Part];
    }

    /// Retrieve the existing scalar value that corresponds to \p Key, \p Part
    /// and \p Lane.
    Value *getScalarValue(Value *Key, unsigned Part, unsigned Lane) {
      assert(hasScalarValue(Key, Part, Lane) && "Getting non-existent value.");
      return ScalarMapStorage[Key][Part][Lane];
    }

    /// Set a vector value associated with \p Key and \p Part. Assumes such a
    /// value is not already set. If it is, use resetVectorValue() instead.
    void setVectorValue(Value *Key, unsigned Part, Value *Vector) {
      assert(!hasVectorValue(Key, Part) && "Vector value already set for part");
      if (!VectorMapStorage.count(Key)) {
        VectorParts Entry(UF);
        VectorMapStorage[Key] = Entry;
      }
      VectorMapStorage[Key][Part] = Vector;
    }

    /// Set a scalar value associated with \p Key for \p Part and \p Lane.
    /// Assumes such a value is not already set.
    void setScalarValue(Value *Key, unsigned Part, unsigned Lane,
                        Value *Scalar) {
      assert(!hasScalarValue(Key, Part, Lane) && "Scalar value already set");
      if (!ScalarMapStorage.count(Key)) {
        ScalarParts Entry(UF);
        for (unsigned Part = 0; Part < UF; ++Part)
          Entry[Part].resize(VF, nullptr);
        // TODO: Consider storing uniform values only per-part, as they occupy
        // lane 0 only, keeping the other VF-1 redundant entries null.
        ScalarMapStorage[Key] = Entry;
      }
      ScalarMapStorage[Key][Part][Lane] = Scalar;
    }

    /// Reset the vector value associated with \p Key for the given \p Part.
    /// This function can be used to update values that have already been
    /// vectorized. This is the case for "fix-up" operations including type
    /// truncation and the second phase of recurrence vectorization.
    void resetVectorValue(Value *Key, unsigned Part, Value *Vector) {
      assert(hasVectorValue(Key, Part) && "Vector value not set for part");
      VectorMapStorage[Key][Part] = Vector;
    }

  private:
    /// The unroll factor. Each entry in the vector map contains UF vector
    /// values.
    unsigned UF;

    /// The vectorization factor. Each entry in the scalar map contains UF x VF
    /// scalar values.
    unsigned VF;

    /// The vector and scalar map storage. We use std::map and not DenseMap
    /// because insertions to DenseMap invalidate its iterators.
    std::map<Value *, VectorParts> VectorMapStorage;
    std::map<Value *, ScalarParts> ScalarMapStorage;
  };

  /// The original loop.
  Loop *OrigLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;
  /// Loop Info.
  LoopInfo *LI;
  /// Dominator Tree.
  DominatorTree *DT;
  /// Alias Analysis.
  AliasAnalysis *AA;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Assumption Cache.
  AssumptionCache *AC;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

protected:
  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;
  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;
  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;
  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;
  /// The vector loop body.
  BasicBlock *LoopVectorBody;
  /// The scalar loop body.
  BasicBlock *LoopScalarBody;
  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction;
  /// The induction variable of the old basic block.
  PHINode *OldInduction;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  ValueMap VectorLoopValueMap;

  /// Store instructions that should be predicated, as a pair
  /// <StoreInst, Predicate>.
  SmallVector<std::pair<Instruction *, Value *>, 4> PredicatedInstructions;
  EdgeMaskCacheTy EdgeMaskCache;
  BlockMaskCacheTy BlockMaskCache;
  /// Trip count of the original loop.
  Value *TripCount;
  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  void vectorizeMemoryInstruction(Instruction *Instr) override;
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B,
                                              const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling())
      B.SetCurrentDebugLocation(DIL->cloneWithDuplicationFactor(UF * VF));
    else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To, Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is relative to the leader and could be
  /// negative if \p Instr is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The largest index is always less than the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if it contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

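  // For illustration (a hypothetical insertion sequence; the accesses are
  // made up): if the group was created with leader A[i] (index 0), then
  // insertMember(load of A[i+2], /*Index=*/2, Align) adds a trailing member,
  // while insertMember(load of A[i-1], /*Index=*/-1, Align) makes the new
  // access the leader and shifts SmallestKey down by one.
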
private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey;
  int LargestKey;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32             // Insert Position
  //      %add = add i32 %even         // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32               // Def of %odd
  //      store i32 %odd               // Insert Position
  Instruction *InsertPos;
};

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise it's meaningless to do analysis as the vectorization
/// on interleaved accesses is unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(nullptr),
        RequiresScalarEpilogue(false) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Return the maximum interleave factor of all interleaved groups.
  unsigned getMaxInterleaveFactor() const {
    unsigned MaxFactor = 1;
    for (auto &Entry : InterleaveGroupMap)
      MaxFactor = std::max(MaxFactor, Entry.second->getFactor());
    return MaxFactor;
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr doesn't belong to such a group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

  /// \brief Initialize the LoopAccessInfo used for dependence checking.
  void setLAI(const LoopAccessInfo *Info) { LAI = Info; }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;
  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// Holds dependences among the memory accesses in the loop. It maps a source
  /// access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    StrideDescriptor() = default;

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;
    const SCEV *Scev = nullptr; // The scalar expression of this access.
    uint64_t Size = 0;          // The size of the memory object.
    unsigned Align = 0;         // The alignment of this access.
  };

  /// \brief A type for holding instructions and their stride descriptors.
  typedef std::pair<Instruction *, StrideDescriptor> StrideEntry;

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);

  /// \brief Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride) {
    unsigned Factor = std::abs(Stride);
    return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
  }

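  // For illustration (assumed values): isStrided(2) and isStrided(-4) return
  // true (factors 2 and 4), while isStrided(1) returns false, since a
  // unit-stride access is consecutive rather than interleaved.
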
  /// \brief Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }

  /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return false if reordering is
  /// not necessary or is prevented because \p A and \p B may be dependent.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {

    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B. Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
  }

  /// \brief Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for them.
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;
  /// Vectorization interleave factor.
  Hint Interleave;
  /// Vectorization forced.
  Hint Force;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

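  // For illustration (typical loop metadata these hints are parsed from; the
  // exact node numbering is an assumption):
  //
  //   br i1 %cond, label %header, label %exit, !llvm.loop !0
  //   ...
  //   !0 = distinct !{!0, !1, !2}
  //   !1 = !{!"llvm.loop.vectorize.width", i32 8}
  //   !2 = !{!"llvm.loop.vectorize.enable", i1 1}
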
  LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
                     OptimizationRemarkEmitter &ORE)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        PotentiallyUnsafe(false), TheLoop(L), ORE(ORE) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized() {
    Width.Value = Interleave.Value = 1;
    Hint Hints[] = {Width, Interleave};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (getWidth() == 1 && getInterleave() == 1) {
      // FIXME: Add a separate metadata to indicate when the loop has already
      // been vectorized instead of setting width and count to 1.
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      ORE.emit(OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
                                          "AllDisabled", L->getStartLoc(),
                                          L->getHeader())
               << "loop not vectorized: vectorization and interleaving are "
                  "explicitly disabled, or vectorize width and interleave "
                  "count are both set to 1");
      return false;
    }

    return true;
  }

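  // For illustration (a hypothetical source-level pragma; clang lowers it to
  // the loop metadata these hints read):
  //
  //   #pragma clang loop vectorize(enable) vectorize_width(4)
  //   for (int i = 0; i < n; ++i)
  //     a[i] += b[i];
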
  /// Dumps all the hint information.
  void emitRemarkWithHints() const {
    using namespace ore;
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      ORE.emit(OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
               << "loop not vectorized: vectorization is explicitly disabled");
    else {
      OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
                                 TheLoop->getStartLoc(), TheLoop->getHeader());
      R << "loop not vectorized";
      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
        R << " (Force=" << NV("Force", true);
        if (Width.Value != 0)
          R << ", Vector Width=" << NV("VectorWidth", Width.Value);
        if (Interleave.Value != 0)
          R << ", Interleave Count=" << NV("InterleaveCount", Interleave.Value);
        R << ")";
      }
      ORE.emit(R);
    }
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }

  /// \brief If hints are provided that force vectorization, use the
  /// AlwaysPrint pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const {
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return OptimizationRemarkAnalysis::AlwaysPrint;
  }

  bool allowReordering() const {
    // When loop hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations given by the scalar loop.
    // This is not enabled by default because it can be unsafe or inefficient.
    // For example, reordering floating-point operations will change the way
    // round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either a MDString or a MDNode with the first
      // operand a MDString.
1425       if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
1426         if (!MD || MD->getNumOperands() == 0)
1427           continue;
1428         S = dyn_cast<MDString>(MD->getOperand(0));
1429         for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
1430           Args.push_back(MD->getOperand(i));
1431       } else {
1432         S = dyn_cast<MDString>(LoopID->getOperand(i));
1433         assert(Args.size() == 0 && "too many arguments for MDString");
1434       }
1435
1436       if (!S)
1437         continue;
1438
1439       // Check if the hint starts with the loop metadata prefix.
1440       StringRef Name = S->getString();
1441       if (Args.size() == 1)
1442         setHint(Name, Args[0]);
1443     }
1444   }
1445
1446   /// Checks a string hint with one operand and sets the value if it is valid.
1447   void setHint(StringRef Name, Metadata *Arg) {
1448     if (!Name.startswith(Prefix()))
1449       return;
1450     Name = Name.substr(Prefix().size(), StringRef::npos);
1451
1452     const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
1453     if (!C)
1454       return;
1455     unsigned Val = C->getZExtValue();
1456
1457     Hint *Hints[] = {&Width, &Interleave, &Force};
1458     for (auto H : Hints) {
1459       if (Name == H->Name) {
1460         if (H->validate(Val))
1461           H->Value = Val;
1462         else
1463           DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
1464         break;
1465       }
1466     }
1467   }
1468
1469   /// Create a new hint from a name / value pair.
1470   MDNode *createHintMetadata(StringRef Name, unsigned V) const {
1471     LLVMContext &Context = TheLoop->getHeader()->getContext();
1472     Metadata *MDs[] = {MDString::get(Context, Name),
1473                        ConstantAsMetadata::get(
1474                            ConstantInt::get(Type::getInt32Ty(Context), V))};
1475     return MDNode::get(Context, MDs);
1476   }
1477
1478   /// Matches metadata with the hint name.
1479   bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
1480     MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
1481     if (!Name)
1482       return false;
1483
1484     for (auto H : HintTypes)
1485       if (Name->getString().endswith(H.Name))
1486         return true;
1487     return false;
1488   }
1489
1490   /// Sets the current hints into the loop metadata, keeping other values intact.
1491   void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
1492     if (HintTypes.size() == 0)
1493       return;
1494
1495     // Reserve the first element for the LoopID (see below).
1496     SmallVector<Metadata *, 4> MDs(1);
1497     // If the loop already has metadata, keep the operands we are not updating.
1498     MDNode *LoopID = TheLoop->getLoopID();
1499     if (LoopID) {
1500       for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
1501         MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
1502         // If the node is in the update list, ignore its old value.
1503         if (!matchesHintMetadataName(Node, HintTypes))
1504           MDs.push_back(Node);
1505       }
1506     }
1507
1508     // Now, add the missing hints.
1509     for (auto H : HintTypes)
1510       MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));
1511
1512     // Replace the current metadata node with the new one.
1513     LLVMContext &Context = TheLoop->getHeader()->getContext();
1514     MDNode *NewLoopID = MDNode::get(Context, MDs);
1515     // Set operand 0 to refer to the loop id itself.
1516     NewLoopID->replaceOperandWith(0, NewLoopID);
1517
1518     TheLoop->setLoopID(NewLoopID);
1519   }
1520
1521   /// The loop these hints belong to.
1522   const Loop *TheLoop;
1523
1524   /// Interface to emit optimization remarks.
1525   OptimizationRemarkEmitter &ORE;
1526 };
1527
1528 static void emitMissedWarning(Function *F, Loop *L,
1529                               const LoopVectorizeHints &LH,
1530                               OptimizationRemarkEmitter *ORE) {
1531   LH.emitRemarkWithHints();
1532
1533   if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
1534     if (LH.getWidth() != 1)
1535       ORE->emit(DiagnosticInfoOptimizationFailure(
1536                     DEBUG_TYPE, "FailedRequestedVectorization",
1537                     L->getStartLoc(), L->getHeader())
1538                 << "loop not vectorized: "
1539                 << "failed explicitly specified loop vectorization");
1540     else if (LH.getInterleave() != 1)
1541       ORE->emit(DiagnosticInfoOptimizationFailure(
1542                     DEBUG_TYPE, "FailedRequestedInterleaving", L->getStartLoc(),
1543                     L->getHeader())
1544                 << "loop not interleaved: "
1545                 << "failed explicitly specified loop interleaving");
1546   }
1547 }
1548
1549 /// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
1550 /// to what vectorization factor.
1551 /// This class does not look at the profitability of vectorization, only the
1552 /// legality. This class has two main kinds of checks:
1553 /// * Memory checks - The code in canVectorizeMemory checks if vectorization
1554 ///   will change the order of memory accesses in a way that will change the
1555 ///   correctness of the program.
1556 /// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory
1557 ///   checks for a number of different conditions, such as the availability of
1558 ///   a single induction variable and that all types are supported and
1559 ///   vectorizable. This code reflects the capabilities of InnerLoopVectorizer.
1560 /// This class is also used by InnerLoopVectorizer for identifying the
1561 /// induction variable and the different reduction variables.
1562 class LoopVectorizationLegality {
1563 public:
1564   LoopVectorizationLegality(
1565       Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
1566       TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F,
1567       const TargetTransformInfo *TTI,
1568       std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI,
1569       OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R,
1570       LoopVectorizeHints *H)
1571       : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT),
1572         GetLAA(GetLAA), LAI(nullptr), ORE(ORE), InterleaveInfo(PSE, L, DT, LI),
1573         PrimaryInduction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
1574         Requirements(R), Hints(H) {}
1575
1576   /// ReductionList contains the reduction descriptors for all
1577   /// of the reductions that were found in the loop.
1578   typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;
1579
1580   /// InductionList saves induction variables and maps them to the
1581   /// induction descriptor.
1582   typedef MapVector<PHINode *, InductionDescriptor> InductionList;
1583
1584   /// RecurrenceSet contains the phi nodes that are recurrences other than
1585   /// inductions and reductions.
1586   typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet;
1587
1588   /// Returns true if it is legal to vectorize this loop.
1589   /// This does not mean that it is profitable to vectorize this
1590   /// loop, only that it is legal to do so.
1591   bool canVectorize();
1592
1593   /// Returns the primary induction variable.
1594   PHINode *getPrimaryInduction() { return PrimaryInduction; }
1595
1596   /// Returns the reduction variables found in the loop.
1597   ReductionList *getReductionVars() { return &Reductions; }
1598
1599   /// Returns the induction variables found in the loop.
1600   InductionList *getInductionVars() { return &Inductions; }
1601
1602   /// Return the first-order recurrences found in the loop.
1603   RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }
1604
1605   /// Return the set of instructions to sink to handle first-order recurrences.
1606   DenseMap<Instruction *, Instruction *> &getSinkAfter() { return SinkAfter; }
1607
1608   /// Returns the widest induction type.
1609   Type *getWidestInductionType() { return WidestIndTy; }
1610
1611   /// Returns true if V is an induction variable in this loop.
1612   bool isInductionVariable(const Value *V);
1613
1614   /// Returns true if PN is a reduction variable in this loop.
1615   bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }
1616
1617   /// Returns true if Phi is a first-order recurrence in this loop.
1618   bool isFirstOrderRecurrence(const PHINode *Phi);
1619
1620   /// Return true if the block BB needs to be predicated in order for the loop
1621   /// to be vectorized.
1622   bool blockNeedsPredication(BasicBlock *BB);
1623
1624   /// Check if this pointer is consecutive when vectorizing. This happens
1625   /// when the last index of the GEP is the induction variable, or when the
1626   /// pointer itself is an induction variable.
1627   /// This check allows us to vectorize A[idx] into a wide load/store.
1628   /// Returns:
1629   /// 0 - Stride is unknown or non-consecutive.
1630   /// 1 - Address is consecutive.
1631   /// -1 - Address is consecutive, and decreasing.
1632   int isConsecutivePtr(Value *Ptr);
1633
1634   /// Returns true if the value V is uniform within the loop.
1635   bool isUniform(Value *V);
1636
1637   /// Returns the information that we collected about the runtime memory checks.
1638   const RuntimePointerChecking *getRuntimePointerChecking() const {
1639     return LAI->getRuntimePointerChecking();
1640   }
1641
1642   const LoopAccessInfo *getLAI() const { return LAI; }
1643
1644   /// \brief Check if \p Instr belongs to any interleaved access group.
1645   bool isAccessInterleaved(Instruction *Instr) {
1646     return InterleaveInfo.isInterleaved(Instr);
1647   }
1648
1649   /// \brief Return the maximum interleave factor of all interleaved groups.
1650   unsigned getMaxInterleaveFactor() const {
1651     return InterleaveInfo.getMaxInterleaveFactor();
1652   }
1653
1654   /// \brief Get the interleaved access group that \p Instr belongs to.
1655   const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
1656     return InterleaveInfo.getInterleaveGroup(Instr);
1657   }
1658
1659   /// \brief Returns true if an interleaved group requires a scalar iteration
1660   /// to handle accesses with gaps.
1661   bool requiresScalarEpilogue() const {
1662     return InterleaveInfo.requiresScalarEpilogue();
1663   }
1664
1665   unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }
1666
1667   bool hasStride(Value *V) { return LAI->hasStride(V); }
1668
1669   /// Returns true if the target machine supports a masked store operation
1670   /// for the given \p DataType and kind of access to \p Ptr.
1671   bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
1672     return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
1673   }
1674   /// Returns true if the target machine supports a masked load operation
1675   /// for the given \p DataType and kind of access to \p Ptr.
1676   bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
1677     return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
1678   }
1679   /// Returns true if the target machine supports a masked scatter operation
1680   /// for the given \p DataType.
1681   bool isLegalMaskedScatter(Type *DataType) {
1682     return TTI->isLegalMaskedScatter(DataType);
1683   }
1684   /// Returns true if the target machine supports a masked gather operation
1685   /// for the given \p DataType.
1686   bool isLegalMaskedGather(Type *DataType) {
1687     return TTI->isLegalMaskedGather(DataType);
1688   }
1689   /// Returns true if the target machine can represent \p V as a masked gather
1690   /// or scatter operation.
1691   bool isLegalGatherOrScatter(Value *V) {
1692     auto *LI = dyn_cast<LoadInst>(V);
1693     auto *SI = dyn_cast<StoreInst>(V);
1694     if (!LI && !SI)
1695       return false;
1696     auto *Ptr = getPointerOperand(V);
1697     auto *Ty = cast<PointerType>(Ptr->getType())->getElementType();
1698     return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
1699   }
1700
1701   /// Returns true if the vector representation of the instruction \p I
1702   /// requires a mask.
1703   bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
1704   unsigned getNumStores() const { return LAI->getNumStores(); }
1705   unsigned getNumLoads() const { return LAI->getNumLoads(); }
1706   unsigned getNumPredStores() const { return NumPredStores; }
1707
1708   /// Returns true if \p I is an instruction that will be scalarized with
1709   /// predication. Such instructions include conditional stores and
1710   /// instructions that may divide by zero.
1711   bool isScalarWithPredication(Instruction *I);
1712
1713   /// Returns true if \p I is a memory instruction with consecutive memory
1714   /// access that can be widened.
1715   bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);
1716
1717   /// Returns true if the NoNaN attribute is set on the function.
1718   bool hasFunNoNaNAttr() const { return HasFunNoNaNAttr; }
1719
1720 private:
1721   /// Check if a single-basic-block loop is vectorizable.
1722   /// At this point we know that this is a loop with a constant trip count
1723   /// and we only need to check individual instructions.
1724   bool canVectorizeInstrs();
1725
1726   /// When we vectorize loops we may change the order in which
1727   /// we read and write from memory. This method checks if it is
1728   /// legal to vectorize the code, considering only memory constraints.
1729   /// Returns true if the loop is vectorizable.
1730   bool canVectorizeMemory();
1731
1732   /// Return true if we can vectorize this loop using the IF-conversion
1733   /// transformation.
1734   bool canVectorizeWithIfConvert();
1735
1736   /// Return true if all of the instructions in the block can be speculatively
1737   /// executed. \p SafePtrs is a list of addresses that are known to be legal
1738   /// and that we know we can read from without a segfault.
1739   bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);
1740
1741   /// Updates the vectorization state by adding \p Phi to the inductions list.
1742   /// This can set \p Phi as the main induction of the loop if \p Phi is a
1743   /// better choice for the main induction than the existing one.
1744   void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
1745                        SmallPtrSetImpl<Value *> &AllowedExit);
1746
1747   /// Create an analysis remark that explains why vectorization failed.
1748   ///
1749   /// \p RemarkName is the identifier for the remark. If \p I is passed, it is
1750   /// an instruction that prevents vectorization. Otherwise the loop is used
1751   /// for the location of the remark. \return the remark object that can be
1752   /// streamed to.
1753   OptimizationRemarkAnalysis
1754   createMissedAnalysis(StringRef RemarkName, Instruction *I = nullptr) const {
1755     return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
1756                                   RemarkName, TheLoop, I);
1757   }
1758
1759   /// \brief If an access has a symbolic stride, this maps the pointer value to
1760   /// the stride symbol.
1761   const ValueToValueMap *getSymbolicStrides() {
1762     // FIXME: Currently, the set of symbolic strides is sometimes queried before
1763     // it's collected. This happens from canVectorizeWithIfConvert, when the
1764     // pointer is checked to reference consecutive elements suitable for a
1765     // masked access.
1766     return LAI ? &LAI->getSymbolicStrides() : nullptr;
1767   }
1768
1769   unsigned NumPredStores;
1770
1771   /// The loop that we evaluate.
1772   Loop *TheLoop;
1773   /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
1774   /// Applies dynamic knowledge to simplify SCEV expressions in the context
1775   /// of existing SCEV assumptions. The analysis will also add a minimal set
1776   /// of new predicates if this is required to enable vectorization and
1777   /// unrolling.
1778   PredicatedScalarEvolution &PSE;
1779   /// Target Library Info.
1780   TargetLibraryInfo *TLI;
1781   /// Target Transform Info.
1782   const TargetTransformInfo *TTI;
1783   /// Dominator Tree.
1784   DominatorTree *DT;
1785   // LoopAccess analysis.
1786   std::function<const LoopAccessInfo &(Loop &)> *GetLAA;
1787   // And the loop-accesses info corresponding to this loop. This pointer is
1788   // null until canVectorizeMemory sets it up.
1789   const LoopAccessInfo *LAI;
1790   /// Interface to emit optimization remarks.
1791   OptimizationRemarkEmitter *ORE;
1792
1793   /// The interleave access information contains groups of interleaved accesses
1794   /// with the same stride and close to each other.
1795   InterleavedAccessInfo InterleaveInfo;
1796
1797   // --- vectorization state --- //
1798
1799   /// Holds the primary induction variable. This is the counter of the
1800   /// loop.
1801   PHINode *PrimaryInduction;
1802   /// Holds the reduction variables.
1803   ReductionList Reductions;
1804   /// Holds all of the induction variables that we found in the loop.
1805   /// Notice that inductions don't need to start at zero and that induction
1806   /// variables can be pointers.
1807   InductionList Inductions;
1808   /// Holds the phi nodes that are first-order recurrences.
1809   RecurrenceSet FirstOrderRecurrences;
1810   /// Holds instructions that need to sink past other instructions to handle
1811   /// first-order recurrences.
1812   DenseMap<Instruction *, Instruction *> SinkAfter;
1813   /// Holds the widest induction type encountered.
1814   Type *WidestIndTy;
1815
1816   /// Allowed outside users. This holds the induction and reduction
1817   /// vars which can be accessed from outside the loop.
1818   SmallPtrSet<Value *, 4> AllowedExit;
1819
1820   /// Can we assume the absence of NaNs.
1821   bool HasFunNoNaNAttr;
1822
1823   /// Vectorization requirements that will go through late-evaluation.
1824   LoopVectorizationRequirements *Requirements;
1825
1826   /// Used to emit an analysis of any legality issues.
1827   LoopVectorizeHints *Hints;
1828
1829   /// While vectorizing these instructions we have to generate a
1830   /// call to the appropriate masked intrinsic.
1831   SmallPtrSet<const Instruction *, 8> MaskedOp;
1832 };
1833
1834 /// LoopVectorizationCostModel - estimates the expected speedups due to
1835 /// vectorization.
1836 /// In many cases vectorization is not profitable. This can happen because of
1837 /// a number of reasons. In this class we mainly attempt to predict the
1838 /// expected speedups/slowdowns due to the supported instruction set. We use
1839 /// the TargetTransformInfo to query the different backends for the cost of
1840 /// different operations.
1841 class LoopVectorizationCostModel {
1842 public:
1843   LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
1844                              LoopInfo *LI, LoopVectorizationLegality *Legal,
1845                              const TargetTransformInfo &TTI,
1846                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1847                              AssumptionCache *AC,
1848                              OptimizationRemarkEmitter *ORE, const Function *F,
1849                              const LoopVectorizeHints *Hints)
1850       : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
1851         AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {}
1852
1853   /// \return An upper bound for the vectorization factor, or None if
1854   /// vectorization should be avoided up front.
1855   Optional<unsigned> computeMaxVF(bool OptForSize);
1856
1857   /// Information about vectorization costs.
1858   struct VectorizationFactor {
1859     unsigned Width; // Vector width with best cost
1860     unsigned Cost;  // Cost of the loop with that width
1861   };
1862   /// \return The most profitable vectorization factor and the cost of that VF.
1863   /// This method checks every power of two up to MaxVF. If UserVF is not zero,
1864   /// then this vectorization factor will be selected if vectorization is
1865   /// possible.
1866   VectorizationFactor selectVectorizationFactor(unsigned MaxVF);
1867
1868   /// Setup cost-based decisions for the user vectorization factor.
1869   void selectUserVectorizationFactor(unsigned UserVF) {
1870     collectUniformsAndScalars(UserVF);
1871     collectInstsToScalarize(UserVF);
1872   }
1873
1874   /// \return The size (in bits) of the smallest and widest types in the code
1875   /// that needs to be vectorized. We ignore values that remain scalar such as
1876   /// 64 bit loop indices.
1877   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1878
1879   /// \return The desired interleave count.
1880   /// If the interleave count has been specified by metadata it will be
1881   /// returned. Otherwise, it is computed and returned. VF and LoopCost
1882   /// are the selected vectorization factor and the cost of the selected VF.
1883   unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
1884                                  unsigned LoopCost);
1885
1886   /// A memory access instruction may be vectorized in more than one way.
1887   /// The form of the instruction after vectorization depends on its cost.
1888   /// This function makes cost-based decisions for Load/Store instructions
1889   /// and collects them in a map. This decision map is used for building
1890   /// the lists of loop-uniform and loop-scalar instructions.
1891   /// The calculated cost is saved with the widening decision in order to
1892   /// avoid redundant calculations.
1893   void setCostBasedWideningDecision(unsigned VF);
1894
1895   /// \brief A struct that represents some properties of the register usage
1896   /// of a loop.
1897   struct RegisterUsage {
1898     /// Holds the number of loop invariant values that are used in the loop.
1899     unsigned LoopInvariantRegs;
1900     /// Holds the maximum number of concurrent live intervals in the loop.
1901     unsigned MaxLocalUsers;
1902     /// Holds the number of instructions in the loop.
1903     unsigned NumInstructions;
1904   };
1905
1906   /// \return Information about the register usage of the loop for the
1907   /// given vectorization factors.
1908   SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1909
1910   /// Collect values we want to ignore in the cost model.
1911   void collectValuesToIgnore();
1912
1913   /// \returns The smallest bitwidth each instruction can be represented with.
1914   /// The vector equivalents of these instructions should be truncated to this
1915   /// type.
1916   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1917     return MinBWs;
1918   }
1919
1920   /// \returns True if it is more profitable to scalarize instruction \p I for
1921   /// vectorization factor \p VF.
1922   bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
1923     auto Scalars = InstsToScalarize.find(VF);
1924     assert(Scalars != InstsToScalarize.end() &&
1925            "VF not yet analyzed for scalarization profitability");
1926     return Scalars->second.count(I);
1927   }
1928
1929   /// Returns true if \p I is known to be uniform after vectorization.
1930   bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
1931     if (VF == 1)
1932       return true;
1933     assert(Uniforms.count(VF) && "VF not yet analyzed for uniformity");
1934     auto UniformsPerVF = Uniforms.find(VF);
1935     return UniformsPerVF->second.count(I);
1936   }
1937
1938   /// Returns true if \p I is known to be scalar after vectorization.
1939   bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
1940     if (VF == 1)
1941       return true;
1942     assert(Scalars.count(VF) && "Scalar values are not calculated for VF");
1943     auto ScalarsPerVF = Scalars.find(VF);
1944     return ScalarsPerVF->second.count(I);
1945   }
1946
1947   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1948   /// for vectorization factor \p VF.
1949   bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
1950     return VF > 1 && MinBWs.count(I) && !isProfitableToScalarize(I, VF) &&
1951            !isScalarAfterVectorization(I, VF);
1952   }
1953
1954   /// Decision that was taken during cost calculation for a memory instruction.
1955   enum InstWidening {
1956     CM_Unknown,
1957     CM_Widen,
1958     CM_Interleave,
1959     CM_GatherScatter,
1960     CM_Scalarize
1961   };
1962
1963   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1964   /// instruction \p I and vector width \p VF.
1965   void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
1966                            unsigned Cost) {
1967     assert(VF >= 2 && "Expected VF >=2");
1968     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1969   }
1970
1971   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1972   /// interleaving group \p Grp and vector width \p VF.
1973   void setWideningDecision(const InterleaveGroup *Grp, unsigned VF,
1974                            InstWidening W, unsigned Cost) {
1975     assert(VF >= 2 && "Expected VF >=2");
1976     /// Broadcast this decision to all instructions inside the group.
1977     /// But the cost will be assigned to one instruction only.
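    /// E.g., for a hypothetical group {%a, %b, %c} whose insert position is
    /// %b, the decision W is recorded for all three members, but only %b's
    /// entry carries Cost; %a and %c are recorded with a cost of zero.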
1978     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1979       if (auto *I = Grp->getMember(i)) {
1980         if (Grp->getInsertPos() == I)
1981           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1982         else
1983           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1984       }
1985     }
1986   }
1987
1988   /// Return the cost model decision for the given instruction \p I and vector
1989   /// width \p VF. Return CM_Unknown if this instruction did not pass
1990   /// through the cost modeling.
1991   InstWidening getWideningDecision(Instruction *I, unsigned VF) {
1992     assert(VF >= 2 && "Expected VF >=2");
1993     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1994     auto Itr = WideningDecisions.find(InstOnVF);
1995     if (Itr == WideningDecisions.end())
1996       return CM_Unknown;
1997     return Itr->second.first;
1998   }
1999
2000   /// Return the vectorization cost for the given instruction \p I and vector
2001   /// width \p VF.
2002   unsigned getWideningCost(Instruction *I, unsigned VF) {
2003     assert(VF >= 2 && "Expected VF >=2");
2004     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
2005     assert(WideningDecisions.count(InstOnVF) && "The cost is not calculated");
2006     return WideningDecisions[InstOnVF].second;
2007   }
2008
2009   /// Return true if instruction \p I is an optimizable truncate whose operand
2010   /// is an induction variable. Such a truncate will be removed by adding a new
2011   /// induction variable with the destination type.
2012   bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
2013
2014     // If the instruction is not a truncate, return false.
2015     auto *Trunc = dyn_cast<TruncInst>(I);
2016     if (!Trunc)
2017       return false;
2018
2019     // Get the source and destination types of the truncate.
2020     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
2021     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
2022
2023     // If the truncate is free for the given types, return false. Replacing a
2024     // free truncate with an induction variable would add an induction variable
2025     // update instruction to each iteration of the loop. We exclude from this
2026     // check the primary induction variable since it will need an update
2027     // instruction regardless.
2028     Value *Op = Trunc->getOperand(0);
2029     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
2030       return false;
2031
2032     // If the truncated value is not an induction variable, return false.
2033     return Legal->isInductionVariable(Op);
2034   }
2035
2036 private:
2037   /// \return An upper bound for the vectorization factor, larger than zero.
2038   /// One is returned if vectorization should best be avoided due to cost.
2039   unsigned computeFeasibleMaxVF(bool OptForSize);
2040
2041   /// The vectorization cost is a combination of the cost itself
2042   /// and a boolean indicating whether any of the contributing
2043   /// operations will actually operate on vector values after type
2044   /// legalization in the backend.
2045   /// If this latter value is false, then all operations will be
2046   /// scalarized (i.e. no vectorization has actually taken
2047   /// place).
2048   typedef std::pair<unsigned, bool> VectorizationCostTy;
2049
2050   /// Returns the expected execution cost. The unit of the cost does
2051   /// not matter because we use the 'cost' units to compare different
2052   /// vector widths. The cost that is returned is *not* normalized by
2053   /// the factor width.
2054   VectorizationCostTy expectedCost(unsigned VF);
2055
2056   /// Returns the execution time cost of an instruction for a given vector
2057   /// width. Vector width of one means scalar.
2058   VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
2059
2060   /// The cost-computation logic from getInstructionCost which provides
2061   /// the vector type as an output parameter.
2062   unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
2063
2064   /// Calculate the vectorization cost of memory instruction \p I.
2065   unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);
2066
2067   /// The cost computation for a scalarized memory instruction.
2068   unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);
2069
2070   /// The cost computation for an interleaving group of memory instructions.
2071   unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);
2072
2073   /// The cost computation for a Gather/Scatter instruction.
2074   unsigned getGatherScatterCost(Instruction *I, unsigned VF);
2075
2076   /// The cost computation for widening instruction \p I with consecutive
2077   /// memory access.
2078   unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
2079
2080   /// The cost calculation for Load instruction \p I with uniform pointer -
2081   /// scalar load + broadcast.
2082   unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
2083
2084   /// Returns whether the instruction is a load or store and will be emitted
2085   /// as a vector operation.
2086   bool isConsecutiveLoadOrStore(Instruction *I);
2087
2088   /// Create an analysis remark that explains why vectorization failed.
2089   ///
2090   /// \p RemarkName is the identifier for the remark. \return the remark object
2091   /// that can be streamed to.
2092   OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
2093     return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
2094                                   RemarkName, TheLoop);
2095   }
2096
2097   /// Map of scalar integer values to the smallest bitwidth they can be legally
2098   /// represented as. The vector equivalents of these values should be truncated
2099   /// to this type.
2100   MapVector<Instruction *, uint64_t> MinBWs;
2101
2102   /// A type representing the costs for instructions if they were to be
2103   /// scalarized rather than vectorized. The entries are Instruction-Cost
2104   /// pairs.
2105   typedef DenseMap<Instruction *, unsigned> ScalarCostsTy;
2106
2107   /// A set containing all BasicBlocks that are known to be present after
2108   /// vectorization as predicated blocks.
2109   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
2110
2111   /// A map holding scalar costs for different vectorization factors. The
2112   /// presence of a cost for an instruction in the mapping indicates that the
2113   /// instruction will be scalarized when vectorizing with the associated
2114   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
2115   DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;
2116
2117   /// Holds the instructions known to be uniform after vectorization.
2118   /// The data is collected per VF.
2119   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;
2120
2121   /// Holds the instructions known to be scalar after vectorization.
2122   /// The data is collected per VF.
2123   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;
2124
2125   /// Holds the instructions (address computations) that are forced to be
2126   /// scalarized.
2127   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;
2128
2129   /// Returns the expected difference in cost from scalarizing the expression
2130   /// feeding a predicated instruction \p PredInst. The instructions to
2131   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
2132   /// non-negative return value implies the expression will be scalarized.
2133   /// Currently, only single-use chains are considered for scalarization.
2134   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
2135                               unsigned VF);
2136
2137   /// Collects the instructions to scalarize for each predicated instruction in
2138   /// the loop.
2139   void collectInstsToScalarize(unsigned VF);
2140
2141   /// Collect the instructions that are uniform after vectorization. An
2142   /// instruction is uniform if we represent it with a single scalar value in
2143   /// the vectorized loop corresponding to each vector iteration. Examples of
2144   /// uniform instructions include pointer operands of consecutive or
2145   /// interleaved memory accesses. Note that although uniformity implies an
2146   /// instruction will be scalar, the reverse is not true. In general, a
2147   /// scalarized instruction will be represented by VF scalar values in the
2148   /// vectorized loop, each corresponding to an iteration of the original
2149   /// scalar loop.
2150   void collectLoopUniforms(unsigned VF);
2151
2152   /// Collect the instructions that are scalar after vectorization. An
2153   /// instruction is scalar if it is known to be uniform or will be scalarized
2154   /// during vectorization. Non-uniform scalarized instructions will be
2155   /// represented by VF values in the vectorized loop, each corresponding to an
2156   /// iteration of the original scalar loop.
2157   void collectLoopScalars(unsigned VF);
2158
2159   /// Collect Uniform and Scalar values for the given \p VF.
2160   /// The sets depend on the CM decisions for Load/Store instructions, which
2161   /// may be vectorized as interleaved, gather-scatter, or scalarized accesses.
2162   void collectUniformsAndScalars(unsigned VF) {
2163     // Do the analysis once.
2164     if (VF == 1 || Uniforms.count(VF))
2165       return;
2166     setCostBasedWideningDecision(VF);
2167     collectLoopUniforms(VF);
2168     collectLoopScalars(VF);
2169   }
2170
2171   /// Keeps the cost model's vectorization decision and cost for each
2172   /// instruction. Right now it is used for memory instructions only.
2173   typedef DenseMap<std::pair<Instruction *, unsigned>,
2174                    std::pair<InstWidening, unsigned>>
2175       DecisionList;
2176
2177   DecisionList WideningDecisions;
2178
2179 public:
2180   /// The loop that we evaluate.
2181   Loop *TheLoop;
2182   /// Predicated scalar evolution analysis.
2183   PredicatedScalarEvolution &PSE;
2184   /// Loop Info analysis.
2185   LoopInfo *LI;
2186   /// Vectorization legality.
2187   LoopVectorizationLegality *Legal;
2188   /// Vector target information.
2189   const TargetTransformInfo &TTI;
2190   /// Target Library Info.
2191   const TargetLibraryInfo *TLI;
2192   /// Demanded bits analysis.
2193   DemandedBits *DB;
2194   /// Assumption cache.
2195   AssumptionCache *AC;
2196   /// Interface to emit optimization remarks.
2197   OptimizationRemarkEmitter *ORE;
2198
2199   const Function *TheFunction;
2200   /// Loop Vectorize Hints.
2201   const LoopVectorizeHints *Hints;
2202   /// Values to ignore in the cost model.
2203   SmallPtrSet<const Value *, 16> ValuesToIgnore;
2204   /// Values to ignore in the cost model when VF > 1.
2205   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
2206 };
2207
2208 /// LoopVectorizationPlanner - drives the vectorization process after having
2209 /// passed Legality checks.
2210 class LoopVectorizationPlanner {
2211 public:
2212   LoopVectorizationPlanner(Loop *OrigLoop, LoopInfo *LI,
2213                            LoopVectorizationLegality *Legal,
2214                            LoopVectorizationCostModel &CM)
2215       : OrigLoop(OrigLoop), LI(LI), Legal(Legal), CM(CM) {}
2216
2217   ~LoopVectorizationPlanner() {}
2218
2219   /// Plan how to best vectorize, return the best VF and its cost.
2220   LoopVectorizationCostModel::VectorizationFactor plan(bool OptForSize,
2221                                                        unsigned UserVF);
2222
2223   /// Generate the IR code for the vectorized loop.
2224   void executePlan(InnerLoopVectorizer &ILV);
2225
2226 protected:
2227   /// Collect the instructions from the original loop that would be trivially
2228   /// dead in the vectorized loop if generated.
2229   void collectTriviallyDeadInstructions(
2230       SmallPtrSetImpl<Instruction *> &DeadInstructions);
2231
2232 private:
2233   /// The loop that we evaluate.
2234   Loop *OrigLoop;
2235
2236   /// Loop Info analysis.
2237   LoopInfo *LI;
2238
2239   /// The legality analysis.
2240   LoopVectorizationLegality *Legal;
2241
2242   /// The profitability analysis.
2243   LoopVectorizationCostModel &CM;
2244 };
2245
2246 /// \brief This holds vectorization requirements that must be verified late in
2247 /// the process. The requirements are set by the legality and cost-model
2248 /// checks. Once vectorization has been determined to be possible and
2249 /// profitable, the requirements can be verified by looking for metadata or
2250 /// compiler options. For example, some loops require FP commutativity,
2251 /// which is only allowed if vectorization is explicitly specified or if the
2252 /// fast-math compiler option has been provided.
2253 /// Late evaluation of these requirements allows helpful diagnostics to be
2254 /// composed that tell the user what needs to be done to vectorize the loop,
2255 /// for example by specifying #pragma clang loop vectorize or -ffast-math.
2256 /// Late evaluation should be used only when diagnostics can be generated
2257 /// that can be followed by a non-expert user.
2258 class LoopVectorizationRequirements {
2259 public:
2260   LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE)
2261       : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr), ORE(ORE) {}
2262
2263   void addUnsafeAlgebraInst(Instruction *I) {
2264     // First unsafe algebra instruction.
2265     if (!UnsafeAlgebraInst)
2266       UnsafeAlgebraInst = I;
2267   }
2268
2269   void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }
2270
2271   bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
2272     const char *PassName = Hints.vectorizeAnalysisPassName();
2273     bool Failed = false;
2274     if (UnsafeAlgebraInst && !Hints.allowReordering()) {
2275       ORE.emit(
2276           OptimizationRemarkAnalysisFPCommute(PassName, "CantReorderFPOps",
2277                                               UnsafeAlgebraInst->getDebugLoc(),
2278                                               UnsafeAlgebraInst->getParent())
2279           << "loop not vectorized: cannot prove it is safe to reorder "
2280              "floating-point operations");
2281       Failed = true;
2282     }
2283
2284     // Test if runtime memcheck thresholds are exceeded.
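    // (Both thresholds are command-line adjustable. The pragma threshold,
    // typically the larger of the two, caps the number of runtime pointer
    // checks even when vectorization was explicitly requested; the regular
    // threshold is waived when the hints allow reordering.)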
2285 bool PragmaThresholdReached = 2286 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 2287 bool ThresholdReached = 2288 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 2289 if ((ThresholdReached && !Hints.allowReordering()) || 2290 PragmaThresholdReached) { 2291 ORE.emit(OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps", 2292 L->getStartLoc(), 2293 L->getHeader()) 2294 << "loop not vectorized: cannot prove it is safe to reorder " 2295 "memory operations"); 2296 DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 2297 Failed = true; 2298 } 2299 2300 return Failed; 2301 } 2302 2303 private: 2304 unsigned NumRuntimePointerChecks; 2305 Instruction *UnsafeAlgebraInst; 2306 2307 /// Interface to emit optimization remarks. 2308 OptimizationRemarkEmitter &ORE; 2309 }; 2310 2311 static void addAcyclicInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) { 2312 if (L.empty()) { 2313 if (!hasCyclesInLoopBody(L)) 2314 V.push_back(&L); 2315 return; 2316 } 2317 for (Loop *InnerL : L) 2318 addAcyclicInnerLoop(*InnerL, V); 2319 } 2320 2321 /// The LoopVectorize Pass. 2322 struct LoopVectorize : public FunctionPass { 2323 /// Pass identification, replacement for typeid 2324 static char ID; 2325 2326 explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true) 2327 : FunctionPass(ID) { 2328 Impl.DisableUnrolling = NoUnrolling; 2329 Impl.AlwaysVectorize = AlwaysVectorize; 2330 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2331 } 2332 2333 LoopVectorizePass Impl; 2334 2335 bool runOnFunction(Function &F) override { 2336 if (skipFunction(F)) 2337 return false; 2338 2339 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2340 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2341 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2342 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2343 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2344 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2345 auto *TLI = TLIP ? 
&TLIP->getTLI() : nullptr; 2346 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2347 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2348 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2349 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2350 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2351 2352 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2353 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2354 2355 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2356 GetLAA, *ORE); 2357 } 2358 2359 void getAnalysisUsage(AnalysisUsage &AU) const override { 2360 AU.addRequired<AssumptionCacheTracker>(); 2361 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2362 AU.addRequired<DominatorTreeWrapperPass>(); 2363 AU.addRequired<LoopInfoWrapperPass>(); 2364 AU.addRequired<ScalarEvolutionWrapperPass>(); 2365 AU.addRequired<TargetTransformInfoWrapperPass>(); 2366 AU.addRequired<AAResultsWrapperPass>(); 2367 AU.addRequired<LoopAccessLegacyAnalysis>(); 2368 AU.addRequired<DemandedBitsWrapperPass>(); 2369 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2370 AU.addPreserved<LoopInfoWrapperPass>(); 2371 AU.addPreserved<DominatorTreeWrapperPass>(); 2372 AU.addPreserved<BasicAAWrapperPass>(); 2373 AU.addPreserved<GlobalsAAWrapperPass>(); 2374 } 2375 }; 2376 2377 } // end anonymous namespace 2378 2379 //===----------------------------------------------------------------------===// 2380 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2381 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2382 //===----------------------------------------------------------------------===// 2383 2384 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2385 // We need to place the broadcast of invariant variables outside the loop. 2386 Instruction *Instr = dyn_cast<Instruction>(V); 2387 bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody); 2388 bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr; 2389 2390 // Place the code for broadcasting invariant variables in the new preheader. 2391 IRBuilder<>::InsertPointGuard Guard(Builder); 2392 if (Invariant) 2393 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2394 2395 // Broadcast the scalar into all locations in the vector. 2396 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2397 2398 return Shuf; 2399 } 2400 2401 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 2402 const InductionDescriptor &II, Value *Step, Instruction *EntryVal) { 2403 Value *Start = II.getStartValue(); 2404 2405 // Construct the initial value of the vector IV in the vector loop preheader 2406 auto CurrIP = Builder.saveIP(); 2407 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2408 if (isa<TruncInst>(EntryVal)) { 2409 assert(Start->getType()->isIntegerTy() && 2410 "Truncation requires an integer type"); 2411 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2412 Step = Builder.CreateTrunc(Step, TruncType); 2413 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2414 } 2415 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2416 Value *SteppedStart = 2417 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 2418 2419 // We create vector phi nodes for both integer and floating-point induction 2420 // variables. Here, we determine the kind of arithmetic we will perform. 
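  // E.g. (an illustrative sketch), for an integer induction with VF = 4,
  // UF = 1, a start of 0, and a step of 1, the code built below is roughly:
  //   %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %preheader ],
  //                            [ %vec.ind.next, %latch ]
  //   %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
  // A floating-point induction would use the descriptor's FAdd/FSub and FMul
  // instead of Add and Mul.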
2421 Instruction::BinaryOps AddOp; 2422 Instruction::BinaryOps MulOp; 2423 if (Step->getType()->isIntegerTy()) { 2424 AddOp = Instruction::Add; 2425 MulOp = Instruction::Mul; 2426 } else { 2427 AddOp = II.getInductionOpcode(); 2428 MulOp = Instruction::FMul; 2429 } 2430 2431 // Multiply the vectorization factor by the step using integer or 2432 // floating-point arithmetic as appropriate. 2433 Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF); 2434 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 2435 2436 // Create a vector splat to use in the induction update. 2437 // 2438 // FIXME: If the step is non-constant, we create the vector splat with 2439 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2440 // handle a constant vector splat. 2441 Value *SplatVF = isa<Constant>(Mul) 2442 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 2443 : Builder.CreateVectorSplat(VF, Mul); 2444 Builder.restoreIP(CurrIP); 2445 2446 // We may need to add the step a number of times, depending on the unroll 2447 // factor. The last of those goes into the PHI. 2448 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2449 &*LoopVectorBody->getFirstInsertionPt()); 2450 Instruction *LastInduction = VecInd; 2451 for (unsigned Part = 0; Part < UF; ++Part) { 2452 VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction); 2453 if (isa<TruncInst>(EntryVal)) 2454 addMetadata(LastInduction, EntryVal); 2455 LastInduction = cast<Instruction>(addFastMathFlag( 2456 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"))); 2457 } 2458 2459 // Move the last step to the end of the latch block. This ensures consistent 2460 // placement of all induction updates. 2461 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2462 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2463 auto *ICmp = cast<Instruction>(Br->getCondition()); 2464 LastInduction->moveBefore(ICmp); 2465 LastInduction->setName("vec.ind.next"); 2466 2467 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2468 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2469 } 2470 2471 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 2472 return Cost->isScalarAfterVectorization(I, VF) || 2473 Cost->isProfitableToScalarize(I, VF); 2474 } 2475 2476 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2477 if (shouldScalarizeInstruction(IV)) 2478 return true; 2479 auto isScalarInst = [&](User *U) -> bool { 2480 auto *I = cast<Instruction>(U); 2481 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 2482 }; 2483 return any_of(IV->users(), isScalarInst); 2484 } 2485 2486 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) { 2487 2488 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 2489 "Primary induction variable must have an integer type"); 2490 2491 auto II = Legal->getInductionVars()->find(IV); 2492 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 2493 2494 auto ID = II->second; 2495 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2496 2497 // The scalar value to broadcast. This will be derived from the canonical 2498 // induction variable. 2499 Value *ScalarIV = nullptr; 2500 2501 // The value from the original loop to which we are mapping the new induction 2502 // variable. 2503 Instruction *EntryVal = Trunc ? 
cast<Instruction>(Trunc) : IV; 2504 2505 // True if we have vectorized the induction variable. 2506 auto VectorizedIV = false; 2507 2508 // Determine if we want a scalar version of the induction variable. This is 2509 // true if the induction variable itself is not widened, or if it has at 2510 // least one user in the loop that is not widened. 2511 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal); 2512 2513 // Generate code for the induction step. Note that induction steps are 2514 // required to be loop-invariant 2515 assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) && 2516 "Induction step should be loop invariant"); 2517 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2518 Value *Step = nullptr; 2519 if (PSE.getSE()->isSCEVable(IV->getType())) { 2520 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2521 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 2522 LoopVectorPreHeader->getTerminator()); 2523 } else { 2524 Step = cast<SCEVUnknown>(ID.getStep())->getValue(); 2525 } 2526 2527 // Try to create a new independent vector induction variable. If we can't 2528 // create the phi node, we will splat the scalar induction variable in each 2529 // loop iteration. 2530 if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) { 2531 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 2532 VectorizedIV = true; 2533 } 2534 2535 // If we haven't yet vectorized the induction variable, or if we will create 2536 // a scalar one, we need to define the scalar induction variable and step 2537 // values. If we were given a truncation type, truncate the canonical 2538 // induction variable and step. Otherwise, derive these values from the 2539 // induction descriptor. 2540 if (!VectorizedIV || NeedsScalarIV) { 2541 ScalarIV = Induction; 2542 if (IV != OldInduction) { 2543 ScalarIV = IV->getType()->isIntegerTy() 2544 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 2545 : Builder.CreateCast(Instruction::SIToFP, Induction, 2546 IV->getType()); 2547 ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL); 2548 ScalarIV->setName("offset.idx"); 2549 } 2550 if (Trunc) { 2551 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2552 assert(Step->getType()->isIntegerTy() && 2553 "Truncation requires an integer step"); 2554 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 2555 Step = Builder.CreateTrunc(Step, TruncType); 2556 } 2557 } 2558 2559 // If we haven't yet vectorized the induction variable, splat the scalar 2560 // induction variable, and build the necessary step vectors. 2561 if (!VectorizedIV) { 2562 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2563 for (unsigned Part = 0; Part < UF; ++Part) { 2564 Value *EntryPart = 2565 getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode()); 2566 VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart); 2567 if (Trunc) 2568 addMetadata(EntryPart, Trunc); 2569 } 2570 } 2571 2572 // If an induction variable is only used for counting loop iterations or 2573 // calculating addresses, it doesn't need to be widened. Create scalar steps 2574 // that can be used by instructions we will later scalarize. Note that the 2575 // addition of the scalar steps will not increase the number of instructions 2576 // in the loop in the common case prior to InstCombine. We will be trading 2577 // one vector extract for each scalar step. 
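  // E.g., with VF = 4 and a unit-step integer IV %i, one unroll part gets the
  // scalar steps %i, %i + 1, %i + 2, and %i + 3 (see buildScalarSteps).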
2578 if (NeedsScalarIV) 2579 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 2580 } 2581 2582 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2583 Instruction::BinaryOps BinOp) { 2584 // Create and check the types. 2585 assert(Val->getType()->isVectorTy() && "Must be a vector"); 2586 int VLen = Val->getType()->getVectorNumElements(); 2587 2588 Type *STy = Val->getType()->getScalarType(); 2589 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2590 "Induction Step must be an integer or FP"); 2591 assert(Step->getType() == STy && "Step has wrong type"); 2592 2593 SmallVector<Constant *, 8> Indices; 2594 2595 if (STy->isIntegerTy()) { 2596 // Create a vector of consecutive numbers from zero to VF. 2597 for (int i = 0; i < VLen; ++i) 2598 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 2599 2600 // Add the consecutive indices to the vector value. 2601 Constant *Cv = ConstantVector::get(Indices); 2602 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 2603 Step = Builder.CreateVectorSplat(VLen, Step); 2604 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2605 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2606 // which can be found from the original scalar operations. 2607 Step = Builder.CreateMul(Cv, Step); 2608 return Builder.CreateAdd(Val, Step, "induction"); 2609 } 2610 2611 // Floating point induction. 2612 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2613 "Binary Opcode should be specified for FP induction"); 2614 // Create a vector of consecutive numbers from zero to VF. 2615 for (int i = 0; i < VLen; ++i) 2616 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 2617 2618 // Add the consecutive indices to the vector value. 2619 Constant *Cv = ConstantVector::get(Indices); 2620 2621 Step = Builder.CreateVectorSplat(VLen, Step); 2622 2623 // Floating point operations had to be 'fast' to enable the induction. 2624 FastMathFlags Flags; 2625 Flags.setUnsafeAlgebra(); 2626 2627 Value *MulOp = Builder.CreateFMul(Cv, Step); 2628 if (isa<Instruction>(MulOp)) 2629 // Have to check, MulOp may be a constant 2630 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 2631 2632 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2633 if (isa<Instruction>(BOp)) 2634 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2635 return BOp; 2636 } 2637 2638 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2639 Value *EntryVal, 2640 const InductionDescriptor &ID) { 2641 2642 // We shouldn't have to build scalar steps if we aren't vectorizing. 2643 assert(VF > 1 && "VF should be greater than one"); 2644 2645 // Get the value type and ensure it and the step have the same integer type. 2646 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2647 assert(ScalarIVTy == Step->getType() && 2648 "Val and Step should have the same type"); 2649 2650 // We build scalar steps for both integer and floating-point induction 2651 // variables. Here, we determine the kind of arithmetic we will perform. 2652 Instruction::BinaryOps AddOp; 2653 Instruction::BinaryOps MulOp; 2654 if (ScalarIVTy->isIntegerTy()) { 2655 AddOp = Instruction::Add; 2656 MulOp = Instruction::Mul; 2657 } else { 2658 AddOp = ID.getInductionOpcode(); 2659 MulOp = Instruction::FMul; 2660 } 2661 2662 // Determine the number of scalars we need to generate for each unroll 2663 // iteration. If EntryVal is uniform, we only need to generate the first 2664 // lane. 
Otherwise, we generate all VF values. 2665 unsigned Lanes = 2666 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1 : VF; 2667 2668 // Compute the scalar steps and save the results in VectorLoopValueMap. 2669 for (unsigned Part = 0; Part < UF; ++Part) { 2670 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2671 auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane); 2672 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 2673 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 2674 VectorLoopValueMap.setScalarValue(EntryVal, Part, Lane, Add); 2675 } 2676 } 2677 } 2678 2679 int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) { 2680 2681 const ValueToValueMap &Strides = getSymbolicStrides() ? *getSymbolicStrides() : 2682 ValueToValueMap(); 2683 2684 int Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, true, false); 2685 if (Stride == 1 || Stride == -1) 2686 return Stride; 2687 return 0; 2688 } 2689 2690 bool LoopVectorizationLegality::isUniform(Value *V) { 2691 return LAI->isUniform(V); 2692 } 2693 2694 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) { 2695 assert(V != Induction && "The new induction variable should not be used."); 2696 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2697 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2698 2699 // If we have a stride that is replaced by one, do it here. 2700 if (Legal->hasStride(V)) 2701 V = ConstantInt::get(V->getType(), 1); 2702 2703 // If we have a vector mapped to this value, return it. 2704 if (VectorLoopValueMap.hasVectorValue(V, Part)) 2705 return VectorLoopValueMap.getVectorValue(V, Part); 2706 2707 // If the value has not been vectorized, check if it has been scalarized 2708 // instead. If it has been scalarized, and we actually need the value in 2709 // vector form, we will construct the vector values on demand. 2710 if (VectorLoopValueMap.hasAnyScalarValue(V)) { 2711 2712 Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, Part, 0); 2713 2714 // If we've scalarized a value, that value should be an instruction. 2715 auto *I = cast<Instruction>(V); 2716 2717 // If we aren't vectorizing, we can just copy the scalar map values over to 2718 // the vector map. 2719 if (VF == 1) { 2720 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue); 2721 return ScalarValue; 2722 } 2723 2724 // Get the last scalar instruction we generated for V and Part. If the value 2725 // is known to be uniform after vectorization, this corresponds to lane zero 2726 // of the Part unroll iteration. Otherwise, the last instruction is the one 2727 // we created for the last vector lane of the Part unroll iteration. 2728 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1; 2729 auto *LastInst = 2730 cast<Instruction>(VectorLoopValueMap.getScalarValue(V, Part, LastLane)); 2731 2732 // Set the insert point after the last scalarized instruction. This ensures 2733 // the insertelement sequence will directly follow the scalar definitions. 2734 auto OldIP = Builder.saveIP(); 2735 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 2736 Builder.SetInsertPoint(&*NewIP); 2737 2738 // However, if we are vectorizing, we need to construct the vector values. 2739 // If the value is known to be uniform after vectorization, we can just 2740 // broadcast the scalar value corresponding to lane zero for each unroll 2741 // iteration. 
Otherwise, we construct the vector values using insertelement 2742 // instructions. Since the resulting vectors are stored in 2743 // VectorLoopValueMap, we will only generate the insertelements once. 2744 Value *VectorValue = nullptr; 2745 if (Cost->isUniformAfterVectorization(I, VF)) { 2746 VectorValue = getBroadcastInstrs(ScalarValue); 2747 } else { 2748 VectorValue = UndefValue::get(VectorType::get(V->getType(), VF)); 2749 for (unsigned Lane = 0; Lane < VF; ++Lane) 2750 VectorValue = Builder.CreateInsertElement( 2751 VectorValue, getOrCreateScalarValue(V, Part, Lane), 2752 Builder.getInt32(Lane)); 2753 } 2754 VectorLoopValueMap.setVectorValue(V, Part, VectorValue); 2755 Builder.restoreIP(OldIP); 2756 return VectorValue; 2757 } 2758 2759 // If this scalar is unknown, assume that it is a constant or that it is 2760 // loop invariant. Broadcast V and save the value for future uses. 2761 Value *B = getBroadcastInstrs(V); 2762 VectorLoopValueMap.setVectorValue(V, Part, B); 2763 return B; 2764 } 2765 2766 Value *InnerLoopVectorizer::getOrCreateScalarValue(Value *V, unsigned Part, 2767 unsigned Lane) { 2768 2769 // If the value is not an instruction contained in the loop, it should 2770 // already be scalar. 2771 if (OrigLoop->isLoopInvariant(V)) 2772 return V; 2773 2774 assert(Lane > 0 ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF) 2775 : true && "Uniform values only have lane zero"); 2776 2777 // If the value from the original loop has not been vectorized, it is 2778 // represented by UF x VF scalar values in the new loop. Return the requested 2779 // scalar value. 2780 if (VectorLoopValueMap.hasScalarValue(V, Part, Lane)) 2781 return VectorLoopValueMap.getScalarValue(V, Part, Lane); 2782 2783 // If the value has not been scalarized, get its entry in VectorLoopValueMap 2784 // for the given unroll part. If this entry is not a vector type (i.e., the 2785 // vectorization factor is one), there is no need to generate an 2786 // extractelement instruction. 2787 auto *U = getOrCreateVectorValue(V, Part); 2788 if (!U->getType()->isVectorTy()) { 2789 assert(VF == 1 && "Value not scalarized has non-vector type"); 2790 return U; 2791 } 2792 2793 // Otherwise, the value from the original loop has been vectorized and is 2794 // represented by UF vector values. Extract and return the requested scalar 2795 // value from the appropriate vector lane. 2796 return Builder.CreateExtractElement(U, Builder.getInt32(Lane)); 2797 } 2798 2799 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2800 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2801 SmallVector<Constant *, 8> ShuffleMask; 2802 for (unsigned i = 0; i < VF; ++i) 2803 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 2804 2805 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 2806 ConstantVector::get(ShuffleMask), 2807 "reverse"); 2808 } 2809 2810 // Try to vectorize the interleave group that \p Instr belongs to. 2811 // 2812 // E.g. Translate following interleaved load group (factor = 3): 2813 // for (i = 0; i < N; i+=3) { 2814 // R = Pic[i]; // Member of index 0 2815 // G = Pic[i+1]; // Member of index 1 2816 // B = Pic[i+2]; // Member of index 2 2817 // ... 
// do something to R, G, B
2818 // }
2819 // To:
2820 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B
2821 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements
2822 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements
2823 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements
2824 //
2825 // Or translate the following interleaved store group (factor = 3):
2826 // for (i = 0; i < N; i+=3) {
2827 // ... do something to R, G, B
2828 // Pic[i] = R; // Member of index 0
2829 // Pic[i+1] = G; // Member of index 1
2830 // Pic[i+2] = B; // Member of index 2
2831 // }
2832 // To:
2833 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2834 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2835 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2836 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
2837 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
2838 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
2839 const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr);
2840 assert(Group && "Failed to get an interleaved access group.");
2841
2842 // Skip if the current instruction is not the insert position.
2843 if (Instr != Group->getInsertPos())
2844 return;
2845
2846 Value *Ptr = getPointerOperand(Instr);
2847
2848 // Prepare for the vector type of the interleaved load/store.
2849 Type *ScalarTy = getMemInstValueType(Instr);
2850 unsigned InterleaveFactor = Group->getFactor();
2851 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
2852 Type *PtrTy = VecTy->getPointerTo(getMemInstAddressSpace(Instr));
2853
2854 // Prepare for the new pointers.
2855 setDebugLocFromInst(Builder, Ptr);
2856 SmallVector<Value *, 2> NewPtrs;
2857 unsigned Index = Group->getIndex(Instr);
2858
2859 // If the group is reversed, adjust the index to refer to the last vector lane
2860 // instead of the first. We adjust the index from the first vector lane,
2861 // rather than directly getting the pointer for lane VF - 1, because the
2862 // pointer operand of the interleaved access is supposed to be uniform. For
2863 // uniform instructions, we're only required to generate a value for the
2864 // first vector lane in each unroll iteration.
2865 if (Group->isReverse())
2866 Index += (VF - 1) * Group->getFactor();
2867
2868 for (unsigned Part = 0; Part < UF; Part++) {
2869 Value *NewPtr = getOrCreateScalarValue(Ptr, Part, 0);
2870
2871 // Note that the current instruction could have any index in the group. We
2872 // need to adjust the address to that of the member with index 0.
2873 //
2874 // E.g. a = A[i+1]; // Member of index 1 (Current instruction)
2875 // b = A[i]; // Member of index 0
2876 // The current pointer points to A[i+1]; adjust it to A[i].
2877 //
2878 // E.g. A[i+1] = a; // Member of index 1
2879 // A[i] = b; // Member of index 0
2880 // A[i+2] = c; // Member of index 2 (Current instruction)
2881 // The current pointer points to A[i+2]; adjust it to A[i].
2882 NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
2883
2884 // Cast to the vector pointer type.
2885 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
2886 }
2887
2888 setDebugLocFromInst(Builder, Instr);
2889 Value *UndefVec = UndefValue::get(VecTy);
2890
2891 // Vectorize the interleaved load group.
2892 if (isa<LoadInst>(Instr)) {
2893
2894 // For each unroll part, create a wide load for the group.
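// As an illustrative sketch (names are made up, not from a real test), with
// VF = 4 and factor = 3 the wide load for one part looks like:
// %wide.vec = load <12 x i32>, <12 x i32>* %newptr, align 4
// and each member is then recovered below with a strided shuffle such as:
// %strided.vec = shufflevector <12 x i32> %wide.vec, <12 x i32> undef,
// <4 x i32> <i32 0, i32 3, i32 6, i32 9>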
2895 SmallVector<Value *, 2> NewLoads;
2896 for (unsigned Part = 0; Part < UF; Part++) {
2897 auto *NewLoad = Builder.CreateAlignedLoad(
2898 NewPtrs[Part], Group->getAlignment(), "wide.vec");
2899 addMetadata(NewLoad, Instr);
2900 NewLoads.push_back(NewLoad);
2901 }
2902
2903 // For each member in the group, shuffle out the appropriate data from the
2904 // wide loads.
2905 for (unsigned I = 0; I < InterleaveFactor; ++I) {
2906 Instruction *Member = Group->getMember(I);
2907
2908 // Skip the gaps in the group.
2909 if (!Member)
2910 continue;
2911
2912 Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF);
2913 for (unsigned Part = 0; Part < UF; Part++) {
2914 Value *StridedVec = Builder.CreateShuffleVector(
2915 NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2916
2917 // If this member has a different type, cast the result to that type.
2918 if (Member->getType() != ScalarTy) {
2919 VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2920 StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy);
2921 }
2922
2923 if (Group->isReverse())
2924 StridedVec = reverseVector(StridedVec);
2925
2926 VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
2927 }
2928 }
2929 return;
2930 }
2931
2932 // The subvector type for the current instruction.
2933 VectorType *SubVT = VectorType::get(ScalarTy, VF);
2934
2935 // Vectorize the interleaved store group.
2936 for (unsigned Part = 0; Part < UF; Part++) {
2937 // Collect the stored vector from each member.
2938 SmallVector<Value *, 4> StoredVecs;
2939 for (unsigned i = 0; i < InterleaveFactor; i++) {
2940 // An interleaved store group doesn't allow gaps, so each index has a member.
2941 Instruction *Member = Group->getMember(i);
2942 assert(Member && "Failed to get a member from an interleaved store group");
2943
2944 Value *StoredVec = getOrCreateVectorValue(
2945 cast<StoreInst>(Member)->getValueOperand(), Part);
2946 if (Group->isReverse())
2947 StoredVec = reverseVector(StoredVec);
2948
2949 // If this member has a different type, cast it to a unified type.
2950 if (StoredVec->getType() != SubVT)
2951 StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT);
2952
2953 StoredVecs.push_back(StoredVec);
2954 }
2955
2956 // Concatenate all vectors into a wide vector.
2957 Value *WideVec = concatenateVectors(Builder, StoredVecs);
2958
2959 // Interleave the elements in the wide vector.
2960 Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor);
2961 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
2962 "interleaved.vec");
2963
2964 Instruction *NewStoreInstr =
2965 Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
2966 addMetadata(NewStoreInstr, Instr);
2967 }
2968 }
2969
2970 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
2971 // Attempt to issue a wide load.
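// The cost model has already picked a strategy for this access: widen it
// (possibly reversed), emit a gather/scatter, handle it as part of an
// interleave group, or scalarize it. As an illustrative sketch, a
// consecutive load with VF = 4 is emitted below roughly as:
// %vec.ptr = bitcast i32* %part.ptr to <4 x i32>*
// %wide.load = load <4 x i32>, <4 x i32>* %vec.ptr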
2972 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2973 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2974 2975 assert((LI || SI) && "Invalid Load/Store instruction"); 2976 2977 LoopVectorizationCostModel::InstWidening Decision = 2978 Cost->getWideningDecision(Instr, VF); 2979 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 2980 "CM decision should be taken at this point"); 2981 if (Decision == LoopVectorizationCostModel::CM_Interleave) 2982 return vectorizeInterleaveGroup(Instr); 2983 2984 Type *ScalarDataTy = getMemInstValueType(Instr); 2985 Type *DataTy = VectorType::get(ScalarDataTy, VF); 2986 Value *Ptr = getPointerOperand(Instr); 2987 unsigned Alignment = getMemInstAlignment(Instr); 2988 // An alignment of 0 means target abi alignment. We need to use the scalar's 2989 // target abi alignment in such a case. 2990 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2991 if (!Alignment) 2992 Alignment = DL.getABITypeAlignment(ScalarDataTy); 2993 unsigned AddressSpace = getMemInstAddressSpace(Instr); 2994 2995 // Scalarize the memory instruction if necessary. 2996 if (Decision == LoopVectorizationCostModel::CM_Scalarize) 2997 return scalarizeInstruction(Instr, Legal->isScalarWithPredication(Instr)); 2998 2999 // Determine if the pointer operand of the access is either consecutive or 3000 // reverse consecutive. 3001 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 3002 bool Reverse = ConsecutiveStride < 0; 3003 bool CreateGatherScatter = 3004 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 3005 3006 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 3007 // gather/scatter. Otherwise Decision should have been to Scalarize. 3008 assert((ConsecutiveStride || CreateGatherScatter) && 3009 "The instruction should be scalarized"); 3010 3011 // Handle consecutive loads/stores. 3012 if (ConsecutiveStride) 3013 Ptr = getOrCreateScalarValue(Ptr, 0, 0); 3014 3015 VectorParts Mask = createBlockInMask(Instr->getParent()); 3016 // Handle Stores: 3017 if (SI) { 3018 assert(!Legal->isUniform(SI->getPointerOperand()) && 3019 "We do not allow storing to uniform addresses"); 3020 setDebugLocFromInst(Builder, SI); 3021 3022 for (unsigned Part = 0; Part < UF; ++Part) { 3023 Instruction *NewSI = nullptr; 3024 Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part); 3025 if (CreateGatherScatter) { 3026 Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr; 3027 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 3028 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 3029 MaskPart); 3030 } else { 3031 // Calculate the pointer for the specific unroll-part. 3032 Value *PartPtr = 3033 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 3034 3035 if (Reverse) { 3036 // If we store to reverse consecutive memory locations, then we need 3037 // to reverse the order of elements in the stored value. 3038 StoredVal = reverseVector(StoredVal); 3039 // We don't want to update the value in the map as it might be used in 3040 // another expression. So don't call resetVectorValue(StoredVal). 3041 3042 // If the address is consecutive but reversed, then the 3043 // wide store needs to start at the last vector element. 3044 PartPtr = 3045 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 3046 PartPtr = 3047 Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 3048 if (Mask[Part]) // The reverse of a null all-one mask is a null mask. 
3049 Mask[Part] = reverseVector(Mask[Part]); 3050 } 3051 3052 Value *VecPtr = 3053 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 3054 3055 if (Legal->isMaskRequired(SI) && Mask[Part]) 3056 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 3057 Mask[Part]); 3058 else 3059 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 3060 } 3061 addMetadata(NewSI, SI); 3062 } 3063 return; 3064 } 3065 3066 // Handle loads. 3067 assert(LI && "Must have a load instruction"); 3068 setDebugLocFromInst(Builder, LI); 3069 for (unsigned Part = 0; Part < UF; ++Part) { 3070 Value *NewLI; 3071 if (CreateGatherScatter) { 3072 Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr; 3073 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 3074 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 3075 nullptr, "wide.masked.gather"); 3076 addMetadata(NewLI, LI); 3077 } else { 3078 // Calculate the pointer for the specific unroll-part. 3079 Value *PartPtr = 3080 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 3081 3082 if (Reverse) { 3083 // If the address is consecutive but reversed, then the 3084 // wide load needs to start at the last vector element. 3085 PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 3086 PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 3087 if (Mask[Part]) // The reverse of a null all-one mask is a null mask. 3088 Mask[Part] = reverseVector(Mask[Part]); 3089 } 3090 3091 Value *VecPtr = 3092 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 3093 if (Legal->isMaskRequired(LI) && Mask[Part]) 3094 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 3095 UndefValue::get(DataTy), 3096 "wide.masked.load"); 3097 else 3098 NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load"); 3099 3100 // Add metadata to the load, but setVectorValue to the reverse shuffle. 3101 addMetadata(NewLI, LI); 3102 if (Reverse) 3103 NewLI = reverseVector(NewLI); 3104 } 3105 VectorLoopValueMap.setVectorValue(Instr, Part, NewLI); 3106 } 3107 } 3108 3109 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 3110 bool IfPredicateInstr) { 3111 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 3112 DEBUG(dbgs() << "LV: Scalarizing" 3113 << (IfPredicateInstr ? " and predicating:" : ":") << *Instr 3114 << '\n'); 3115 // Holds vector parameters or scalars, in case of uniform vals. 3116 SmallVector<VectorParts, 4> Params; 3117 3118 setDebugLocFromInst(Builder, Instr); 3119 3120 // Does this instruction return a value ? 3121 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 3122 3123 VectorParts Cond; 3124 if (IfPredicateInstr) 3125 Cond = createBlockInMask(Instr->getParent()); 3126 3127 // Determine the number of scalars we need to generate for each unroll 3128 // iteration. If the instruction is uniform, we only need to generate the 3129 // first lane. Otherwise, we generate all VF values. 3130 unsigned Lanes = Cost->isUniformAfterVectorization(Instr, VF) ? 1 : VF; 3131 3132 // For each vector unroll 'part': 3133 for (unsigned Part = 0; Part < UF; ++Part) { 3134 // For each scalar that we create: 3135 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 3136 3137 // Start if-block. 3138 Value *Cmp = nullptr; 3139 if (IfPredicateInstr) { 3140 Cmp = Cond[Part]; 3141 if (!Cmp) // Block in mask is all-one. 
3142 Cmp = Builder.getTrue();
3143 else if (Cmp->getType()->isVectorTy())
3144 Cmp = Builder.CreateExtractElement(Cmp, Builder.getInt32(Lane));
3145 }
3146
3147 Instruction *Cloned = Instr->clone();
3148 if (!IsVoidRetTy)
3149 Cloned->setName(Instr->getName() + ".cloned");
3150
3151 // Replace the operands of the cloned instructions with their scalar
3152 // equivalents in the new loop.
3153 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
3154 auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Part, Lane);
3155 Cloned->setOperand(op, NewOp);
3156 }
3157 addNewMetadata(Cloned, Instr);
3158
3159 // Place the cloned scalar in the new loop.
3160 Builder.Insert(Cloned);
3161
3162 // Add the cloned scalar to the scalar map entry.
3163 VectorLoopValueMap.setScalarValue(Instr, Part, Lane, Cloned);
3164
3165 // If we just cloned a new assumption, add it to the assumption cache.
3166 if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
3167 if (II->getIntrinsicID() == Intrinsic::assume)
3168 AC->registerAssumption(II);
3169
3170 // End if-block.
3171 if (IfPredicateInstr)
3172 PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp));
3173 }
3174 }
3175 }
3176
3177 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
3178 Value *End, Value *Step,
3179 Instruction *DL) {
3180 BasicBlock *Header = L->getHeader();
3181 BasicBlock *Latch = L->getLoopLatch();
3182 // As we're just creating this loop, it's possible no latch exists
3183 // yet. If so, use the header as this will be a single block loop.
3184 if (!Latch)
3185 Latch = Header;
3186
3187 IRBuilder<> Builder(&*Header->getFirstInsertionPt());
3188 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
3189 setDebugLocFromInst(Builder, OldInst);
3190 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
3191
3192 Builder.SetInsertPoint(Latch->getTerminator());
3193 setDebugLocFromInst(Builder, OldInst);
3194
3195 // Create i+1 and fill the PHINode.
3196 Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
3197 Induction->addIncoming(Start, L->getLoopPreheader());
3198 Induction->addIncoming(Next, Latch);
3199 // Create the compare.
3200 Value *ICmp = Builder.CreateICmpEQ(Next, End);
3201 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
3202
3203 // Now we have two terminators. Remove the old one from the block.
3204 Latch->getTerminator()->eraseFromParent();
3205
3206 return Induction;
3207 }
3208
3209 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3210 if (TripCount)
3211 return TripCount;
3212
3213 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3214 // Find the loop boundaries.
3215 ScalarEvolution *SE = PSE.getSE();
3216 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3217 assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
3218 "Invalid loop count");
3219
3220 Type *IdxTy = Legal->getWidestInductionType();
3221
3222 // The exit count might have a type of i64 while the phi is i32. This can
3223 // happen if we have an induction variable that is sign-extended before the
3224 // compare. The only way we can get a backedge-taken count in that case is if
3225 // the induction variable was signed, and as such it will not overflow. In
3226 // such a case truncation is legal.
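// For example (illustrative sketch), if the backedge-taken count is the i64
// expression (sext i32 %n to i64) - 1 while the widest induction type is
// i32, it is safe to truncate it back to the i32 value %n - 1 here.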
3227 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() > 3228 IdxTy->getPrimitiveSizeInBits()) 3229 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3230 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3231 3232 // Get the total trip count from the count by adding 1. 3233 const SCEV *ExitCount = SE->getAddExpr( 3234 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3235 3236 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3237 3238 // Expand the trip count and place the new instructions in the preheader. 3239 // Notice that the pre-header does not change, only the loop body. 3240 SCEVExpander Exp(*SE, DL, "induction"); 3241 3242 // Count holds the overall loop count (N). 3243 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3244 L->getLoopPreheader()->getTerminator()); 3245 3246 if (TripCount->getType()->isPointerTy()) 3247 TripCount = 3248 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3249 L->getLoopPreheader()->getTerminator()); 3250 3251 return TripCount; 3252 } 3253 3254 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3255 if (VectorTripCount) 3256 return VectorTripCount; 3257 3258 Value *TC = getOrCreateTripCount(L); 3259 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3260 3261 // Now we need to generate the expression for the part of the loop that the 3262 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3263 // iterations are not required for correctness, or N - Step, otherwise. Step 3264 // is equal to the vectorization factor (number of SIMD elements) times the 3265 // unroll factor (number of SIMD instructions). 3266 Constant *Step = ConstantInt::get(TC->getType(), VF * UF); 3267 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3268 3269 // If there is a non-reversed interleaved group that may speculatively access 3270 // memory out-of-bounds, we need to ensure that there will be at least one 3271 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 3272 // the trip count, we set the remainder to be equal to the step. If the step 3273 // does not evenly divide the trip count, no adjustment is necessary since 3274 // there will already be scalar iterations. Note that the minimum iterations 3275 // check ensures that N >= Step. 3276 if (VF > 1 && Legal->requiresScalarEpilogue()) { 3277 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3278 R = Builder.CreateSelect(IsZero, Step, R); 3279 } 3280 3281 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3282 3283 return VectorTripCount; 3284 } 3285 3286 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3287 BasicBlock *Bypass) { 3288 Value *Count = getOrCreateTripCount(L); 3289 BasicBlock *BB = L->getLoopPreheader(); 3290 IRBuilder<> Builder(BB->getTerminator()); 3291 3292 // Generate code to check if the loop's trip count is less than VF * UF, or 3293 // equal to it in case a scalar epilogue is required; this implies that the 3294 // vector trip count is zero. This check also covers the case where adding one 3295 // to the backedge-taken count overflowed leading to an incorrect trip count 3296 // of zero. In this case we will also jump to the scalar loop. 3297 auto P = Legal->requiresScalarEpilogue() ? 
ICmpInst::ICMP_ULE
3298 : ICmpInst::ICMP_ULT;
3299 Value *CheckMinIters = Builder.CreateICmp(
3300 P, Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");
3301
3302 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3303 // Update dominator tree immediately if the generated block is a
3304 // LoopBypassBlock because SCEV expansions to generate loop bypass
3305 // checks may query it before the current function is finished.
3306 DT->addNewBlock(NewBB, BB);
3307 if (L->getParentLoop())
3308 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3309 ReplaceInstWithInst(BB->getTerminator(),
3310 BranchInst::Create(Bypass, NewBB, CheckMinIters));
3311 LoopBypassBlocks.push_back(BB);
3312 }
3313
3314 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3315 BasicBlock *BB = L->getLoopPreheader();
3316
3317 // Generate the code to check the SCEV assumptions that we made.
3318 // We want the new basic block to start at the first instruction in a
3319 // sequence of instructions that form a check.
3320 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
3321 "scev.check");
3322 Value *SCEVCheck =
3323 Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
3324
3325 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
3326 if (C->isZero())
3327 return;
3328
3329 // Create a new block containing the SCEV check.
3330 BB->setName("vector.scevcheck");
3331 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3332 // Update dominator tree immediately if the generated block is a
3333 // LoopBypassBlock because SCEV expansions to generate loop bypass
3334 // checks may query it before the current function is finished.
3335 DT->addNewBlock(NewBB, BB);
3336 if (L->getParentLoop())
3337 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3338 ReplaceInstWithInst(BB->getTerminator(),
3339 BranchInst::Create(Bypass, NewBB, SCEVCheck));
3340 LoopBypassBlocks.push_back(BB);
3341 AddedSafetyChecks = true;
3342 }
3343
3344 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
3345 BasicBlock *BB = L->getLoopPreheader();
3346
3347 // Generate the code that checks at runtime whether the arrays overlap. We
3348 // put the checks into a separate block to make the more common case of few
3349 // elements faster.
3350 Instruction *FirstCheckInst;
3351 Instruction *MemRuntimeCheck;
3352 std::tie(FirstCheckInst, MemRuntimeCheck) =
3353 Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
3354 if (!MemRuntimeCheck)
3355 return;
3356
3357 // Create a new block containing the memory check.
3358 BB->setName("vector.memcheck");
3359 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3360 // Update dominator tree immediately if the generated block is a
3361 // LoopBypassBlock because SCEV expansions to generate loop bypass
3362 // checks may query it before the current function is finished.
3363 DT->addNewBlock(NewBB, BB);
3364 if (L->getParentLoop())
3365 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3366 ReplaceInstWithInst(BB->getTerminator(),
3367 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
3368 LoopBypassBlocks.push_back(BB);
3369 AddedSafetyChecks = true;
3370
3371 // We currently don't use LoopVersioning for the actual loop cloning but we
3372 // still use it to add the noalias metadata.
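// As an illustrative sketch, once the runtime check passes, the memory
// instructions in the vector loop end up annotated roughly like:
// %wide.load = load <4 x i32>, <4 x i32>* %p, !alias.scope !0, !noalias !1
// so later passes may assume the checked pointer groups do not alias.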
3373 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 3374 PSE.getSE()); 3375 LVer->prepareNoAliasMetadata(); 3376 } 3377 3378 void InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3379 /* 3380 In this function we generate a new loop. The new loop will contain 3381 the vectorized instructions while the old loop will continue to run the 3382 scalar remainder. 3383 3384 [ ] <-- loop iteration number check. 3385 / | 3386 / v 3387 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3388 | / | 3389 | / v 3390 || [ ] <-- vector pre header. 3391 |/ | 3392 | v 3393 | [ ] \ 3394 | [ ]_| <-- vector loop. 3395 | | 3396 | v 3397 | -[ ] <--- middle-block. 3398 | / | 3399 | / v 3400 -|- >[ ] <--- new preheader. 3401 | | 3402 | v 3403 | [ ] \ 3404 | [ ]_| <-- old scalar loop to handle remainder. 3405 \ | 3406 \ v 3407 >[ ] <-- exit block. 3408 ... 3409 */ 3410 3411 BasicBlock *OldBasicBlock = OrigLoop->getHeader(); 3412 BasicBlock *VectorPH = OrigLoop->getLoopPreheader(); 3413 BasicBlock *ExitBlock = OrigLoop->getExitBlock(); 3414 assert(VectorPH && "Invalid loop structure"); 3415 assert(ExitBlock && "Must have an exit block"); 3416 3417 // Some loops have a single integer induction variable, while other loops 3418 // don't. One example is c++ iterators that often have multiple pointer 3419 // induction variables. In the code below we also support a case where we 3420 // don't have a single induction variable. 3421 // 3422 // We try to obtain an induction variable from the original loop as hard 3423 // as possible. However if we don't find one that: 3424 // - is an integer 3425 // - counts from zero, stepping by one 3426 // - is the size of the widest induction variable type 3427 // then we create a new one. 3428 OldInduction = Legal->getPrimaryInduction(); 3429 Type *IdxTy = Legal->getWidestInductionType(); 3430 3431 // Split the single block loop into the two loop structure described above. 3432 BasicBlock *VecBody = 3433 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body"); 3434 BasicBlock *MiddleBlock = 3435 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block"); 3436 BasicBlock *ScalarPH = 3437 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph"); 3438 3439 // Create and register the new vector loop. 3440 Loop *Lp = new Loop(); 3441 Loop *ParentLoop = OrigLoop->getParentLoop(); 3442 3443 // Insert the new loop into the loop nest and register the new basic blocks 3444 // before calling any utilities such as SCEV that require valid LoopInfo. 3445 if (ParentLoop) { 3446 ParentLoop->addChildLoop(Lp); 3447 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI); 3448 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI); 3449 } else { 3450 LI->addTopLevelLoop(Lp); 3451 } 3452 Lp->addBasicBlockToLoop(VecBody, *LI); 3453 3454 // Find the loop boundaries. 3455 Value *Count = getOrCreateTripCount(Lp); 3456 3457 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3458 3459 // Now, compare the new count to zero. If it is zero skip the vector loop and 3460 // jump to the scalar loop. This check also covers the case where the 3461 // backedge-taken count is uint##_max: adding one to it will overflow leading 3462 // to an incorrect trip count of zero. In this (rare) case we will also jump 3463 // to the scalar loop. 3464 emitMinimumIterationCountCheck(Lp, ScalarPH); 3465 3466 // Generate the code to check any assumptions that we've made for SCEV 3467 // expressions. 
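// A typical assumption is a symbolic stride that the vectorizer treated as
// one: the emitted bypass test compares the actual stride value against 1
// and branches to the scalar loop if the assumption does not hold
// (illustrative example; the exact predicate comes from PSE's union
// predicate).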
3468 emitSCEVChecks(Lp, ScalarPH); 3469 3470 // Generate the code that checks in runtime if arrays overlap. We put the 3471 // checks into a separate block to make the more common case of few elements 3472 // faster. 3473 emitMemRuntimeChecks(Lp, ScalarPH); 3474 3475 // Generate the induction variable. 3476 // The loop step is equal to the vectorization factor (num of SIMD elements) 3477 // times the unroll factor (num of SIMD instructions). 3478 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3479 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 3480 Induction = 3481 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3482 getDebugLocFromInstOrOperands(OldInduction)); 3483 3484 // We are going to resume the execution of the scalar loop. 3485 // Go over all of the induction variables that we found and fix the 3486 // PHIs that are left in the scalar version of the loop. 3487 // The starting values of PHI nodes depend on the counter of the last 3488 // iteration in the vectorized loop. 3489 // If we come from a bypass edge then we need to start from the original 3490 // start value. 3491 3492 // This variable saves the new starting index for the scalar loop. It is used 3493 // to test if there are any tail iterations left once the vector loop has 3494 // completed. 3495 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars(); 3496 for (auto &InductionEntry : *List) { 3497 PHINode *OrigPhi = InductionEntry.first; 3498 InductionDescriptor II = InductionEntry.second; 3499 3500 // Create phi nodes to merge from the backedge-taken check block. 3501 PHINode *BCResumeVal = PHINode::Create( 3502 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator()); 3503 Value *&EndValue = IVEndValues[OrigPhi]; 3504 if (OrigPhi == OldInduction) { 3505 // We know what the end value is. 3506 EndValue = CountRoundDown; 3507 } else { 3508 IRBuilder<> B(Lp->getLoopPreheader()->getTerminator()); 3509 Type *StepType = II.getStep()->getType(); 3510 Instruction::CastOps CastOp = 3511 CastInst::getCastOpcode(CountRoundDown, true, StepType, true); 3512 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd"); 3513 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 3514 EndValue = II.transform(B, CRD, PSE.getSE(), DL); 3515 EndValue->setName("ind.end"); 3516 } 3517 3518 // The new PHI merges the original incoming value, in case of a bypass, 3519 // or the value at the end of the vectorized loop. 3520 BCResumeVal->addIncoming(EndValue, MiddleBlock); 3521 3522 // Fix the scalar body counter (PHI node). 3523 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH); 3524 3525 // The old induction's phi node in the scalar body needs the truncated 3526 // value. 3527 for (BasicBlock *BB : LoopBypassBlocks) 3528 BCResumeVal->addIncoming(II.getStartValue(), BB); 3529 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal); 3530 } 3531 3532 // Add a check in the middle block to see if we have completed 3533 // all of the iterations in the first vector loop. 3534 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3535 Value *CmpN = 3536 CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 3537 CountRoundDown, "cmp.n", MiddleBlock->getTerminator()); 3538 ReplaceInstWithInst(MiddleBlock->getTerminator(), 3539 BranchInst::Create(ExitBlock, ScalarPH, CmpN)); 3540 3541 // Get ready to start creating new instructions into the vectorized body. 3542 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt()); 3543 3544 // Save the state. 
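// These cached blocks anchor the remaining codegen steps: for example,
// fixupIVUsers and fixReduction insert code into LoopMiddleBlock, while the
// bc.resume.val phis created above live in LoopScalarPreHeader.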
3545 LoopVectorPreHeader = Lp->getLoopPreheader();
3546 LoopScalarPreHeader = ScalarPH;
3547 LoopMiddleBlock = MiddleBlock;
3548 LoopExitBlock = ExitBlock;
3549 LoopVectorBody = VecBody;
3550 LoopScalarBody = OldBasicBlock;
3551
3552 // Keep all loop hints from the original loop on the vector loop (we'll
3553 // replace the vectorizer-specific hints below).
3554 if (MDNode *LID = OrigLoop->getLoopID())
3555 Lp->setLoopID(LID);
3556
3557 LoopVectorizeHints Hints(Lp, true, *ORE);
3558 Hints.setAlreadyVectorized();
3559 }
3560
3561 // Fix up external users of the induction variable. At this point, we are
3562 // in LCSSA form, with all external PHIs that use the IV having one input value,
3563 // coming from the remainder loop. We need those PHIs to also have a correct
3564 // value for the IV when arriving directly from the middle block.
3565 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3566 const InductionDescriptor &II,
3567 Value *CountRoundDown, Value *EndValue,
3568 BasicBlock *MiddleBlock) {
3569 // There are two kinds of external IV usages - those that use the value
3570 // computed in the last iteration (the PHI) and those that use the penultimate
3571 // value (the value that feeds into the phi from the loop latch).
3572 // We allow both, but they, obviously, have different values.
3573
3574 assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3575
3576 DenseMap<Value *, Value *> MissingVals;
3577
3578 // An external user of the last iteration's value should see the value that
3579 // the remainder loop uses to initialize its own IV.
3580 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3581 for (User *U : PostInc->users()) {
3582 Instruction *UI = cast<Instruction>(U);
3583 if (!OrigLoop->contains(UI)) {
3584 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3585 MissingVals[UI] = EndValue;
3586 }
3587 }
3588
3589 // An external user of the penultimate value needs to see EndValue - Step.
3590 // The simplest way to get this is to recompute it from the constituent SCEVs,
3591 // that is, Start + (Step * (CRD - 1)).
3592 for (User *U : OrigPhi->users()) {
3593 auto *UI = cast<Instruction>(U);
3594 if (!OrigLoop->contains(UI)) {
3595 const DataLayout &DL =
3596 OrigLoop->getHeader()->getModule()->getDataLayout();
3597 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3598
3599 IRBuilder<> B(MiddleBlock->getTerminator());
3600 Value *CountMinusOne = B.CreateSub(
3601 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3602 Value *CMO =
3603 !II.getStep()->getType()->isIntegerTy()
3604 ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3605 II.getStep()->getType())
3606 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3607 CMO->setName("cast.cmo");
3608 Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
3609 Escape->setName("ind.escape");
3610 MissingVals[UI] = Escape;
3611 }
3612 }
3613
3614 for (auto &I : MissingVals) {
3615 PHINode *PHI = cast<PHINode>(I.first);
3616 // One corner case we have to handle is two IVs "chasing" each other,
3617 // that is, %IV2 = phi [...], [ %IV1, %latch ]
3618 // In this case, if IV1 has an external use, we need to avoid adding both
3619 // "last value of IV1" and "penultimate value of IV2". So, verify that we
3620 // don't already have an incoming value for the middle block.
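// For example (illustrative IR):
// %IV1 = phi i32 [ 0, %ph ], [ %IV1.next, %latch ]
// %IV2 = phi i32 [ -1, %ph ], [ %IV1, %latch ]
// An exit phi whose incoming value is %IV1 is seen once as a use of %IV1's
// own phi and once as a use of %IV2's latch input, so it can be offered an
// incoming value twice; the check below keeps only the first.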
3621 if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3622 PHI->addIncoming(I.second, MiddleBlock);
3623 }
3624 }
3625
3626 namespace {
3627 struct CSEDenseMapInfo {
3628 static bool canHandle(const Instruction *I) {
3629 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3630 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3631 }
3632 static inline Instruction *getEmptyKey() {
3633 return DenseMapInfo<Instruction *>::getEmptyKey();
3634 }
3635 static inline Instruction *getTombstoneKey() {
3636 return DenseMapInfo<Instruction *>::getTombstoneKey();
3637 }
3638 static unsigned getHashValue(const Instruction *I) {
3639 assert(canHandle(I) && "Unknown instruction!");
3640 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3641 I->value_op_end()));
3642 }
3643 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3644 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3645 LHS == getTombstoneKey() || RHS == getTombstoneKey())
3646 return LHS == RHS;
3647 return LHS->isIdenticalTo(RHS);
3648 }
3649 };
3650 }
3651
3652 /// \brief Perform CSE of induction variable instructions.
3653 static void cse(BasicBlock *BB) {
3654 // Perform simple CSE.
3655 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3656 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3657 Instruction *In = &*I++;
3658
3659 if (!CSEDenseMapInfo::canHandle(In))
3660 continue;
3661
3662 // Check if we can replace this instruction with any of the
3663 // visited instructions.
3664 if (Instruction *V = CSEMap.lookup(In)) {
3665 In->replaceAllUsesWith(V);
3666 In->eraseFromParent();
3667 continue;
3668 }
3669
3670 CSEMap[In] = In;
3671 }
3672 }
3673
3674 /// \brief Estimate the overhead of scalarizing an instruction. This is a
3675 /// convenience wrapper for the type-based getScalarizationOverhead API.
3676 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF,
3677 const TargetTransformInfo &TTI) {
3678 if (VF == 1)
3679 return 0;
3680
3681 unsigned Cost = 0;
3682 Type *RetTy = ToVectorTy(I->getType(), VF);
3683 if (!RetTy->isVoidTy() &&
3684 (!isa<LoadInst>(I) ||
3685 !TTI.supportsEfficientVectorElementLoadStore()))
3686 Cost += TTI.getScalarizationOverhead(RetTy, true, false);
3687
3688 if (CallInst *CI = dyn_cast<CallInst>(I)) {
3689 SmallVector<const Value *, 4> Operands(CI->arg_operands());
3690 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
3691 }
3692 else if (!isa<StoreInst>(I) ||
3693 !TTI.supportsEfficientVectorElementLoadStore()) {
3694 SmallVector<const Value *, 4> Operands(I->operand_values());
3695 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
3696 }
3697
3698 return Cost;
3699 }
3700
3701 // Estimate the cost of a call instruction CI if it were vectorized with
3702 // factor VF. Return the cost of the instruction, including scalarization
3703 // overhead if it's needed. The flag NeedToScalarize shows if the call needs
3704 // to be scalarized, i.e., either a vector version isn't available or it is too expensive.
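// As a rough sketch of the comparison made below:
// ScalarizedCost = VF * ScalarCallCost + scalarization overhead
// VectorCost = cost of a declared vector variant of the callee, if any
// NeedToScalarize is set unless VectorCost wins.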
3705 static unsigned getVectorCallCost(CallInst *CI, unsigned VF, 3706 const TargetTransformInfo &TTI, 3707 const TargetLibraryInfo *TLI, 3708 bool &NeedToScalarize) { 3709 Function *F = CI->getCalledFunction(); 3710 StringRef FnName = CI->getCalledFunction()->getName(); 3711 Type *ScalarRetTy = CI->getType(); 3712 SmallVector<Type *, 4> Tys, ScalarTys; 3713 for (auto &ArgOp : CI->arg_operands()) 3714 ScalarTys.push_back(ArgOp->getType()); 3715 3716 // Estimate cost of scalarized vector call. The source operands are assumed 3717 // to be vectors, so we need to extract individual elements from there, 3718 // execute VF scalar calls, and then gather the result into the vector return 3719 // value. 3720 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3721 if (VF == 1) 3722 return ScalarCallCost; 3723 3724 // Compute corresponding vector type for return value and arguments. 3725 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3726 for (Type *ScalarTy : ScalarTys) 3727 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3728 3729 // Compute costs of unpacking argument values for the scalar calls and 3730 // packing the return values to a vector. 3731 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI); 3732 3733 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3734 3735 // If we can't emit a vector call for this function, then the currently found 3736 // cost is the cost we need to return. 3737 NeedToScalarize = true; 3738 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3739 return Cost; 3740 3741 // If the corresponding vector cost is cheaper, return its cost. 3742 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3743 if (VectorCallCost < Cost) { 3744 NeedToScalarize = false; 3745 return VectorCallCost; 3746 } 3747 return Cost; 3748 } 3749 3750 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3751 // factor VF. Return the cost of the instruction, including scalarization 3752 // overhead if it's needed. 3753 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3754 const TargetTransformInfo &TTI, 3755 const TargetLibraryInfo *TLI) { 3756 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3757 assert(ID && "Expected intrinsic call!"); 3758 3759 FastMathFlags FMF; 3760 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3761 FMF = FPMO->getFastMathFlags(); 3762 3763 SmallVector<Value *, 4> Operands(CI->arg_operands()); 3764 return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF); 3765 } 3766 3767 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3768 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3769 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3770 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3771 } 3772 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3773 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3774 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3775 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3776 } 3777 3778 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3779 // For every instruction `I` in MinBWs, truncate the operands, create a 3780 // truncated version of `I` and reextend its result. InstCombine runs 3781 // later and will remove any ext/trunc pairs. 3782 // 3783 SmallPtrSet<Value *, 4> Erased; 3784 for (const auto &KV : Cost->getMinimalBitwidths()) { 3785 // If the value wasn't vectorized, we must maintain the original scalar 3786 // type. 
The absence of the value from VectorLoopValueMap indicates that it 3787 // wasn't vectorized. 3788 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3789 continue; 3790 for (unsigned Part = 0; Part < UF; ++Part) { 3791 Value *I = getOrCreateVectorValue(KV.first, Part); 3792 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3793 continue; 3794 Type *OriginalTy = I->getType(); 3795 Type *ScalarTruncatedTy = 3796 IntegerType::get(OriginalTy->getContext(), KV.second); 3797 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3798 OriginalTy->getVectorNumElements()); 3799 if (TruncatedTy == OriginalTy) 3800 continue; 3801 3802 IRBuilder<> B(cast<Instruction>(I)); 3803 auto ShrinkOperand = [&](Value *V) -> Value * { 3804 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3805 if (ZI->getSrcTy() == TruncatedTy) 3806 return ZI->getOperand(0); 3807 return B.CreateZExtOrTrunc(V, TruncatedTy); 3808 }; 3809 3810 // The actual instruction modification depends on the instruction type, 3811 // unfortunately. 3812 Value *NewI = nullptr; 3813 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3814 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3815 ShrinkOperand(BO->getOperand(1))); 3816 3817 // Any wrapping introduced by shrinking this operation shouldn't be 3818 // considered undefined behavior. So, we can't unconditionally copy 3819 // arithmetic wrapping flags to NewI. 3820 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3821 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3822 NewI = 3823 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3824 ShrinkOperand(CI->getOperand(1))); 3825 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3826 NewI = B.CreateSelect(SI->getCondition(), 3827 ShrinkOperand(SI->getTrueValue()), 3828 ShrinkOperand(SI->getFalseValue())); 3829 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3830 switch (CI->getOpcode()) { 3831 default: 3832 llvm_unreachable("Unhandled cast!"); 3833 case Instruction::Trunc: 3834 NewI = ShrinkOperand(CI->getOperand(0)); 3835 break; 3836 case Instruction::SExt: 3837 NewI = B.CreateSExtOrTrunc( 3838 CI->getOperand(0), 3839 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3840 break; 3841 case Instruction::ZExt: 3842 NewI = B.CreateZExtOrTrunc( 3843 CI->getOperand(0), 3844 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3845 break; 3846 } 3847 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3848 auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements(); 3849 auto *O0 = B.CreateZExtOrTrunc( 3850 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3851 auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements(); 3852 auto *O1 = B.CreateZExtOrTrunc( 3853 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3854 3855 NewI = B.CreateShuffleVector(O0, O1, SI->getMask()); 3856 } else if (isa<LoadInst>(I)) { 3857 // Don't do anything with the operands, just extend the result. 
3858 continue;
3859 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3860 auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
3861 auto *O0 = B.CreateZExtOrTrunc(
3862 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3863 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3864 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3865 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3866 auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
3867 auto *O0 = B.CreateZExtOrTrunc(
3868 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3869 NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3870 } else {
3871 llvm_unreachable("Unhandled instruction type!");
3872 }
3873
3874 // Lastly, extend the result.
3875 NewI->takeName(cast<Instruction>(I));
3876 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3877 I->replaceAllUsesWith(Res);
3878 cast<Instruction>(I)->eraseFromParent();
3879 Erased.insert(I);
3880 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
3881 }
3882 }
3883
3884 // We'll have created a bunch of ZExts that are now parentless. Clean up.
3885 for (const auto &KV : Cost->getMinimalBitwidths()) {
3886 // If the value wasn't vectorized, we must maintain the original scalar
3887 // type. The absence of the value from VectorLoopValueMap indicates that it
3888 // wasn't vectorized.
3889 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3890 continue;
3891 for (unsigned Part = 0; Part < UF; ++Part) {
3892 Value *I = getOrCreateVectorValue(KV.first, Part);
3893 ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3894 if (Inst && Inst->use_empty()) {
3895 Value *NewI = Inst->getOperand(0);
3896 Inst->eraseFromParent();
3897 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3898 }
3899 }
3900 }
3901 }
3902
3903 void InnerLoopVectorizer::fixVectorizedLoop() {
3904 // Insert truncates and extends for any truncated instructions as hints to
3905 // InstCombine.
3906 if (VF > 1)
3907 truncateToMinimalBitwidths();
3908
3909 // At this point every instruction in the original loop is widened to a
3910 // vector form. Now we need to fix the recurrences in the loop. These PHI
3911 // nodes are currently empty because we did not want to introduce cycles.
3912 // This is the second stage of vectorizing recurrences.
3913 fixCrossIterationPHIs();
3914
3915 // Update the dominator tree.
3916 //
3917 // FIXME: After creating the structure of the new loop, the dominator tree is
3918 // no longer up-to-date, and it remains that way until we update it
3919 // here. An out-of-date dominator tree is problematic for SCEV,
3920 // because SCEVExpander uses it to guide code generation. The
3921 // vectorizer uses SCEVExpander in several places. Instead, we should
3922 // keep the dominator tree up-to-date as we go.
3923 updateAnalysis();
3924
3925 // Fix-up external users of the induction variables.
3926 for (auto &Entry : *Legal->getInductionVars())
3927 fixupIVUsers(Entry.first, Entry.second,
3928 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3929 IVEndValues[Entry.first], LoopMiddleBlock);
3930
3931 fixLCSSAPHIs();
3932 predicateInstructions();
3933
3934 // Remove redundant induction instructions.
3935 cse(LoopVectorBody);
3936 }
3937
3938 void InnerLoopVectorizer::fixCrossIterationPHIs() {
3939 // In order to support recurrences we need to be able to vectorize Phi nodes.
3940 // Phi nodes have cycles, so we need to vectorize them in two stages.
This is
3941 // stage #2: We now need to fix the recurrences by adding incoming edges to
3942 // the currently empty PHI nodes. At this point every instruction in the
3943 // original loop is widened to a vector form so we can use them to construct
3944 // the incoming edges.
3945 for (Instruction &I : *OrigLoop->getHeader()) {
3946 PHINode *Phi = dyn_cast<PHINode>(&I);
3947 if (!Phi)
3948 break;
3949 // Handle first-order recurrences and reductions that need to be fixed.
3950 if (Legal->isFirstOrderRecurrence(Phi))
3951 fixFirstOrderRecurrence(Phi);
3952 else if (Legal->isReductionVariable(Phi))
3953 fixReduction(Phi);
3954 }
3955 }
3956
3957 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3958
3959 // This is the second phase of vectorizing first-order recurrences. An
3960 // overview of the transformation is described below. Suppose we have the
3961 // following loop.
3962 //
3963 // for (int i = 0; i < n; ++i)
3964 // b[i] = a[i] - a[i - 1];
3965 //
3966 // There is a first-order recurrence on "a". For this loop, the shorthand
3967 // scalar IR looks like:
3968 //
3969 // scalar.ph:
3970 // s_init = a[-1]
3971 // br scalar.body
3972 //
3973 // scalar.body:
3974 // i = phi [0, scalar.ph], [i+1, scalar.body]
3975 // s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3976 // s2 = a[i]
3977 // b[i] = s2 - s1
3978 // br cond, scalar.body, ...
3979 //
3980 // In this example, s1 is a recurrence because its value depends on the
3981 // previous iteration. In the first phase of vectorization, we created a
3982 // temporary value for s1. We now complete the vectorization and produce the
3983 // shorthand vector IR shown below (for VF = 4, UF = 1).
3984 //
3985 // vector.ph:
3986 // v_init = vector(..., ..., ..., a[-1])
3987 // br vector.body
3988 //
3989 // vector.body
3990 // i = phi [0, vector.ph], [i+4, vector.body]
3991 // v1 = phi [v_init, vector.ph], [v2, vector.body]
3992 // v2 = a[i, i+1, i+2, i+3];
3993 // v3 = vector(v1(3), v2(0, 1, 2))
3994 // b[i, i+1, i+2, i+3] = v2 - v3
3995 // br cond, vector.body, middle.block
3996 //
3997 // middle.block:
3998 // x = v2(3)
3999 // br scalar.ph
4000 //
4001 // scalar.ph:
4002 // s_init = phi [x, middle.block], [a[-1], otherwise]
4003 // br scalar.body
4004 //
4005 // After the vector loop completes execution, we extract the next value of
4006 // the recurrence (x) to use as the initial value in the scalar loop.
4007
4008 // Get the original loop preheader and single loop latch.
4009 auto *Preheader = OrigLoop->getLoopPreheader();
4010 auto *Latch = OrigLoop->getLoopLatch();
4011
4012 // Get the initial and previous values of the scalar recurrence.
4013 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4014 auto *Previous = Phi->getIncomingValueForBlock(Latch);
4015
4016 // Create a vector from the initial value.
4017 auto *VectorInit = ScalarInit;
4018 if (VF > 1) {
4019 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4020 VectorInit = Builder.CreateInsertElement(
4021 UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
4022 Builder.getInt32(VF - 1), "vector.recur.init");
4023 }
4024
4025 // We constructed a temporary phi node in the first phase of vectorization.
4026 // This phi node will eventually be deleted.
4027 Builder.SetInsertPoint(
4028 cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
4029
4030 // Create a phi node for the new recurrence. The current value will either be
4031 // the initial value inserted into a vector or loop-varying vector value.
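// For VF = 4 (illustrative sketch), the phi created below looks like:
// %vector.recur = phi <4 x i32> [ %vector.recur.init, %vector.ph ],
// [ %prev, %vector.body ]
// where the latch input is filled in once all unroll parts are shuffled.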
4032 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur"); 4033 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader); 4034 4035 // Get the vectorized previous value of the last part UF - 1. It appears last 4036 // among all unrolled iterations, due to the order of their construction. 4037 Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1); 4038 4039 // Set the insertion point after the previous value if it is an instruction. 4040 // Note that the previous value may have been constant-folded so it is not 4041 // guaranteed to be an instruction in the vector loop. Also, if the previous 4042 // value is a phi node, we should insert after all the phi nodes to avoid 4043 // breaking basic block verification. 4044 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) || 4045 isa<PHINode>(PreviousLastPart)) 4046 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 4047 else 4048 Builder.SetInsertPoint( 4049 &*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart))); 4050 4051 // We will construct a vector for the recurrence by combining the values for 4052 // the current and previous iterations. This is the required shuffle mask. 4053 SmallVector<Constant *, 8> ShuffleMask(VF); 4054 ShuffleMask[0] = Builder.getInt32(VF - 1); 4055 for (unsigned I = 1; I < VF; ++I) 4056 ShuffleMask[I] = Builder.getInt32(I + VF - 1); 4057 4058 // The vector from which to take the initial value for the current iteration 4059 // (actual or unrolled). Initially, this is the vector phi node. 4060 Value *Incoming = VecPhi; 4061 4062 // Shuffle the current and previous vector and update the vector parts. 4063 for (unsigned Part = 0; Part < UF; ++Part) { 4064 Value *PreviousPart = getOrCreateVectorValue(Previous, Part); 4065 Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part); 4066 auto *Shuffle = 4067 VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart, 4068 ConstantVector::get(ShuffleMask)) 4069 : Incoming; 4070 PhiPart->replaceAllUsesWith(Shuffle); 4071 cast<Instruction>(PhiPart)->eraseFromParent(); 4072 VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle); 4073 Incoming = PreviousPart; 4074 } 4075 4076 // Fix the latch value of the new recurrence in the vector loop. 4077 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4078 4079 // Extract the last vector element in the middle block. This will be the 4080 // initial value for the recurrence when jumping to the scalar loop. 4081 auto *ExtractForScalar = Incoming; 4082 if (VF > 1) { 4083 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4084 ExtractForScalar = Builder.CreateExtractElement( 4085 ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract"); 4086 } 4087 // Extract the second last element in the middle block if the 4088 // Phi is used outside the loop. We need to extract the phi itself 4089 // and not the last element (the phi update in the current iteration). This 4090 // will be the value when jumping to the exit block from the LoopMiddleBlock, 4091 // when the scalar loop is not run at all. 4092 Value *ExtractForPhiUsedOutsideLoop = nullptr; 4093 if (VF > 1) 4094 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 4095 Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi"); 4096 // When loop is unrolled without vectorizing, initialize 4097 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value of 4098 // `Incoming`. 
This is analogous to the vectorized case above: extracting the
4099 // second-to-last element when VF > 1.
4100 else if (UF > 1)
4101 ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
4102
4103 // Fix the initial value of the original recurrence in the scalar loop.
4104 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4105 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4106 for (auto *BB : predecessors(LoopScalarPreHeader)) {
4107 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4108 Start->addIncoming(Incoming, BB);
4109 }
4110
4111 Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
4112 Phi->setName("scalar.recur");
4113
4114 // Finally, fix users of the recurrence outside the loop. The users will need
4115 // either the last value of the scalar recurrence or the last value of the
4116 // vector recurrence we extracted in the middle block. Since the loop is in
4117 // LCSSA form, we just need to find the phi node for the original scalar
4118 // recurrence in the exit block, and then add an edge for the middle block.
4119 for (auto &I : *LoopExitBlock) {
4120 auto *LCSSAPhi = dyn_cast<PHINode>(&I);
4121 if (!LCSSAPhi)
4122 break;
4123 if (LCSSAPhi->getIncomingValue(0) == Phi) {
4124 LCSSAPhi->addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4125 break;
4126 }
4127 }
4128 }
4129
4130 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
4131 Constant *Zero = Builder.getInt32(0);
4132
4133 // Get its reduction variable descriptor.
4134 assert(Legal->isReductionVariable(Phi) &&
4135 "Unable to find the reduction variable");
4136 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
4137
4138 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
4139 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4140 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4141 RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
4142 RdxDesc.getMinMaxRecurrenceKind();
4143 setDebugLocFromInst(Builder, ReductionStartValue);
4144
4145 // We need to generate a reduction vector from the incoming scalar.
4146 // To do so, we need to generate the 'identity' vector and override
4147 // one of the elements with the incoming scalar reduction. We need
4148 // to do it in the vector-loop preheader.
4149 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4150
4151 // This is the vector-clone of the value that leaves the loop.
4152 Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
4153
4154 // Find the reduction identity variable. Zero for addition, or, and xor;
4155 // one for multiplication; -1 for and.
4156 Value *Identity;
4157 Value *VectorStart;
4158 if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
4159 RK == RecurrenceDescriptor::RK_FloatMinMax) {
4160 // MinMax reductions have the start value as their identity.
4161 if (VF == 1) {
4162 VectorStart = Identity = ReductionStartValue;
4163 } else {
4164 VectorStart = Identity =
4165 Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
4166 }
4167 } else {
4168 // Handle other reduction kinds:
4169 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
4170 RK, VecTy->getScalarType());
4171 if (VF == 1) {
4172 Identity = Iden;
4173 // This vector is the Identity vector where the first element is the
4174 // incoming scalar reduction.
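// E.g., for an integer add reduction the identity is 0; in the VF > 1 case
// below, Identity becomes the splat <0, ..., 0> and VectorStart is that
// splat with the start value inserted into lane zero (illustrative sketch).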
4175 VectorStart = ReductionStartValue;
4176 } else {
4177 Identity = ConstantVector::getSplat(VF, Iden);
4178
4179 // This vector is the Identity vector where the first element is the
4180 // incoming scalar reduction.
4181 VectorStart =
4182 Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
4183 }
4184 }
4185
4186 // Fix the vector-loop phi.
4187
4188 // Reductions do not have to start at zero. They can start with
4189 // any loop-invariant value.
4190 BasicBlock *Latch = OrigLoop->getLoopLatch();
4191 Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
4192 for (unsigned Part = 0; Part < UF; ++Part) {
4193 Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
4194 Value *Val = getOrCreateVectorValue(LoopVal, Part);
4195 // Make sure to add the reduction start value only to the
4196 // first unroll part.
4197 Value *StartVal = (Part == 0) ? VectorStart : Identity;
4198 cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
4199 cast<PHINode>(VecRdxPhi)
4200 ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4201 }
4202
4203 // Before each round, move the insertion point right between
4204 // the PHIs and the values we are going to write.
4205 // This allows us to write both PHINodes and the extractelement
4206 // instructions.
4207 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4208
4209 setDebugLocFromInst(Builder, LoopExitInst);
4210
4211 // If the vector reduction can be performed in a smaller type, we truncate
4212 // then extend the loop exit value to enable InstCombine to evaluate the
4213 // entire expression in the smaller type.
4214 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
4215 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4216 Builder.SetInsertPoint(LoopVectorBody->getTerminator());
4217 VectorParts RdxParts(UF);
4218 for (unsigned Part = 0; Part < UF; ++Part) {
4219 RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
4220 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4221 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4222 : Builder.CreateZExt(Trunc, VecTy);
4223 for (Value::user_iterator UI = RdxParts[Part]->user_begin();
4224 UI != RdxParts[Part]->user_end();)
4225 if (*UI != Trunc) {
4226 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
4227 RdxParts[Part] = Extnd;
4228 } else {
4229 ++UI;
4230 }
4231 }
4232 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4233 for (unsigned Part = 0; Part < UF; ++Part) {
4234 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4235 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
4236 }
4237 }
4238
4239 // Reduce all of the unrolled parts into a single vector.
4240 Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
4241 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
4242 setDebugLocFromInst(Builder, ReducedPartRdx);
4243 for (unsigned Part = 1; Part < UF; ++Part) {
4244 Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
4245 if (Op != Instruction::ICmp && Op != Instruction::FCmp)
4246 // Floating point operations had to be 'fast' to enable the reduction.
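// Illustrative sketch (hypothetical values, UF = 2, VF = 4): the loop below
// would combine the two unrolled partial results with a single vector op,
// e.g.
//
//   %bin.rdx = fadd fast <4 x float> %rdx.part1, %rdx.part0
//
// and only the combined vector is then reduced to a scalar in the middle
// block.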
4247 ReducedPartRdx = addFastMathFlag( 4248 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart, 4249 ReducedPartRdx, "bin.rdx")); 4250 else 4251 ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp( 4252 Builder, MinMaxKind, ReducedPartRdx, RdxPart); 4253 } 4254 4255 if (VF > 1) { 4256 bool NoNaN = Legal->hasFunNoNaNAttr(); 4257 ReducedPartRdx = 4258 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN); 4259 // If the reduction can be performed in a smaller type, we need to extend 4260 // the reduction to the wider type before we branch to the original loop. 4261 if (Phi->getType() != RdxDesc.getRecurrenceType()) 4262 ReducedPartRdx = 4263 RdxDesc.isSigned() 4264 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 4265 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 4266 } 4267 4268 // Create a phi node that merges control-flow from the backedge-taken check 4269 // block and the middle block. 4270 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 4271 LoopScalarPreHeader->getTerminator()); 4272 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 4273 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 4274 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4275 4276 // Now, we need to fix the users of the reduction variable 4277 // inside and outside of the scalar remainder loop. 4278 // We know that the loop is in LCSSA form. We need to update the 4279 // PHI nodes in the exit blocks. 4280 for (BasicBlock::iterator LEI = LoopExitBlock->begin(), 4281 LEE = LoopExitBlock->end(); 4282 LEI != LEE; ++LEI) { 4283 PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI); 4284 if (!LCSSAPhi) 4285 break; 4286 4287 // All PHINodes need to have a single entry edge, or two if 4288 // we already fixed them. 4289 assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 4290 4291 // We found a reduction value exit-PHI. Update it with the 4292 // incoming bypass edge. 4293 if (LCSSAPhi->getIncomingValue(0) == LoopExitInst) 4294 LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4295 } // end of the LCSSA phi scan. 4296 4297 // Fix the scalar loop reduction variable with the incoming reduction sum 4298 // from the vector body and from the backedge value. 4299 int IncomingEdgeBlockIdx = 4300 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4301 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4302 // Pick the other block. 4303 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 4304 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4305 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4306 } 4307 4308 void InnerLoopVectorizer::fixLCSSAPHIs() { 4309 for (Instruction &LEI : *LoopExitBlock) { 4310 auto *LCSSAPhi = dyn_cast<PHINode>(&LEI); 4311 if (!LCSSAPhi) 4312 break; 4313 if (LCSSAPhi->getNumIncomingValues() == 1) { 4314 assert(OrigLoop->isLoopInvariant(LCSSAPhi->getIncomingValue(0)) && 4315 "Incoming value isn't loop invariant"); 4316 LCSSAPhi->addIncoming(LCSSAPhi->getIncomingValue(0), LoopMiddleBlock); 4317 } 4318 } 4319 } 4320 4321 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4322 4323 // The basic block and loop containing the predicated instruction. 4324 auto *PredBB = PredInst->getParent(); 4325 auto *VectorLoop = LI->getLoopFor(PredBB); 4326 4327 // Initialize a worklist with the operands of the predicated instruction. 4328 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4329 4330 // Holds instructions that we need to analyze again. 
An instruction may be
4331 // reanalyzed if we don't yet know if we can sink it or not.
4332 SmallVector<Instruction *, 8> InstsToReanalyze;
4333
4334 // Returns true if a given use occurs in the predicated block. Phi nodes use
4335 // their operands in their corresponding predecessor blocks.
4336 auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4337 auto *I = cast<Instruction>(U.getUser());
4338 BasicBlock *BB = I->getParent();
4339 if (auto *Phi = dyn_cast<PHINode>(I))
4340 BB = Phi->getIncomingBlock(
4341 PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4342 return BB == PredBB;
4343 };
4344
4345 // Iteratively sink the scalarized operands of the predicated instruction
4346 // into the block we created for it. When an instruction is sunk, its
4347 // operands are then added to the worklist. The algorithm ends once a full
4348 // pass through the worklist fails to sink a single instruction.
4349 bool Changed;
4350 do {
4351
4352 // Add the instructions that need to be reanalyzed to the worklist, and
4353 // reset the changed indicator.
4354 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4355 InstsToReanalyze.clear();
4356 Changed = false;
4357
4358 while (!Worklist.empty()) {
4359 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4360
4361 // We can't sink an instruction if it is a phi node, is already in the
4362 // predicated block, is not in the loop, or may have side effects.
4363 if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4364 !VectorLoop->contains(I) || I->mayHaveSideEffects())
4365 continue;
4366
4367 // It's legal to sink the instruction if all its uses occur in the
4368 // predicated block. Otherwise, there's nothing to do yet, and we may
4369 // need to reanalyze the instruction.
4370 if (!all_of(I->uses(), isBlockOfUsePredicated)) {
4371 InstsToReanalyze.push_back(I);
4372 continue;
4373 }
4374
4375 // Move the instruction to the beginning of the predicated block, and add
4376 // its operands to the worklist.
4377 I->moveBefore(&*PredBB->getFirstInsertionPt());
4378 Worklist.insert(I->op_begin(), I->op_end());
4379
4380 // The sinking may have enabled other instructions to be sunk, so we will
4381 // need to iterate.
4382 Changed = true;
4383 }
4384 } while (Changed);
4385 }
4386
4387 void InnerLoopVectorizer::predicateInstructions() {
4388
4389 // For each instruction I marked for predication on value C, split I into its
4390 // own basic block to form an if-then construct over C. Since I may be fed by
4391 // an extractelement instruction or other scalar operand, we try to
4392 // iteratively sink its scalar operands into the predicated block. If I feeds
4393 // an insertelement instruction, we try to move this instruction into the
4394 // predicated block as well. For non-void types, a phi node will be created
4395 // for the resulting value (either vector or scalar).
4396 //
4397 // So for some predicated instruction, e.g. the conditional sdiv in:
4398 //
4399 // for.body:
4400 // ...
4401 // %add = add nsw i32 %mul, %0
4402 // %cmp5 = icmp sgt i32 %2, 7
4403 // br i1 %cmp5, label %if.then, label %if.end
4404 //
4405 // if.then:
4406 // %div = sdiv i32 %0, %1
4407 // br label %if.end
4408 //
4409 // if.end:
4410 // %x.0 = phi i32 [ %div, %if.then ], [ %add, %for.body ]
4411 //
4412 // the sdiv at this point is scalarized and if-converted using a select.
4413 // The inactive elements in the vector are not used, but the predicated 4414 // instruction is still executed for all vector elements, essentially: 4415 // 4416 // vector.body: 4417 // ... 4418 // %17 = add nsw <2 x i32> %16, %wide.load 4419 // %29 = extractelement <2 x i32> %wide.load, i32 0 4420 // %30 = extractelement <2 x i32> %wide.load51, i32 0 4421 // %31 = sdiv i32 %29, %30 4422 // %32 = insertelement <2 x i32> undef, i32 %31, i32 0 4423 // %35 = extractelement <2 x i32> %wide.load, i32 1 4424 // %36 = extractelement <2 x i32> %wide.load51, i32 1 4425 // %37 = sdiv i32 %35, %36 4426 // %38 = insertelement <2 x i32> %32, i32 %37, i32 1 4427 // %predphi = select <2 x i1> %26, <2 x i32> %38, <2 x i32> %17 4428 // 4429 // Predication will now re-introduce the original control flow to avoid false 4430 // side-effects by the sdiv instructions on the inactive elements, yielding 4431 // (after cleanup): 4432 // 4433 // vector.body: 4434 // ... 4435 // %5 = add nsw <2 x i32> %4, %wide.load 4436 // %8 = icmp sgt <2 x i32> %wide.load52, <i32 7, i32 7> 4437 // %9 = extractelement <2 x i1> %8, i32 0 4438 // br i1 %9, label %pred.sdiv.if, label %pred.sdiv.continue 4439 // 4440 // pred.sdiv.if: 4441 // %10 = extractelement <2 x i32> %wide.load, i32 0 4442 // %11 = extractelement <2 x i32> %wide.load51, i32 0 4443 // %12 = sdiv i32 %10, %11 4444 // %13 = insertelement <2 x i32> undef, i32 %12, i32 0 4445 // br label %pred.sdiv.continue 4446 // 4447 // pred.sdiv.continue: 4448 // %14 = phi <2 x i32> [ undef, %vector.body ], [ %13, %pred.sdiv.if ] 4449 // %15 = extractelement <2 x i1> %8, i32 1 4450 // br i1 %15, label %pred.sdiv.if54, label %pred.sdiv.continue55 4451 // 4452 // pred.sdiv.if54: 4453 // %16 = extractelement <2 x i32> %wide.load, i32 1 4454 // %17 = extractelement <2 x i32> %wide.load51, i32 1 4455 // %18 = sdiv i32 %16, %17 4456 // %19 = insertelement <2 x i32> %14, i32 %18, i32 1 4457 // br label %pred.sdiv.continue55 4458 // 4459 // pred.sdiv.continue55: 4460 // %20 = phi <2 x i32> [ %14, %pred.sdiv.continue ], [ %19, %pred.sdiv.if54 ] 4461 // %predphi = select <2 x i1> %8, <2 x i32> %20, <2 x i32> %5 4462 4463 for (auto KV : PredicatedInstructions) { 4464 BasicBlock::iterator I(KV.first); 4465 BasicBlock *Head = I->getParent(); 4466 auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false, 4467 /*BranchWeights=*/nullptr, DT, LI); 4468 I->moveBefore(T); 4469 sinkScalarOperands(&*I); 4470 4471 BasicBlock *PredicatedBlock = I->getParent(); 4472 Twine BBNamePrefix = Twine("pred.") + I->getOpcodeName(); 4473 PredicatedBlock->setName(BBNamePrefix + ".if"); 4474 PredicatedBlock->getSingleSuccessor()->setName(BBNamePrefix + ".continue"); 4475 4476 // If the instruction is non-void create a Phi node at reconvergence point. 4477 if (!I->getType()->isVoidTy()) { 4478 Value *IncomingTrue = nullptr; 4479 Value *IncomingFalse = nullptr; 4480 4481 if (I->hasOneUse() && isa<InsertElementInst>(*I->user_begin())) { 4482 // If the predicated instruction is feeding an insert-element, move it 4483 // into the Then block; Phi node will be created for the vector. 4484 InsertElementInst *IEI = cast<InsertElementInst>(*I->user_begin()); 4485 IEI->moveBefore(T); 4486 IncomingTrue = IEI; // the new vector with the inserted element. 4487 IncomingFalse = IEI->getOperand(0); // the unmodified vector 4488 } else { 4489 // Phi node will be created for the scalar predicated instruction. 
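// Illustrative sketch (hypothetical names): for the scalar case handled
// below, the reconvergence phi created at the end of this function would
// look like
//
//   %r = phi i32 [ undef, %head ], [ %div, %pred.sdiv.if ]
//
// The undef incoming value is safe because the inactive lane's result is
// never selected by the final vector select.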
4490 IncomingTrue = &*I; 4491 IncomingFalse = UndefValue::get(I->getType()); 4492 } 4493 4494 BasicBlock *PostDom = I->getParent()->getSingleSuccessor(); 4495 assert(PostDom && "Then block has multiple successors"); 4496 PHINode *Phi = 4497 PHINode::Create(IncomingTrue->getType(), 2, "", &PostDom->front()); 4498 IncomingTrue->replaceAllUsesWith(Phi); 4499 Phi->addIncoming(IncomingFalse, Head); 4500 Phi->addIncoming(IncomingTrue, I->getParent()); 4501 } 4502 } 4503 4504 DEBUG(DT->verifyDomTree()); 4505 } 4506 4507 InnerLoopVectorizer::VectorParts 4508 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) { 4509 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 4510 4511 // Look for cached value. 4512 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 4513 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 4514 if (ECEntryIt != EdgeMaskCache.end()) 4515 return ECEntryIt->second; 4516 4517 VectorParts SrcMask = createBlockInMask(Src); 4518 4519 // The terminator has to be a branch inst! 4520 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 4521 assert(BI && "Unexpected terminator found"); 4522 4523 if (!BI->isConditional()) 4524 return EdgeMaskCache[Edge] = SrcMask; 4525 4526 VectorParts EdgeMask(UF); 4527 for (unsigned Part = 0; Part < UF; ++Part) { 4528 auto *EdgeMaskPart = getOrCreateVectorValue(BI->getCondition(), Part); 4529 if (BI->getSuccessor(0) != Dst) 4530 EdgeMaskPart = Builder.CreateNot(EdgeMaskPart); 4531 4532 if (SrcMask[Part]) // Otherwise block in-mask is all-one, no need to AND. 4533 EdgeMaskPart = Builder.CreateAnd(EdgeMaskPart, SrcMask[Part]); 4534 4535 EdgeMask[Part] = EdgeMaskPart; 4536 } 4537 4538 return EdgeMaskCache[Edge] = EdgeMask; 4539 } 4540 4541 InnerLoopVectorizer::VectorParts 4542 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) { 4543 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 4544 4545 // Look for cached value. 4546 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 4547 if (BCEntryIt != BlockMaskCache.end()) 4548 return BCEntryIt->second; 4549 4550 // All-one mask is modelled as no-mask following the convention for masked 4551 // load/store/gather/scatter. Initialize BlockMask to no-mask. 4552 VectorParts BlockMask(UF); 4553 for (unsigned Part = 0; Part < UF; ++Part) 4554 BlockMask[Part] = nullptr; 4555 4556 // Loop incoming mask is all-one. 4557 if (OrigLoop->getHeader() == BB) 4558 return BlockMaskCache[BB] = BlockMask; 4559 4560 // This is the block mask. We OR all incoming edges. 4561 for (auto *Predecessor : predecessors(BB)) { 4562 VectorParts EdgeMask = createEdgeMask(Predecessor, BB); 4563 if (!EdgeMask[0]) // Mask of predecessor is all-one so mask of block is too. 4564 return BlockMaskCache[BB] = EdgeMask; 4565 4566 if (!BlockMask[0]) { // BlockMask has its initialized nullptr value. 4567 BlockMask = EdgeMask; 4568 continue; 4569 } 4570 4571 for (unsigned Part = 0; Part < UF; ++Part) 4572 BlockMask[Part] = Builder.CreateOr(BlockMask[Part], EdgeMask[Part]); 4573 } 4574 4575 return BlockMaskCache[BB] = BlockMask; 4576 } 4577 4578 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF, 4579 unsigned VF) { 4580 PHINode *P = cast<PHINode>(PN); 4581 // In order to support recurrences we need to be able to vectorize Phi nodes. 4582 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4583 // stage #1: We create a new vector PHI node with no incoming edges. 
We'll use 4584 // this value when we vectorize all of the instructions that use the PHI. 4585 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) { 4586 for (unsigned Part = 0; Part < UF; ++Part) { 4587 // This is phase one of vectorizing PHIs. 4588 Type *VecTy = 4589 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF); 4590 Value *EntryPart = PHINode::Create( 4591 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 4592 VectorLoopValueMap.setVectorValue(P, Part, EntryPart); 4593 } 4594 return; 4595 } 4596 4597 setDebugLocFromInst(Builder, P); 4598 // Check for PHI nodes that are lowered to vector selects. 4599 if (P->getParent() != OrigLoop->getHeader()) { 4600 // We know that all PHIs in non-header blocks are converted into 4601 // selects, so we don't have to worry about the insertion order and we 4602 // can just use the builder. 4603 // At this point we generate the predication tree. There may be 4604 // duplications since this is a simple recursive scan, but future 4605 // optimizations will clean it up. 4606 4607 unsigned NumIncoming = P->getNumIncomingValues(); 4608 4609 // Generate a sequence of selects of the form: 4610 // SELECT(Mask3, In3, 4611 // SELECT(Mask2, In2, 4612 // ( ...))) 4613 VectorParts Entry(UF); 4614 for (unsigned In = 0; In < NumIncoming; In++) { 4615 VectorParts Cond = 4616 createEdgeMask(P->getIncomingBlock(In), P->getParent()); 4617 4618 for (unsigned Part = 0; Part < UF; ++Part) { 4619 Value *In0 = getOrCreateVectorValue(P->getIncomingValue(In), Part); 4620 // We might have single edge PHIs (blocks) - use an identity 4621 // 'select' for the first PHI operand. 4622 if (In == 0) 4623 Entry[Part] = Builder.CreateSelect(Cond[Part], In0, In0); 4624 else 4625 // Select between the current value and the previous incoming edge 4626 // based on the incoming mask. 4627 Entry[Part] = Builder.CreateSelect(Cond[Part], In0, Entry[Part], 4628 "predphi"); 4629 } 4630 } 4631 for (unsigned Part = 0; Part < UF; ++Part) 4632 VectorLoopValueMap.setVectorValue(P, Part, Entry[Part]); 4633 return; 4634 } 4635 4636 // This PHINode must be an induction variable. 4637 // Make sure that we know about it. 4638 assert(Legal->getInductionVars()->count(P) && "Not an induction variable"); 4639 4640 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 4641 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4642 4643 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4644 // which can be found from the original scalar operations. 4645 switch (II.getKind()) { 4646 case InductionDescriptor::IK_NoInduction: 4647 llvm_unreachable("Unknown induction"); 4648 case InductionDescriptor::IK_IntInduction: 4649 case InductionDescriptor::IK_FpInduction: 4650 return widenIntOrFpInduction(P); 4651 case InductionDescriptor::IK_PtrInduction: { 4652 // Handle the pointer induction variable case. 4653 assert(P->getType()->isPointerTy() && "Unexpected type."); 4654 // This is the normalized GEP that starts counting at zero. 4655 Value *PtrInd = Induction; 4656 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType()); 4657 // Determine the number of scalars we need to generate for each unroll 4658 // iteration. If the instruction is uniform, we only need to generate the 4659 // first lane. Otherwise, we generate all VF values. 4660 unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF; 4661 // These are the scalar results. 
Notice that we don't generate vector GEPs 4662 // because scalar GEPs result in better code. 4663 for (unsigned Part = 0; Part < UF; ++Part) { 4664 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4665 Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF); 4666 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4667 Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL); 4668 SclrGep->setName("next.gep"); 4669 VectorLoopValueMap.setScalarValue(P, Part, Lane, SclrGep); 4670 } 4671 } 4672 return; 4673 } 4674 } 4675 } 4676 4677 /// A helper function for checking whether an integer division-related 4678 /// instruction may divide by zero (in which case it must be predicated if 4679 /// executed conditionally in the scalar code). 4680 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4681 /// Non-zero divisors that are non compile-time constants will not be 4682 /// converted into multiplication, so we will still end up scalarizing 4683 /// the division, but can do so w/o predication. 4684 static bool mayDivideByZero(Instruction &I) { 4685 assert((I.getOpcode() == Instruction::UDiv || 4686 I.getOpcode() == Instruction::SDiv || 4687 I.getOpcode() == Instruction::URem || 4688 I.getOpcode() == Instruction::SRem) && 4689 "Unexpected instruction"); 4690 Value *Divisor = I.getOperand(1); 4691 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4692 return !CInt || CInt->isZero(); 4693 } 4694 4695 void InnerLoopVectorizer::vectorizeInstruction(Instruction &I) { 4696 // Scalarize instructions that should remain scalar after vectorization. 4697 if (VF > 1 && 4698 !(isa<BranchInst>(&I) || isa<PHINode>(&I) || isa<DbgInfoIntrinsic>(&I)) && 4699 shouldScalarizeInstruction(&I)) { 4700 scalarizeInstruction(&I, Legal->isScalarWithPredication(&I)); 4701 return; 4702 } 4703 4704 switch (I.getOpcode()) { 4705 case Instruction::Br: 4706 // Nothing to do for PHIs and BR, since we already took care of the 4707 // loop control flow instructions. 4708 break; 4709 case Instruction::PHI: { 4710 // Vectorize PHINodes. 4711 widenPHIInstruction(&I, UF, VF); 4712 break; 4713 } // End of PHI. 4714 case Instruction::GetElementPtr: { 4715 // Construct a vector GEP by widening the operands of the scalar GEP as 4716 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 4717 // results in a vector of pointers when at least one operand of the GEP 4718 // is vector-typed. Thus, to keep the representation compact, we only use 4719 // vector-typed operands for loop-varying values. 4720 auto *GEP = cast<GetElementPtrInst>(&I); 4721 4722 if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) { 4723 // If we are vectorizing, but the GEP has only loop-invariant operands, 4724 // the GEP we build (by only using vector-typed operands for 4725 // loop-varying values) would be a scalar pointer. Thus, to ensure we 4726 // produce a vector of pointers, we need to either arbitrarily pick an 4727 // operand to broadcast, or broadcast a clone of the original GEP. 4728 // Here, we broadcast a clone of the original. 4729 // 4730 // TODO: If at some point we decide to scalarize instructions having 4731 // loop-invariant operands, this special case will no longer be 4732 // required. We would add the scalarization decision to 4733 // collectLoopScalars() and teach getVectorValue() to broadcast 4734 // the lane-zero scalar value. 
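// Illustrative sketch (hypothetical IR, VF = 4): CreateVectorSplat below
// expands to the usual insertelement/shufflevector broadcast idiom, e.g.
//
//   %clone      = getelementptr inbounds i32, i32* %base, i64 %inv
//   %splat.init = insertelement <4 x i32*> undef, i32* %clone, i32 0
//   %splat      = shufflevector <4 x i32*> %splat.init, <4 x i32*> undef,
//                               <4 x i32> zeroinitializer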
4735 auto *Clone = Builder.Insert(GEP->clone()); 4736 for (unsigned Part = 0; Part < UF; ++Part) { 4737 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 4738 VectorLoopValueMap.setVectorValue(&I, Part, EntryPart); 4739 addMetadata(EntryPart, GEP); 4740 } 4741 } else { 4742 // If the GEP has at least one loop-varying operand, we are sure to 4743 // produce a vector of pointers. But if we are only unrolling, we want 4744 // to produce a scalar GEP for each unroll part. Thus, the GEP we 4745 // produce with the code below will be scalar (if VF == 1) or vector 4746 // (otherwise). Note that for the unroll-only case, we still maintain 4747 // values in the vector mapping with initVector, as we do for other 4748 // instructions. 4749 for (unsigned Part = 0; Part < UF; ++Part) { 4750 4751 // The pointer operand of the new GEP. If it's loop-invariant, we 4752 // won't broadcast it. 4753 auto *Ptr = 4754 OrigLoop->isLoopInvariant(GEP->getPointerOperand()) 4755 ? GEP->getPointerOperand() 4756 : getOrCreateVectorValue(GEP->getPointerOperand(), Part); 4757 4758 // Collect all the indices for the new GEP. If any index is 4759 // loop-invariant, we won't broadcast it. 4760 SmallVector<Value *, 4> Indices; 4761 for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) { 4762 if (OrigLoop->isLoopInvariant(U.get())) 4763 Indices.push_back(U.get()); 4764 else 4765 Indices.push_back(getOrCreateVectorValue(U.get(), Part)); 4766 } 4767 4768 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 4769 // but it should be a vector, otherwise. 4770 auto *NewGEP = GEP->isInBounds() 4771 ? Builder.CreateInBoundsGEP(Ptr, Indices) 4772 : Builder.CreateGEP(Ptr, Indices); 4773 assert((VF == 1 || NewGEP->getType()->isVectorTy()) && 4774 "NewGEP is not a pointer vector"); 4775 VectorLoopValueMap.setVectorValue(&I, Part, NewGEP); 4776 addMetadata(NewGEP, GEP); 4777 } 4778 } 4779 4780 break; 4781 } 4782 case Instruction::UDiv: 4783 case Instruction::SDiv: 4784 case Instruction::SRem: 4785 case Instruction::URem: 4786 // Scalarize with predication if this instruction may divide by zero and 4787 // block execution is conditional, otherwise fallthrough. 4788 if (Legal->isScalarWithPredication(&I)) { 4789 scalarizeInstruction(&I, true); 4790 break; 4791 } 4792 LLVM_FALLTHROUGH; 4793 case Instruction::Add: 4794 case Instruction::FAdd: 4795 case Instruction::Sub: 4796 case Instruction::FSub: 4797 case Instruction::Mul: 4798 case Instruction::FMul: 4799 case Instruction::FDiv: 4800 case Instruction::FRem: 4801 case Instruction::Shl: 4802 case Instruction::LShr: 4803 case Instruction::AShr: 4804 case Instruction::And: 4805 case Instruction::Or: 4806 case Instruction::Xor: { 4807 // Just widen binops. 4808 auto *BinOp = cast<BinaryOperator>(&I); 4809 setDebugLocFromInst(Builder, BinOp); 4810 4811 for (unsigned Part = 0; Part < UF; ++Part) { 4812 Value *A = getOrCreateVectorValue(BinOp->getOperand(0), Part); 4813 Value *B = getOrCreateVectorValue(BinOp->getOperand(1), Part); 4814 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B); 4815 4816 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V)) 4817 VecOp->copyIRFlags(BinOp); 4818 4819 // Use this vector value for all users of the original instruction. 4820 VectorLoopValueMap.setVectorValue(&I, Part, V); 4821 addMetadata(V, BinOp); 4822 } 4823 4824 break; 4825 } 4826 case Instruction::Select: { 4827 // Widen selects. 4828 // If the selector is loop invariant we can create a select 4829 // instruction with a scalar condition. 
Otherwise, use vector-select. 4830 auto *SE = PSE.getSE(); 4831 bool InvariantCond = 4832 SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop); 4833 setDebugLocFromInst(Builder, &I); 4834 4835 // The condition can be loop invariant but still defined inside the 4836 // loop. This means that we can't just use the original 'cond' value. 4837 // We have to take the 'vectorized' value and pick the first lane. 4838 // Instcombine will make this a no-op. 4839 4840 auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), 0, 0); 4841 4842 for (unsigned Part = 0; Part < UF; ++Part) { 4843 Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part); 4844 Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part); 4845 Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part); 4846 Value *Sel = 4847 Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1); 4848 VectorLoopValueMap.setVectorValue(&I, Part, Sel); 4849 addMetadata(Sel, &I); 4850 } 4851 4852 break; 4853 } 4854 4855 case Instruction::ICmp: 4856 case Instruction::FCmp: { 4857 // Widen compares. Generate vector compares. 4858 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4859 auto *Cmp = dyn_cast<CmpInst>(&I); 4860 setDebugLocFromInst(Builder, Cmp); 4861 for (unsigned Part = 0; Part < UF; ++Part) { 4862 Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part); 4863 Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part); 4864 Value *C = nullptr; 4865 if (FCmp) { 4866 // Propagate fast math flags. 4867 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4868 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4869 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4870 } else { 4871 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4872 } 4873 VectorLoopValueMap.setVectorValue(&I, Part, C); 4874 addMetadata(C, &I); 4875 } 4876 4877 break; 4878 } 4879 4880 case Instruction::Store: 4881 case Instruction::Load: 4882 vectorizeMemoryInstruction(&I); 4883 break; 4884 case Instruction::ZExt: 4885 case Instruction::SExt: 4886 case Instruction::FPToUI: 4887 case Instruction::FPToSI: 4888 case Instruction::FPExt: 4889 case Instruction::PtrToInt: 4890 case Instruction::IntToPtr: 4891 case Instruction::SIToFP: 4892 case Instruction::UIToFP: 4893 case Instruction::Trunc: 4894 case Instruction::FPTrunc: 4895 case Instruction::BitCast: { 4896 auto *CI = dyn_cast<CastInst>(&I); 4897 setDebugLocFromInst(Builder, CI); 4898 4899 // Optimize the special case where the source is a constant integer 4900 // induction variable. Notice that we can only optimize the 'trunc' case 4901 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 4902 // (c) other casts depend on pointer size. 4903 if (Cost->isOptimizableIVTruncate(CI, VF)) { 4904 widenIntOrFpInduction(cast<PHINode>(CI->getOperand(0)), 4905 cast<TruncInst>(CI)); 4906 break; 4907 } 4908 4909 /// Vectorize casts. 4910 Type *DestTy = 4911 (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF); 4912 4913 for (unsigned Part = 0; Part < UF; ++Part) { 4914 Value *A = getOrCreateVectorValue(CI->getOperand(0), Part); 4915 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4916 VectorLoopValueMap.setVectorValue(&I, Part, Cast); 4917 addMetadata(Cast, &I); 4918 } 4919 break; 4920 } 4921 4922 case Instruction::Call: { 4923 // Ignore dbg intrinsics. 
4924 if (isa<DbgInfoIntrinsic>(I)) 4925 break; 4926 setDebugLocFromInst(Builder, &I); 4927 4928 Module *M = I.getParent()->getParent()->getParent(); 4929 auto *CI = cast<CallInst>(&I); 4930 4931 StringRef FnName = CI->getCalledFunction()->getName(); 4932 Function *F = CI->getCalledFunction(); 4933 Type *RetTy = ToVectorTy(CI->getType(), VF); 4934 SmallVector<Type *, 4> Tys; 4935 for (Value *ArgOperand : CI->arg_operands()) 4936 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 4937 4938 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4939 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 4940 ID == Intrinsic::lifetime_start)) { 4941 scalarizeInstruction(&I); 4942 break; 4943 } 4944 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4945 // version of the instruction. 4946 // Is it beneficial to perform intrinsic call compared to lib call? 4947 bool NeedToScalarize; 4948 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 4949 bool UseVectorIntrinsic = 4950 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 4951 if (!UseVectorIntrinsic && NeedToScalarize) { 4952 scalarizeInstruction(&I); 4953 break; 4954 } 4955 4956 for (unsigned Part = 0; Part < UF; ++Part) { 4957 SmallVector<Value *, 4> Args; 4958 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) { 4959 Value *Arg = CI->getArgOperand(i); 4960 // Some intrinsics have a scalar argument - don't replace it with a 4961 // vector. 4962 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) 4963 Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part); 4964 Args.push_back(Arg); 4965 } 4966 4967 Function *VectorF; 4968 if (UseVectorIntrinsic) { 4969 // Use vector version of the intrinsic. 4970 Type *TysForDecl[] = {CI->getType()}; 4971 if (VF > 1) 4972 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4973 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4974 } else { 4975 // Use vector version of the library call. 4976 StringRef VFnName = TLI->getVectorizedFunction(FnName, VF); 4977 assert(!VFnName.empty() && "Vector function name is empty."); 4978 VectorF = M->getFunction(VFnName); 4979 if (!VectorF) { 4980 // Generate a declaration 4981 FunctionType *FTy = FunctionType::get(RetTy, Tys, false); 4982 VectorF = 4983 Function::Create(FTy, Function::ExternalLinkage, VFnName, M); 4984 VectorF->copyAttributesFrom(F); 4985 } 4986 } 4987 assert(VectorF && "Can't create vector function."); 4988 4989 SmallVector<OperandBundleDef, 1> OpBundles; 4990 CI->getOperandBundlesAsDefs(OpBundles); 4991 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4992 4993 if (isa<FPMathOperator>(V)) 4994 V->copyFastMathFlags(CI); 4995 4996 VectorLoopValueMap.setVectorValue(&I, Part, V); 4997 addMetadata(V, &I); 4998 } 4999 5000 break; 5001 } 5002 5003 default: 5004 // All other instructions are unsupported. Scalarize them. 5005 scalarizeInstruction(&I); 5006 break; 5007 } // end of switch. 5008 } 5009 5010 void InnerLoopVectorizer::updateAnalysis() { 5011 // Forget the original basic block. 5012 PSE.getSE()->forgetLoop(OrigLoop); 5013 5014 // Update the dominator tree information. 
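// For reference, a rough sketch of the block layout assumed by the updates
// below (reconstructed from the code, not a verbatim dump):
//
//   [bypass checks] --> [vector preheader] --> [vector body] (loops)
//         |                                         |
//         |                                         v
//         +------------> [scalar preheader] <-- [middle block] --> [exit]
//                               |                                    ^
//                               v                                    |
//                         [scalar loop] ----------------------------+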
5015 assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
5016 "Entry does not dominate exit.");
5017
5018 DT->addNewBlock(LI->getLoopFor(LoopVectorBody)->getHeader(),
5019 LoopVectorPreHeader);
5020 DT->addNewBlock(LoopMiddleBlock,
5021 LI->getLoopFor(LoopVectorBody)->getLoopLatch());
5022 DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
5023 DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
5024 DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
5025
5026 DEBUG(DT->verifyDomTree());
5027 }
5028
5029 /// \brief Check whether it is safe to if-convert this phi node.
5030 ///
5031 /// Phi nodes with constant expressions that can trap are not safe to
5032 /// if-convert.
5033 static bool canIfConvertPHINodes(BasicBlock *BB) {
5034 for (Instruction &I : *BB) {
5035 auto *Phi = dyn_cast<PHINode>(&I);
5036 if (!Phi)
5037 return true;
5038 for (Value *V : Phi->incoming_values())
5039 if (auto *C = dyn_cast<Constant>(V))
5040 if (C->canTrap())
5041 return false;
5042 }
5043 return true;
5044 }
5045
5046 bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
5047 if (!EnableIfConversion) {
5048 ORE->emit(createMissedAnalysis("IfConversionDisabled")
5049 << "if-conversion is disabled");
5050 return false;
5051 }
5052
5053 assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");
5054
5055 // A list of pointers that we can safely read and write to.
5056 SmallPtrSet<Value *, 8> SafePointers;
5057
5058 // Collect safe addresses.
5059 for (BasicBlock *BB : TheLoop->blocks()) {
5060 if (blockNeedsPredication(BB))
5061 continue;
5062
5063 for (Instruction &I : *BB)
5064 if (auto *Ptr = getPointerOperand(&I))
5065 SafePointers.insert(Ptr);
5066 }
5067
5068 // Collect the blocks that need predication.
5069 BasicBlock *Header = TheLoop->getHeader();
5070 for (BasicBlock *BB : TheLoop->blocks()) {
5071 // We don't support switch statements inside loops.
5072 if (!isa<BranchInst>(BB->getTerminator())) {
5073 ORE->emit(createMissedAnalysis("LoopContainsSwitch", BB->getTerminator())
5074 << "loop contains a switch statement");
5075 return false;
5076 }
5077
5078 // We must be able to predicate all blocks that need to be predicated.
5079 if (blockNeedsPredication(BB)) {
5080 if (!blockCanBePredicated(BB, SafePointers)) {
5081 ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
5082 << "control flow cannot be substituted for a select");
5083 return false;
5084 }
5085 } else if (BB != Header && !canIfConvertPHINodes(BB)) {
5086 ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
5087 << "control flow cannot be substituted for a select");
5088 return false;
5089 }
5090 }
5091
5092 // We can if-convert this loop.
5093 return true;
5094 }
5095
5096 bool LoopVectorizationLegality::canVectorize() {
5097 // Store the result and return it at the end instead of exiting early, in case
5098 // allowExtraAnalysis is used to report multiple reasons for not vectorizing.
5099 bool Result = true;
5100 // We must have a loop in canonical form. Loops with indirectbr in them cannot
5101 // be canonicalized.
5102 if (!TheLoop->getLoopPreheader()) {
5103 ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5104 << "loop control flow is not understood by vectorizer");
5105 if (ORE->allowExtraAnalysis())
5106 Result = false;
5107 else
5108 return false;
5109 }
5110
5111 // FIXME: The code is currently dead, since any loop that gets sent to
5112 // LoopVectorizationLegality is already an innermost loop.
5113 // 5114 // We can only vectorize innermost loops. 5115 if (!TheLoop->empty()) { 5116 ORE->emit(createMissedAnalysis("NotInnermostLoop") 5117 << "loop is not the innermost loop"); 5118 if (ORE->allowExtraAnalysis()) 5119 Result = false; 5120 else 5121 return false; 5122 } 5123 5124 // We must have a single backedge. 5125 if (TheLoop->getNumBackEdges() != 1) { 5126 ORE->emit(createMissedAnalysis("CFGNotUnderstood") 5127 << "loop control flow is not understood by vectorizer"); 5128 if (ORE->allowExtraAnalysis()) 5129 Result = false; 5130 else 5131 return false; 5132 } 5133 5134 // We must have a single exiting block. 5135 if (!TheLoop->getExitingBlock()) { 5136 ORE->emit(createMissedAnalysis("CFGNotUnderstood") 5137 << "loop control flow is not understood by vectorizer"); 5138 if (ORE->allowExtraAnalysis()) 5139 Result = false; 5140 else 5141 return false; 5142 } 5143 5144 // We only handle bottom-tested loops, i.e. loop in which the condition is 5145 // checked at the end of each iteration. With that we can assume that all 5146 // instructions in the loop are executed the same number of times. 5147 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 5148 ORE->emit(createMissedAnalysis("CFGNotUnderstood") 5149 << "loop control flow is not understood by vectorizer"); 5150 if (ORE->allowExtraAnalysis()) 5151 Result = false; 5152 else 5153 return false; 5154 } 5155 5156 // We need to have a loop header. 5157 DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName() 5158 << '\n'); 5159 5160 // Check if we can if-convert non-single-bb loops. 5161 unsigned NumBlocks = TheLoop->getNumBlocks(); 5162 if (NumBlocks != 1 && !canVectorizeWithIfConvert()) { 5163 DEBUG(dbgs() << "LV: Can't if-convert the loop.\n"); 5164 if (ORE->allowExtraAnalysis()) 5165 Result = false; 5166 else 5167 return false; 5168 } 5169 5170 // Check if we can vectorize the instructions and CFG in this loop. 5171 if (!canVectorizeInstrs()) { 5172 DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n"); 5173 if (ORE->allowExtraAnalysis()) 5174 Result = false; 5175 else 5176 return false; 5177 } 5178 5179 // Go over each instruction and look at memory deps. 5180 if (!canVectorizeMemory()) { 5181 DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n"); 5182 if (ORE->allowExtraAnalysis()) 5183 Result = false; 5184 else 5185 return false; 5186 } 5187 5188 DEBUG(dbgs() << "LV: We can vectorize this loop" 5189 << (LAI->getRuntimePointerChecking()->Need 5190 ? " (with a runtime bound check)" 5191 : "") 5192 << "!\n"); 5193 5194 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 5195 5196 // If an override option has been passed in for interleaved accesses, use it. 5197 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 5198 UseInterleaved = EnableInterleavedMemAccesses; 5199 5200 // Analyze interleaved memory accesses. 
5201 if (UseInterleaved) 5202 InterleaveInfo.analyzeInterleaving(*getSymbolicStrides()); 5203 5204 unsigned SCEVThreshold = VectorizeSCEVCheckThreshold; 5205 if (Hints->getForce() == LoopVectorizeHints::FK_Enabled) 5206 SCEVThreshold = PragmaVectorizeSCEVCheckThreshold; 5207 5208 if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) { 5209 ORE->emit(createMissedAnalysis("TooManySCEVRunTimeChecks") 5210 << "Too many SCEV assumptions need to be made and checked " 5211 << "at runtime"); 5212 DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n"); 5213 if (ORE->allowExtraAnalysis()) 5214 Result = false; 5215 else 5216 return false; 5217 } 5218 5219 // Okay! We've done all the tests. If any have failed, return false. Otherwise 5220 // we can vectorize, and at this point we don't have any other mem analysis 5221 // which may limit our maximum vectorization factor, so just return true with 5222 // no restrictions. 5223 return Result; 5224 } 5225 5226 static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) { 5227 if (Ty->isPointerTy()) 5228 return DL.getIntPtrType(Ty); 5229 5230 // It is possible that char's or short's overflow when we ask for the loop's 5231 // trip count, work around this by changing the type size. 5232 if (Ty->getScalarSizeInBits() < 32) 5233 return Type::getInt32Ty(Ty->getContext()); 5234 5235 return Ty; 5236 } 5237 5238 static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) { 5239 Ty0 = convertPointerToIntegerType(DL, Ty0); 5240 Ty1 = convertPointerToIntegerType(DL, Ty1); 5241 if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits()) 5242 return Ty0; 5243 return Ty1; 5244 } 5245 5246 /// \brief Check that the instruction has outside loop users and is not an 5247 /// identified reduction variable. 5248 static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst, 5249 SmallPtrSetImpl<Value *> &AllowedExit) { 5250 // Reduction and Induction instructions are allowed to have exit users. All 5251 // other instructions must not have external users. 5252 if (!AllowedExit.count(Inst)) 5253 // Check that all of the users of the loop are inside the BB. 5254 for (User *U : Inst->users()) { 5255 Instruction *UI = cast<Instruction>(U); 5256 // This user may be a reduction exit value. 5257 if (!TheLoop->contains(UI)) { 5258 DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n'); 5259 return true; 5260 } 5261 } 5262 return false; 5263 } 5264 5265 void LoopVectorizationLegality::addInductionPhi( 5266 PHINode *Phi, const InductionDescriptor &ID, 5267 SmallPtrSetImpl<Value *> &AllowedExit) { 5268 Inductions[Phi] = ID; 5269 Type *PhiTy = Phi->getType(); 5270 const DataLayout &DL = Phi->getModule()->getDataLayout(); 5271 5272 // Get the widest type. 5273 if (!PhiTy->isFloatingPointTy()) { 5274 if (!WidestIndTy) 5275 WidestIndTy = convertPointerToIntegerType(DL, PhiTy); 5276 else 5277 WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy); 5278 } 5279 5280 // Int inductions are special because we only allow one IV. 5281 if (ID.getKind() == InductionDescriptor::IK_IntInduction && 5282 ID.getConstIntStepValue() && 5283 ID.getConstIntStepValue()->isOne() && 5284 isa<Constant>(ID.getStartValue()) && 5285 cast<Constant>(ID.getStartValue())->isNullValue()) { 5286 5287 // Use the phi node with the widest type as induction. Use the last 5288 // one if there are multiple (no good reason for doing this other 5289 // than it is expedient). We've checked that it begins at zero and 5290 // steps by one, so this is a canonical induction variable. 
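// For example (illustrative): in a loop such as
//
//   for (int64_t i = 0; i != n; ++i) { ... }
//
// the phi for i starts at zero and steps by one, so it satisfies the check
// above and is a candidate for PrimaryInduction.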
5291 if (!PrimaryInduction || PhiTy == WidestIndTy)
5292 PrimaryInduction = Phi;
5293 }
5294
5295 // Both the PHI node itself, and the "post-increment" value feeding
5296 // back into the PHI node may have external users.
5297 // We can allow those uses, except if the SCEVs we have for them rely
5298 // on predicates that only hold within the loop, since allowing the exit
5299 // currently means re-using this SCEV outside the loop.
5300 if (PSE.getUnionPredicate().isAlwaysTrue()) {
5301 AllowedExit.insert(Phi);
5302 AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
5303 }
5304
5305 DEBUG(dbgs() << "LV: Found an induction variable.\n");
5306 return;
5307 }
5308
5309 bool LoopVectorizationLegality::canVectorizeInstrs() {
5310 BasicBlock *Header = TheLoop->getHeader();
5311
5312 // Look for the attribute signaling the absence of NaNs.
5313 Function &F = *Header->getParent();
5314 HasFunNoNaNAttr =
5315 F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
5316
5317 // For each block in the loop.
5318 for (BasicBlock *BB : TheLoop->blocks()) {
5319 // Scan the instructions in the block and look for hazards.
5320 for (Instruction &I : *BB) {
5321 if (auto *Phi = dyn_cast<PHINode>(&I)) {
5322 Type *PhiTy = Phi->getType();
5323 // Check that this PHI type is allowed.
5324 if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
5325 !PhiTy->isPointerTy()) {
5326 ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
5327 << "loop control flow is not understood by vectorizer");
5328 DEBUG(dbgs() << "LV: Found a non-int, non-pointer PHI.\n");
5329 return false;
5330 }
5331
5332 // If this PHINode is not in the header block, then we know that we
5333 // can convert it to select during if-conversion. No need to check if
5334 // the PHIs in this block are induction or reduction variables.
5335 if (BB != Header) {
5336 // Check that this instruction has no outside users or is an
5337 // identified reduction value with an outside user.
5338 if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
5339 continue;
5340 ORE->emit(createMissedAnalysis("NeitherInductionNorReduction", Phi)
5341 << "value could not be identified as "
5342 "an induction or reduction variable");
5343 return false;
5344 }
5345
5346 // We only allow if-converted PHIs with exactly two incoming values.
5347 if (Phi->getNumIncomingValues() != 2) {
5348 ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
5349 << "control flow not understood by vectorizer");
5350 DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
5351 return false;
5352 }
5353
5354 RecurrenceDescriptor RedDes;
5355 if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
5356 if (RedDes.hasUnsafeAlgebra())
5357 Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
5358 AllowedExit.insert(RedDes.getLoopExitInstr());
5359 Reductions[Phi] = RedDes;
5360 continue;
5361 }
5362
5363 InductionDescriptor ID;
5364 if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
5365 addInductionPhi(Phi, ID, AllowedExit);
5366 if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr)
5367 Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst());
5368 continue;
5369 }
5370
5371 if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop,
5372 SinkAfter, DT)) {
5373 FirstOrderRecurrences.insert(Phi);
5374 continue;
5375 }
5376
5377 // As a last resort, coerce the PHI to an AddRec expression
5378 // and retry classifying it as an induction PHI.
5379 if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) { 5380 addInductionPhi(Phi, ID, AllowedExit); 5381 continue; 5382 } 5383 5384 ORE->emit(createMissedAnalysis("NonReductionValueUsedOutsideLoop", Phi) 5385 << "value that could not be identified as " 5386 "reduction is used outside the loop"); 5387 DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n"); 5388 return false; 5389 } // end of PHI handling 5390 5391 // We handle calls that: 5392 // * Are debug info intrinsics. 5393 // * Have a mapping to an IR intrinsic. 5394 // * Have a vector version available. 5395 auto *CI = dyn_cast<CallInst>(&I); 5396 if (CI && !getVectorIntrinsicIDForCall(CI, TLI) && 5397 !isa<DbgInfoIntrinsic>(CI) && 5398 !(CI->getCalledFunction() && TLI && 5399 TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) { 5400 ORE->emit(createMissedAnalysis("CantVectorizeCall", CI) 5401 << "call instruction cannot be vectorized"); 5402 DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n"); 5403 return false; 5404 } 5405 5406 // Intrinsics such as powi,cttz and ctlz are legal to vectorize if the 5407 // second argument is the same (i.e. loop invariant) 5408 if (CI && hasVectorInstrinsicScalarOpd( 5409 getVectorIntrinsicIDForCall(CI, TLI), 1)) { 5410 auto *SE = PSE.getSE(); 5411 if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) { 5412 ORE->emit(createMissedAnalysis("CantVectorizeIntrinsic", CI) 5413 << "intrinsic instruction cannot be vectorized"); 5414 DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n"); 5415 return false; 5416 } 5417 } 5418 5419 // Check that the instruction return type is vectorizable. 5420 // Also, we can't vectorize extractelement instructions. 5421 if ((!VectorType::isValidElementType(I.getType()) && 5422 !I.getType()->isVoidTy()) || 5423 isa<ExtractElementInst>(I)) { 5424 ORE->emit(createMissedAnalysis("CantVectorizeInstructionReturnType", &I) 5425 << "instruction return type cannot be vectorized"); 5426 DEBUG(dbgs() << "LV: Found unvectorizable type.\n"); 5427 return false; 5428 } 5429 5430 // Check that the stored type is vectorizable. 5431 if (auto *ST = dyn_cast<StoreInst>(&I)) { 5432 Type *T = ST->getValueOperand()->getType(); 5433 if (!VectorType::isValidElementType(T)) { 5434 ORE->emit(createMissedAnalysis("CantVectorizeStore", ST) 5435 << "store instruction cannot be vectorized"); 5436 return false; 5437 } 5438 5439 // FP instructions can allow unsafe algebra, thus vectorizable by 5440 // non-IEEE-754 compliant SIMD units. 5441 // This applies to floating-point math operations and calls, not memory 5442 // operations, shuffles, or casts, as they don't change precision or 5443 // semantics. 5444 } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) && 5445 !I.hasUnsafeAlgebra()) { 5446 DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n"); 5447 Hints->setPotentiallyUnsafe(); 5448 } 5449 5450 // Reduction instructions are allowed to have exit users. 5451 // All other instructions must not have external users. 5452 if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) { 5453 ORE->emit(createMissedAnalysis("ValueUsedOutsideLoop", &I) 5454 << "value cannot be used outside the loop"); 5455 return false; 5456 } 5457 5458 } // next instr. 
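// To recap the call handling above with a hypothetical example: a loop body
// containing
//
//   out[i] = sqrtf(in[i]);
//
// is acceptable because sqrtf maps to the llvm.sqrt intrinsic, whereas a
// call to an arbitrary external function with no intrinsic mapping and no
// TLI-provided vector variant is rejected with "CantVectorizeCall".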
5459 } 5460 5461 if (!PrimaryInduction) { 5462 DEBUG(dbgs() << "LV: Did not find one integer induction var.\n"); 5463 if (Inductions.empty()) { 5464 ORE->emit(createMissedAnalysis("NoInductionVariable") 5465 << "loop induction variable could not be identified"); 5466 return false; 5467 } 5468 } 5469 5470 // Now we know the widest induction type, check if our found induction 5471 // is the same size. If it's not, unset it here and InnerLoopVectorizer 5472 // will create another. 5473 if (PrimaryInduction && WidestIndTy != PrimaryInduction->getType()) 5474 PrimaryInduction = nullptr; 5475 5476 return true; 5477 } 5478 5479 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) { 5480 5481 // We should not collect Scalars more than once per VF. Right now, this 5482 // function is called from collectUniformsAndScalars(), which already does 5483 // this check. Collecting Scalars for VF=1 does not make any sense. 5484 assert(VF >= 2 && !Scalars.count(VF) && 5485 "This function should not be visited twice for the same VF"); 5486 5487 SmallSetVector<Instruction *, 8> Worklist; 5488 5489 // These sets are used to seed the analysis with pointers used by memory 5490 // accesses that will remain scalar. 5491 SmallSetVector<Instruction *, 8> ScalarPtrs; 5492 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 5493 5494 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 5495 // The pointer operands of loads and stores will be scalar as long as the 5496 // memory access is not a gather or scatter operation. The value operand of a 5497 // store will remain scalar if the store is scalarized. 5498 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 5499 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 5500 assert(WideningDecision != CM_Unknown && 5501 "Widening decision should be ready at this moment"); 5502 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 5503 if (Ptr == Store->getValueOperand()) 5504 return WideningDecision == CM_Scalarize; 5505 assert(Ptr == getPointerOperand(MemAccess) && 5506 "Ptr is neither a value or pointer operand"); 5507 return WideningDecision != CM_GatherScatter; 5508 }; 5509 5510 // A helper that returns true if the given value is a bitcast or 5511 // getelementptr instruction contained in the loop. 5512 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 5513 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 5514 isa<GetElementPtrInst>(V)) && 5515 !TheLoop->isLoopInvariant(V); 5516 }; 5517 5518 // A helper that evaluates a memory access's use of a pointer. If the use 5519 // will be a scalar use, and the pointer is only used by memory accesses, we 5520 // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in 5521 // PossibleNonScalarPtrs. 5522 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 5523 5524 // We only care about bitcast and getelementptr instructions contained in 5525 // the loop. 5526 if (!isLoopVaryingBitCastOrGEP(Ptr)) 5527 return; 5528 5529 // If the pointer has already been identified as scalar (e.g., if it was 5530 // also identified as uniform), there's nothing to do. 5531 auto *I = cast<Instruction>(Ptr); 5532 if (Worklist.count(I)) 5533 return; 5534 5535 // If the use of the pointer will be a scalar use, and all users of the 5536 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 5537 // place the pointer in PossibleNonScalarPtrs. 
5538 if (isScalarUse(MemAccess, Ptr) && all_of(I->users(), [&](User *U) { 5539 return isa<LoadInst>(U) || isa<StoreInst>(U); 5540 })) 5541 ScalarPtrs.insert(I); 5542 else 5543 PossibleNonScalarPtrs.insert(I); 5544 }; 5545 5546 // We seed the scalars analysis with three classes of instructions: (1) 5547 // instructions marked uniform-after-vectorization, (2) bitcast and 5548 // getelementptr instructions used by memory accesses requiring a scalar use, 5549 // and (3) pointer induction variables and their update instructions (we 5550 // currently only scalarize these). 5551 // 5552 // (1) Add to the worklist all instructions that have been identified as 5553 // uniform-after-vectorization. 5554 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 5555 5556 // (2) Add to the worklist all bitcast and getelementptr instructions used by 5557 // memory accesses requiring a scalar use. The pointer operands of loads and 5558 // stores will be scalar as long as the memory accesses is not a gather or 5559 // scatter operation. The value operand of a store will remain scalar if the 5560 // store is scalarized. 5561 for (auto *BB : TheLoop->blocks()) 5562 for (auto &I : *BB) { 5563 if (auto *Load = dyn_cast<LoadInst>(&I)) { 5564 evaluatePtrUse(Load, Load->getPointerOperand()); 5565 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 5566 evaluatePtrUse(Store, Store->getPointerOperand()); 5567 evaluatePtrUse(Store, Store->getValueOperand()); 5568 } 5569 } 5570 for (auto *I : ScalarPtrs) 5571 if (!PossibleNonScalarPtrs.count(I)) { 5572 DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 5573 Worklist.insert(I); 5574 } 5575 5576 // (3) Add to the worklist all pointer induction variables and their update 5577 // instructions. 5578 // 5579 // TODO: Once we are able to vectorize pointer induction variables we should 5580 // no longer insert them into the worklist here. 5581 auto *Latch = TheLoop->getLoopLatch(); 5582 for (auto &Induction : *Legal->getInductionVars()) { 5583 auto *Ind = Induction.first; 5584 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5585 if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction) 5586 continue; 5587 Worklist.insert(Ind); 5588 Worklist.insert(IndUpdate); 5589 DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 5590 DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n"); 5591 } 5592 5593 // Insert the forced scalars. 5594 // FIXME: Currently widenPHIInstruction() often creates a dead vector 5595 // induction variable when the PHI user is scalarized. 5596 if (ForcedScalars.count(VF)) 5597 for (auto *I : ForcedScalars.find(VF)->second) 5598 Worklist.insert(I); 5599 5600 // Expand the worklist by looking through any bitcasts and getelementptr 5601 // instructions we've already identified as scalar. This is similar to the 5602 // expansion step in collectLoopUniforms(); however, here we're only 5603 // expanding to include additional bitcasts and getelementptr instructions. 
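// For example (hypothetical IR, assuming the load is scalarized): given
//
//   %gep  = getelementptr i32, i32* %base, i64 %iv
//   %cast = bitcast i32* %gep to float*
//   %val  = load float, float* %cast
//
// once %cast has been marked scalar, the walk below also marks %gep scalar,
// since every user of %gep is itself already known to be scalar.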
5604   unsigned Idx = 0;
5605   while (Idx != Worklist.size()) {
5606     Instruction *Dst = Worklist[Idx++];
5607     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
5608       continue;
5609     auto *Src = cast<Instruction>(Dst->getOperand(0));
5610     if (all_of(Src->users(), [&](User *U) -> bool {
5611           auto *J = cast<Instruction>(U);
5612           return !TheLoop->contains(J) || Worklist.count(J) ||
5613                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
5614                   isScalarUse(J, Src));
5615         })) {
5616       Worklist.insert(Src);
5617       DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5618     }
5619   }
5620
5621   // An induction variable will remain scalar if all users of the induction
5622   // variable and induction variable update remain scalar.
5623   for (auto &Induction : *Legal->getInductionVars()) {
5624     auto *Ind = Induction.first;
5625     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5626
5627     // We already considered pointer induction variables, so there's no reason
5628     // to look at their users again.
5629     //
5630     // TODO: Once we are able to vectorize pointer induction variables we
5631     // should no longer skip over them here.
5632     if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction)
5633       continue;
5634
5635     // Determine if all users of the induction variable are scalar after
5636     // vectorization.
5637     auto ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
5638       auto *I = cast<Instruction>(U);
5639       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
5640     });
5641     if (!ScalarInd)
5642       continue;
5643
5644     // Determine if all users of the induction variable update instruction are
5645     // scalar after vectorization.
5646     auto ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
5647       auto *I = cast<Instruction>(U);
5648       return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
5649     });
5650     if (!ScalarIndUpdate)
5651       continue;
5652
5653     // The induction variable and its update instruction will remain scalar.
5654     Worklist.insert(Ind);
5655     Worklist.insert(IndUpdate);
5656     DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5657     DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n");
5658   }
5659
5660   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5661 }
5662
5663 bool LoopVectorizationLegality::isScalarWithPredication(Instruction *I) {
5664   if (!blockNeedsPredication(I->getParent()))
5665     return false;
5666   switch (I->getOpcode()) {
5667   default:
5668     break;
5669   case Instruction::Store:
5670     return !isMaskRequired(I);
5671   case Instruction::UDiv:
5672   case Instruction::SDiv:
5673   case Instruction::SRem:
5674   case Instruction::URem:
5675     return mayDivideByZero(*I);
5676   }
5677   return false;
5678 }
5679
5680 bool LoopVectorizationLegality::memoryInstructionCanBeWidened(Instruction *I,
5681                                                               unsigned VF) {
5682   // Get and ensure we have a valid memory instruction.
5683   LoadInst *LI = dyn_cast<LoadInst>(I);
5684   StoreInst *SI = dyn_cast<StoreInst>(I);
5685   assert((LI || SI) && "Invalid memory instruction");
5686
5687   auto *Ptr = getPointerOperand(I);
5688
5689   // First of all, in order to be widened, the pointer must be consecutive.
5690   if (!isConsecutivePtr(Ptr))
5691     return false;
5692
5693   // If the instruction is a store located in a predicated block, it will be
5694   // scalarized.
5695   if (isScalarWithPredication(I))
5696     return false;
5697
5698   // If the instruction's allocated size doesn't equal its type size, it
5699   // requires padding and will be scalarized.
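  //
  // As an illustrative (target-dependent) example: x86_fp80 typically has an
  // 80-bit store size but a larger, padded allocation size, so a vector of
  // such elements is not laid out contiguously in memory; hasIrregularType()
  // is expected to return true for cases like this, forcing scalarization.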
5700   auto &DL = I->getModule()->getDataLayout();
5701   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5702   if (hasIrregularType(ScalarTy, DL, VF))
5703     return false;
5704
5705   return true;
5706 }
5707
5708 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
5709
5710   // We should not collect Uniforms more than once per VF. Right now,
5711   // this function is called from collectUniformsAndScalars(), which
5712   // already does this check. Collecting Uniforms for VF=1 does not make any
5713   // sense.
5714
5715   assert(VF >= 2 && !Uniforms.count(VF) &&
5716          "This function should not be visited twice for the same VF");
5717
5718   // Create the entry for this VF up front. Even if we find no uniform values,
5719   // the entry means we won't analyze this VF again; Uniforms.count(VF) == 1.
5720   Uniforms[VF].clear();
5721
5722   // We now know that the loop is vectorizable!
5723   // Collect instructions inside the loop that will remain uniform after
5724   // vectorization.
5725
5726   // Global values, params and instructions outside of the current loop are
5727   // out of scope.
5728   auto isOutOfScope = [&](Value *V) -> bool {
5729     Instruction *I = dyn_cast<Instruction>(V);
5730     return (!I || !TheLoop->contains(I));
5731   };
5732
5733   SetVector<Instruction *> Worklist;
5734   BasicBlock *Latch = TheLoop->getLoopLatch();
5735
5736   // Start with the conditional branch. If the branch condition is an
5737   // instruction contained in the loop that is only used by the branch, it is
5738   // uniform.
5739   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5740   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) {
5741     Worklist.insert(Cmp);
5742     DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
5743   }
5744
5745   // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
5746   // are pointers that are treated like consecutive pointers during
5747   // vectorization. The pointer operands of interleaved accesses are an
5748   // example.
5749   SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
5750
5751   // Holds pointer operands of instructions that are possibly non-uniform.
5752   SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
5753
5754   auto isUniformDecision = [&](Instruction *I, unsigned VF) {
5755     InstWidening WideningDecision = getWideningDecision(I, VF);
5756     assert(WideningDecision != CM_Unknown &&
5757            "Widening decision should be ready at this moment");
5758
5759     return (WideningDecision == CM_Widen ||
5760             WideningDecision == CM_Interleave);
5761   };
5762   // Iterate over the instructions in the loop, and collect all
5763   // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
5764   // that a consecutive-like pointer operand will be scalarized, we collect it
5765   // in PossibleNonUniformPtrs instead. We use two sets here because a single
5766   // getelementptr instruction can be used by both vectorized and scalarized
5767   // memory instructions. For example, if a loop loads and stores from the same
5768   // location, but the store is conditional, the store will be scalarized, and
5769   // the getelementptr won't remain uniform.
5770   for (auto *BB : TheLoop->blocks())
5771     for (auto &I : *BB) {
5772
5773       // If there's no pointer operand, there's nothing to do.
5774       auto *Ptr = dyn_cast_or_null<Instruction>(getPointerOperand(&I));
5775       if (!Ptr)
5776         continue;
5777
5778       // True if all users of Ptr are memory accesses that have Ptr as their
5779       // pointer operand.
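      //
      // For example (a hypothetical case): if %gep is the address of a load
      // but is also the *value* operand of some store, that store's pointer
      // operand is not %gep, so the check below fails and %gep is treated as
      // possibly non-uniform.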
5780       auto UsersAreMemAccesses = all_of(Ptr->users(), [&](User *U) -> bool {
5781         return getPointerOperand(U) == Ptr;
5782       });
5783
5784       // Ensure the memory instruction will not be scalarized or used by
5785       // gather/scatter, making its pointer operand non-uniform. If the pointer
5786       // operand is used by any instruction other than a memory access, we
5787       // conservatively assume the pointer operand may be non-uniform.
5788       if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
5789         PossibleNonUniformPtrs.insert(Ptr);
5790
5791       // If the memory instruction will be vectorized and its pointer operand
5792       // is consecutive-like or part of an interleaved group, the pointer
5793       // operand should remain uniform.
5794       else
5795         ConsecutiveLikePtrs.insert(Ptr);
5796     }
5797
5798   // Add to the Worklist all consecutive and consecutive-like pointers that
5799   // aren't also identified as possibly non-uniform.
5800   for (auto *V : ConsecutiveLikePtrs)
5801     if (!PossibleNonUniformPtrs.count(V)) {
5802       DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
5803       Worklist.insert(V);
5804     }
5805
5806   // Expand Worklist in topological order: whenever a new instruction
5807   // is added, its users should be either already inside Worklist, or
5808   // out of scope. This ensures a uniform instruction will only be used
5809   // by other uniform instructions or by out-of-scope instructions.
5810   unsigned idx = 0;
5811   while (idx != Worklist.size()) {
5812     Instruction *I = Worklist[idx++];
5813
5814     for (auto OV : I->operand_values()) {
5815       if (isOutOfScope(OV))
5816         continue;
5817       auto *OI = cast<Instruction>(OV);
5818       if (all_of(OI->users(), [&](User *U) -> bool {
5819             auto *J = cast<Instruction>(U);
5820             return !TheLoop->contains(J) || Worklist.count(J) ||
5821                    (OI == getPointerOperand(J) && isUniformDecision(J, VF));
5822           })) {
5823         Worklist.insert(OI);
5824         DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
5825       }
5826     }
5827   }
5828
5829   // Returns true if Ptr is the pointer operand of a memory access instruction
5830   // I, and I is known to not require scalarization.
5831   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5832     return getPointerOperand(I) == Ptr && isUniformDecision(I, VF);
5833   };
5834
5835   // For an instruction to be added into Worklist above, all its users inside
5836   // the loop should also be in Worklist. However, this condition cannot be
5837   // true for phi nodes that form a cyclic dependence. We must process phi
5838   // nodes separately. An induction variable will remain uniform if all users
5839   // of the induction variable and induction variable update remain uniform.
5840   // The code below handles both pointer and non-pointer induction variables.
5841   for (auto &Induction : *Legal->getInductionVars()) {
5842     auto *Ind = Induction.first;
5843     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5844
5845     // Determine if all users of the induction variable are uniform after
5846     // vectorization.
5847     auto UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
5848       auto *I = cast<Instruction>(U);
5849       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5850              isVectorizedMemAccessUse(I, Ind);
5851     });
5852     if (!UniformInd)
5853       continue;
5854
5855     // Determine if all users of the induction variable update instruction are
5856     // uniform after vectorization.
5857     auto UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
5858       auto *I = cast<Instruction>(U);
5859       return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5860              isVectorizedMemAccessUse(I, IndUpdate);
5861     });
5862     if (!UniformIndUpdate)
5863       continue;
5864
5865     // The induction variable and its update instruction will remain uniform.
5866     Worklist.insert(Ind);
5867     Worklist.insert(IndUpdate);
5868     DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n");
5869     DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n");
5870   }
5871
5872   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5873 }
5874
5875 bool LoopVectorizationLegality::canVectorizeMemory() {
5876   LAI = &(*GetLAA)(*TheLoop);
5877   InterleaveInfo.setLAI(LAI);
5878   const OptimizationRemarkAnalysis *LAR = LAI->getReport();
5879   if (LAR) {
5880     OptimizationRemarkAnalysis VR(Hints->vectorizeAnalysisPassName(),
5881                                   "loop not vectorized: ", *LAR);
5882     ORE->emit(VR);
5883   }
5884   if (!LAI->canVectorizeMemory())
5885     return false;
5886
5887   if (LAI->hasStoreToLoopInvariantAddress()) {
5888     ORE->emit(createMissedAnalysis("CantVectorizeStoreToLoopInvariantAddress")
5889               << "write to a loop invariant address could not be vectorized");
5890     DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
5891     return false;
5892   }
5893
5894   Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
5895   PSE.addPredicate(LAI->getPSE().getUnionPredicate());
5896
5897   return true;
5898 }
5899
5900 bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
5901   Value *In0 = const_cast<Value *>(V);
5902   PHINode *PN = dyn_cast_or_null<PHINode>(In0);
5903   if (!PN)
5904     return false;
5905
5906   return Inductions.count(PN);
5907 }
5908
5909 bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) {
5910   return FirstOrderRecurrences.count(Phi);
5911 }
5912
5913 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
5914   return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
5915 }
5916
5917 bool LoopVectorizationLegality::blockCanBePredicated(
5918     BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) {
5919   const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
5920
5921   for (Instruction &I : *BB) {
5922     // Check that we don't have a constant expression that can trap as an
5923     // operand.
5924     for (Value *Operand : I.operands()) {
5925       if (auto *C = dyn_cast<Constant>(Operand))
5926         if (C->canTrap())
5927           return false;
5928     }
5929     // We might be able to hoist the load.
5930     if (I.mayReadFromMemory()) {
5931       auto *LI = dyn_cast<LoadInst>(&I);
5932       if (!LI)
5933         return false;
5934       if (!SafePtrs.count(LI->getPointerOperand())) {
5935         if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) ||
5936             isLegalMaskedGather(LI->getType())) {
5937           MaskedOp.insert(LI);
5938           continue;
5939         }
5940         // !llvm.mem.parallel_loop_access implies if-conversion safety.
5941         if (IsAnnotatedParallel)
5942           continue;
5943         return false;
5944       }
5945     }
5946
5947     if (I.mayWriteToMemory()) {
5948       auto *SI = dyn_cast<StoreInst>(&I);
5949       // We only support predication of stores in basic blocks with one
5950       // predecessor.
5951       if (!SI)
5952         return false;
5953
5954       // Build a masked store if it is legal for the target.
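      //
      // For example (an illustrative sketch): a conditional store such as
      //
      //   if (c[i]) A[i] = x;
      //
      // can be emitted as a masked vector store whose mask is the vectorized
      // condition, provided the target supports it; otherwise we fall through
      // to the predication limits checked below.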
      if (isLegalMaskedStore(SI->getValueOperand()->getType(),
                             SI->getPointerOperand()) ||
          isLegalMaskedScatter(SI->getValueOperand()->getType())) {
        MaskedOp.insert(SI);
        continue;
      }
5960
5961       bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0);
5962       bool isSinglePredecessor = SI->getParent()->getSinglePredecessor();
5963
5964       if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr ||
5965           !isSinglePredecessor)
5966         return false;
5967     }
5968     if (I.mayThrow())
5969       return false;
5970   }
5971
5972   return true;
5973 }
5974
5975 void InterleavedAccessInfo::collectConstStrideAccesses(
5976     MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
5977     const ValueToValueMap &Strides) {
5978
5979   auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();
5980
5981   // Since it's desired that the load/store instructions be maintained in
5982   // "program order" for the interleaved access analysis, we have to visit the
5983   // blocks in the loop in reverse postorder (i.e., in a topological order).
5984   // Such an ordering will ensure that any load/store that may be executed
5985   // before a second load/store will precede the second load/store in
5986   // AccessStrideInfo.
5987   LoopBlocksDFS DFS(TheLoop);
5988   DFS.perform(LI);
5989   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
5990     for (auto &I : *BB) {
5991       auto *LI = dyn_cast<LoadInst>(&I);
5992       auto *SI = dyn_cast<StoreInst>(&I);
5993       if (!LI && !SI)
5994         continue;
5995
5996       Value *Ptr = getPointerOperand(&I);
5997       // We don't check wrapping here because we don't know yet if Ptr will be
5998       // part of a full group or a group with gaps. Checking wrapping for all
5999       // pointers (even those that end up in groups with no gaps) will be overly
6000       // conservative. For full groups, wrapping should be ok since if we would
6001       // wrap around the address space we would do a memory access at nullptr
6002       // even without the transformation. The wrapping checks are therefore
6003       // deferred until after we've formed the interleaved groups.
6004       int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
6005                                     /*Assume=*/true, /*ShouldCheckWrap=*/false);
6006
6007       const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
6008       PointerType *PtrTy = cast<PointerType>(Ptr->getType());
6009       uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
6010
6011       // An alignment of 0 means target ABI alignment.
6012       unsigned Align = getMemInstAlignment(&I);
6013       if (!Align)
6014         Align = DL.getABITypeAlignment(PtrTy->getElementType());
6015
6016       AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
6017     }
6018 }
6019
6020 // Analyze interleaved accesses and collect them into interleaved load and
6021 // store groups.
6022 //
6023 // When generating code for an interleaved load group, we effectively hoist all
6024 // loads in the group to the location of the first load in program order. When
6025 // generating code for an interleaved store group, we sink all stores to the
6026 // location of the last store. This code motion can change the order of load
6027 // and store instructions and may break dependences.
6028 //
6029 // The code generation strategy mentioned above ensures that we won't violate
6030 // any write-after-read (WAR) dependences.
6031 //
6032 // E.g., for the WAR dependence:  a = A[i];  // (1)
6033 //                                A[i] = b;  // (2)
6034 //
6035 // The store group of (2) is always inserted at or below (2), and the load
6036 // group of (1) is always inserted at or above (1). Thus, the instructions will
6037 // never be reordered. All other dependences are checked to ensure the
6038 // correctness of the instruction reordering.
6039 //
6040 // The algorithm visits all memory accesses in the loop in bottom-up program
6041 // order. Program order is established by traversing the blocks in the loop in
6042 // reverse postorder when collecting the accesses.
6043 //
6044 // We visit the memory accesses in bottom-up order because it can simplify the
6045 // construction of store groups in the presence of write-after-write (WAW)
6046 // dependences.
6047 //
6048 // E.g., for the WAW dependence:  A[i]     = a;  // (1)
6049 //                                A[i]     = b;  // (2)
6050 //                                A[i + 1] = c;  // (3)
6051 //
6052 // We will first create a store group with (3) and (2). (1) can't be added to
6053 // this group because it and (2) are dependent. However, (1) can be grouped
6054 // with other accesses that may precede it in program order. Note that a
6055 // bottom-up order does not imply that WAW dependences should not be checked.
6056 void InterleavedAccessInfo::analyzeInterleaving(
6057     const ValueToValueMap &Strides) {
6058   DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
6059
6060   // Holds all accesses with a constant stride.
6061   MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
6062   collectConstStrideAccesses(AccessStrideInfo, Strides);
6063
6064   if (AccessStrideInfo.empty())
6065     return;
6066
6067   // Collect the dependences in the loop.
6068   collectDependences();
6069
6070   // Holds all interleaved store groups temporarily.
6071   SmallSetVector<InterleaveGroup *, 4> StoreGroups;
6072   // Holds all interleaved load groups temporarily.
6073   SmallSetVector<InterleaveGroup *, 4> LoadGroups;
6074
6075   // Search in bottom-up program order for pairs of accesses (A and B) that can
6076   // form interleaved load or store groups. In the algorithm below, access A
6077   // precedes access B in program order. We initialize a group for B in the
6078   // outer loop of the algorithm, and then in the inner loop, we attempt to
6079   // insert each A into B's group if:
6080   //
6081   //  1. A and B have the same stride,
6082   //  2. A and B have the same memory object size, and
6083   //  3. A belongs in B's group according to its distance from B.
6084   //
6085   // Special care is taken to ensure group formation will not break any
6086   // dependences.
6087   for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
6088        BI != E; ++BI) {
6089     Instruction *B = BI->first;
6090     StrideDescriptor DesB = BI->second;
6091
6092     // Initialize a group for B if it has an allowable stride. Even if we don't
6093     // create a group for B, we continue with the bottom-up algorithm to ensure
6094     // we don't break any of B's dependences.
6095     InterleaveGroup *Group = nullptr;
6096     if (isStrided(DesB.Stride)) {
6097       Group = getInterleaveGroup(B);
6098       if (!Group) {
6099         DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n');
6100         Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
6101       }
6102       if (B->mayWriteToMemory())
6103         StoreGroups.insert(Group);
6104       else
6105         LoadGroups.insert(Group);
6106     }
6107
6108     for (auto AI = std::next(BI); AI != E; ++AI) {
6109       Instruction *A = AI->first;
6110       StrideDescriptor DesA = AI->second;
6111
6112       // Our code motion strategy implies that we can't have dependences
6113       // between accesses in an interleaved group and other accesses located
6114       // between the first and last member of the group. Note that this also
6115       // means that a group can't have more than one member at a given offset.
6116       // The accesses in a group can have dependences with other accesses, but
6117       // we must ensure we don't extend the boundaries of the group such that
6118       // we encompass those dependent accesses.
6119       //
6120       // For example, assume we have the sequence of accesses shown below in a
6121       // stride-2 loop:
6122       //
6123       //     (1, 2) is a group | A[i]   = a;  // (1)
6124       //                       | A[i-1] = b;  // (2) |
6125       //                         A[i-3] = c;  // (3)
6126       //                         A[i]   = d;  // (4) | (2, 4) is not a group
6127       //
6128       // Because accesses (2) and (3) are dependent, we can group (2) with (1)
6129       // but not with (4). If we did, the dependent access (3) would be within
6130       // the boundaries of the (2, 4) group.
6131       if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
6132
6133         // If a dependence exists and A is already in a group, we know that A
6134         // must be a store since A precedes B and WAR dependences are allowed.
6135         // Thus, A would be sunk below B. We release A's group to prevent this
6136         // illegal code motion. A will then be free to form another group with
6137         // instructions that precede it.
6138         if (isInterleaved(A)) {
6139           InterleaveGroup *StoreGroup = getInterleaveGroup(A);
6140           StoreGroups.remove(StoreGroup);
6141           releaseGroup(StoreGroup);
6142         }
6143
6144         // If a dependence exists and A is not already in a group (or it was
6145         // and we just released it), B might be hoisted above A (if B is a
6146         // load) or another store might be sunk below A (if B is a store). In
6147         // either case, we can't add additional instructions to B's group. B
6148         // will only form a group with instructions that it precedes.
6149         break;
6150       }
6151
6152       // At this point, we've checked for illegal code motion. If either A or B
6153       // isn't strided, there's nothing left to do.
6154       if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
6155         continue;
6156
6157       // Ignore A if it's already in a group or isn't the same kind of memory
6158       // operation as B.
6159       if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory())
6160         continue;
6161
6162       // Check rules 1 and 2. Ignore A if its stride or size is different from
6163       // that of B.
6164       if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
6165         continue;
6166
6167       // Ignore A if the memory objects of A and B don't belong to the same
6168       // address space.
6169       if (getMemInstAddressSpace(A) != getMemInstAddressSpace(B))
6170         continue;
6171
6172       // Calculate the distance from A to B.
6173       const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
6174           PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
6175       if (!DistToB)
6176         continue;
6177       int64_t DistanceToB = DistToB->getAPInt().getSExtValue();
6178
6179       // Check rule 3. Ignore A if its distance to B is not a multiple of the
6180       // size.
6181       if (DistanceToB % static_cast<int64_t>(DesB.Size))
6182         continue;
6183
6184       // Ignore A if either A or B is in a predicated block. Although we
6185       // currently prevent group formation for predicated accesses, we may be
6186       // able to relax this limitation in the future once we handle more
6187       // complicated blocks.
6188       if (isPredicated(A->getParent()) || isPredicated(B->getParent()))
6189         continue;
6190
6191       // The index of A is the index of B plus A's distance to B in multiples
6192       // of the size.
6193       int IndexA =
6194           Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);
6195
6196       // Try to insert A into B's group.
6197       if (Group->insertMember(A, IndexA, DesA.Align)) {
6198         DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
6199                      << "    into the interleave group with" << *B << '\n');
6200         InterleaveGroupMap[A] = Group;
6201
6202         // Set the first load in program order as the insert position.
6203         if (A->mayReadFromMemory())
6204           Group->setInsertPos(A);
6205       }
6206     } // Iteration over A accesses.
6207   } // Iteration over B accesses.
6208
6209   // Remove interleaved store groups with gaps.
6210   for (InterleaveGroup *Group : StoreGroups)
6211     if (Group->getNumMembers() != Group->getFactor())
6212       releaseGroup(Group);
6213
6214   // Remove interleaved groups with gaps (currently only loads) whose memory
6215   // accesses may wrap around. We have to revisit the getPtrStride analysis,
6216   // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
6217   // not check wrapping (see documentation there).
6218   // FORNOW we use Assume=false;
6219   // TODO: Change to Assume=true but making sure we don't exceed the threshold
6220   // of runtime SCEV assumptions checks (thereby potentially failing to
6221   // vectorize altogether).
6222   // Additional optional optimizations:
6223   // TODO: If we are peeling the loop and we know that the first pointer doesn't
6224   // wrap then we can deduce that all pointers in the group don't wrap.
6225   // This means that we can forcefully peel the loop in order to only have to
6226   // check the first pointer for no-wrap. Once we change to Assume=true we'll
6227   // need at most one runtime check per interleaved group.
6228   //
6229   for (InterleaveGroup *Group : LoadGroups) {
6230
6231     // Case 1: A full group. We can skip the checks; for full groups, if the
6232     // wide load would wrap around the address space we would do a memory
6233     // access at nullptr even without the transformation.
6234     if (Group->getNumMembers() == Group->getFactor())
6235       continue;
6236
6237     // Case 2: If the first and last members of the group don't wrap, this
6238     // implies that all the pointers in the group don't wrap.
6239     // So we check only group member 0 (which is always guaranteed to exist),
6240     // and group member Factor - 1; if the latter doesn't exist we rely on
6241     // peeling (if it is a non-reversed access -- see Case 3).
6242     Value *FirstMemberPtr = getPointerOperand(Group->getMember(0));
6243     if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
6244                       /*ShouldCheckWrap=*/true)) {
6245       DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
6246                       "first group member potentially pointer-wrapping.\n");
6247       releaseGroup(Group);
6248       continue;
6249     }
6250     Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
6251     if (LastMember) {
6252       Value *LastMemberPtr = getPointerOperand(LastMember);
6253       if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
6254                         /*ShouldCheckWrap=*/true)) {
6255         DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
6256                         "last group member potentially pointer-wrapping.\n");
6257         releaseGroup(Group);
6258       }
6259     } else {
6260       // Case 3: A non-reversed interleaved load group with gaps: We need
6261       // to execute at least one scalar epilogue iteration. This will ensure
6262       // we don't speculatively access memory out-of-bounds. We only need
6263       // to look for a member at index factor - 1, since every group must have
6264       // a member at index zero.
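      //
      // For example (an illustrative sketch): in a stride-3 loop that reads
      // only
      //
      //   ... = A[3 * i] + A[3 * i + 1]; // no member at index 2
      //
      // a wide load issued for the last vector iteration could read past the
      // last element the scalar loop would touch, so a scalar epilogue
      // iteration is required instead of speculating out-of-bounds.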
6265       if (Group->isReverse()) {
6266         releaseGroup(Group);
6267         continue;
6268       }
6269       DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
6270       RequiresScalarEpilogue = true;
6271     }
6272   }
6273 }
6274
6275 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) {
6276   if (!EnableCondStoresVectorization && Legal->getNumPredStores()) {
6277     ORE->emit(createMissedAnalysis("ConditionalStore")
6278               << "store that is conditionally executed prevents vectorization");
6279     DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
6280     return None;
6281   }
6282
6283   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
6284     // TODO: It may be useful to do this, since the check is still likely to
6285     // be dynamically uniform if the target can skip it.
6286     DEBUG(dbgs() << "LV: Not inserting runtime ptr check for divergent "
6287                     "target\n");
6288     ORE->emit(
6289         createMissedAnalysis("CantVersionLoopWithDivergentTarget")
6290         << "runtime pointer checks needed. Not enabled for divergent target");
6291
6292     return None;
6293   }
6294
6295   if (!OptForSize) // Remaining checks deal with scalar loop when OptForSize.
6296     return computeFeasibleMaxVF(OptForSize);
6297
6298   if (Legal->getRuntimePointerChecking()->Need) {
6299     ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize")
6300               << "runtime pointer checks needed. Enable vectorization of this "
6301                  "loop with '#pragma clang loop vectorize(enable)' when "
6302                  "compiling with -Os/-Oz");
6303     DEBUG(dbgs()
6304           << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
6305     return None;
6306   }
6307
6308   // If we optimize the program for size, avoid creating the tail loop.
6309   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
6310   DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
6311
6312   // If we don't know the precise trip count, don't try to vectorize.
6313   if (TC < 2) {
6314     ORE->emit(
6315         createMissedAnalysis("UnknownLoopCountComplexCFG")
6316         << "unable to calculate the loop count due to complex control flow");
6317     DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
6318     return None;
6319   }
6320
6321   unsigned MaxVF = computeFeasibleMaxVF(OptForSize);
6322
6323   if (TC % MaxVF != 0) {
6324     // If the trip count that we found modulo the vectorization factor is not
6325     // zero then we require a tail.
6326     // FIXME: look for a smaller MaxVF that does divide TC rather than give up.
6327     // FIXME: return None if loop requiresScalarEpilog(<MaxVF>), or look for a
6328     //        smaller MaxVF that does not require a scalar epilog.
6329
6330     ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize")
6331               << "cannot optimize for size and vectorize at the "
6332                  "same time. Enable vectorization of this loop "
6333                  "with '#pragma clang loop vectorize(enable)' "
6334                  "when compiling with -Os/-Oz");
6335     DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
6336     return None;
6337   }
6338
6339   return MaxVF;
6340 }
6341
6342 unsigned LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize) {
6343   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
6344   unsigned SmallestType, WidestType;
6345   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
6346   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
6347   unsigned MaxSafeDepDist = -1U;
6348
6349   // Get the maximum safe dependence distance in bits computed by LAA. If the
6350   // loop contains any interleaved accesses, we divide the dependence distance
6351   // by the maximum interleave factor of all interleaved groups. Note that
6352   // although the division ensures correctness, this is a fairly conservative
6353   // computation because the maximum distance computed by LAA may not involve
6354   // any of the interleaved accesses.
6355   if (Legal->getMaxSafeDepDistBytes() != -1U)
6356     MaxSafeDepDist =
6357         Legal->getMaxSafeDepDistBytes() * 8 / Legal->getMaxInterleaveFactor();
6358
6359   WidestRegister =
6360       ((WidestRegister < MaxSafeDepDist) ? WidestRegister : MaxSafeDepDist);
6361   unsigned MaxVectorSize = WidestRegister / WidestType;
6362
6363   DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
6364                << WidestType << " bits.\n");
6365   DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister
6366                << " bits.\n");
6367
6368   if (MaxVectorSize == 0) {
6369     DEBUG(dbgs() << "LV: The target has no vector registers.\n");
6370     MaxVectorSize = 1;
6371   }
6372
6373   assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
6374                                 " into one vector!");
6375
6376   unsigned MaxVF = MaxVectorSize;
6377   if (MaximizeBandwidth && !OptForSize) {
6378     // Collect all viable vectorization factors.
6379     SmallVector<unsigned, 8> VFs;
6380     unsigned NewMaxVectorSize = WidestRegister / SmallestType;
6381     for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2)
6382       VFs.push_back(VS);
6383
6384     // For each VF calculate its register usage.
6385     auto RUs = calculateRegisterUsage(VFs);
6386
6387     // Select the largest VF which doesn't require more registers than existing
6388     // ones.
6389     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
6390     for (int i = RUs.size() - 1; i >= 0; --i) {
6391       if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
6392         MaxVF = VFs[i];
6393         break;
6394       }
6395     }
6396   }
6397   return MaxVF;
6398 }
6399
6400 LoopVectorizationCostModel::VectorizationFactor
6401 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
6402   float Cost = expectedCost(1).first;
6403 #ifndef NDEBUG
6404   const float ScalarCost = Cost;
6405 #endif /* NDEBUG */
6406   unsigned Width = 1;
6407   DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
6408
6409   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
6410   // Ignore scalar width, because the user explicitly wants vectorization.
6411   if (ForceVectorization && MaxVF > 1) {
6412     Width = 2;
6413     Cost = expectedCost(Width).first / (float)Width;
6414   }
6415
6416   for (unsigned i = 2; i <= MaxVF; i *= 2) {
6417     // Notice that the vector loop needs to be executed fewer times, so
6418     // we need to divide the cost of the vector loop by the width of
6419     // the vector elements.
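    //
    // For example (hypothetical numbers): if the scalar body costs 8 and the
    // VF=4 vector body costs 20, the per-iteration vector cost is 20 / 4 = 5,
    // which beats the scalar cost of 8, so width 4 would be preferred here.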
6420     VectorizationCostTy C = expectedCost(i);
6421     float VectorCost = C.first / (float)i;
6422     DEBUG(dbgs() << "LV: Vector loop of width " << i
6423                  << " costs: " << (int)VectorCost << ".\n");
6424     if (!C.second && !ForceVectorization) {
6425       DEBUG(
6426           dbgs() << "LV: Not considering vector loop of width " << i
6427                  << " because it will not generate any vector instructions.\n");
6428       continue;
6429     }
6430     if (VectorCost < Cost) {
6431       Cost = VectorCost;
6432       Width = i;
6433     }
6434   }
6435
6436   DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
6437         << "LV: Vectorization seems to be not beneficial, "
6438         << "but was forced by a user.\n");
6439   DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
6440   VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
6441   return Factor;
6442 }
6443
6444 std::pair<unsigned, unsigned>
6445 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
6446   unsigned MinWidth = -1U;
6447   unsigned MaxWidth = 8;
6448   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6449
6450   // For each block.
6451   for (BasicBlock *BB : TheLoop->blocks()) {
6452     // For each instruction in the loop.
6453     for (Instruction &I : *BB) {
6454       Type *T = I.getType();
6455
6456       // Skip ignored values.
6457       if (ValuesToIgnore.count(&I))
6458         continue;
6459
6460       // Only examine Loads, Stores and PHINodes.
6461       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6462         continue;
6463
6464       // Examine PHI nodes that are reduction variables. Update the type to
6465       // account for the recurrence type.
6466       if (auto *PN = dyn_cast<PHINode>(&I)) {
6467         if (!Legal->isReductionVariable(PN))
6468           continue;
6469         RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
6470         T = RdxDesc.getRecurrenceType();
6471       }
6472
6473       // Examine the stored values.
6474       if (auto *ST = dyn_cast<StoreInst>(&I))
6475         T = ST->getValueOperand()->getType();
6476
6477       // Ignore loaded pointer types and stored pointer types that are not
6478       // vectorizable.
6479       //
6480       // FIXME: The check here attempts to predict whether a load or store will
6481       //        be vectorized. We only know this for certain after a VF has
6482       //        been selected. Here, we assume that if an access can be
6483       //        vectorized, it will be. We should also look at extending this
6484       //        optimization to non-pointer types.
6485       //
6486       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
6487           !Legal->isAccessInterleaved(&I) && !Legal->isLegalGatherOrScatter(&I))
6488         continue;
6489
6490       MinWidth = std::min(MinWidth,
6491                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6492       MaxWidth = std::max(MaxWidth,
6493                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6494     }
6495   }
6496
6497   return {MinWidth, MaxWidth};
6498 }
6499
6500 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
6501                                                            unsigned VF,
6502                                                            unsigned LoopCost) {
6503
6504   // -- The interleave heuristics --
6505   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6506   // There are many micro-architectural considerations that we can't predict
6507   // at this level. For example, frontend pressure (on decode or fetch) due to
6508   // code size, or the number and capabilities of the execution ports.
6509   //
6510   // We use the following heuristics to select the interleave count:
6511   // 1. If the code has reductions, then we interleave to break the cross
6512   //    iteration dependency.
6513   // 2. If the loop is really small, then we interleave to reduce the loop
6514   //    overhead.
6515   // 3. We don't interleave if we think that we will spill registers to memory
6516   //    due to the increased register pressure.
6517
6518   // When we optimize for size, we don't interleave.
6519   if (OptForSize)
6520     return 1;
6521
6522   // The max safe dependence distance already limits the VF; don't interleave.
6523   if (Legal->getMaxSafeDepDistBytes() != -1U)
6524     return 1;
6525
6526   // Do not interleave loops with a relatively small trip count.
6527   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
6528   if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
6529     return 1;
6530
6531   unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
6532   DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
6533                << " registers\n");
6534
6535   if (VF == 1) {
6536     if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6537       TargetNumRegisters = ForceTargetNumScalarRegs;
6538   } else {
6539     if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6540       TargetNumRegisters = ForceTargetNumVectorRegs;
6541   }
6542
6543   RegisterUsage R = calculateRegisterUsage({VF})[0];
6544   // We divide by these values, so assume that we have at least one
6545   // instruction that uses at least one register.
6546   R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
6547   R.NumInstructions = std::max(R.NumInstructions, 1U);
6548
6549   // We calculate the interleave count using the following formula.
6550   // Subtract the number of loop invariants from the number of available
6551   // registers. These registers are used by all of the interleaved instances.
6552   // Next, divide the remaining registers by the number of registers that is
6553   // required by the loop, in order to estimate how many parallel instances
6554   // fit without causing spills. All of this is rounded down if necessary to be
6555   // a power of two. We want a power-of-two interleave count to simplify any
6556   // addressing operations or alignment considerations.
6557   unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
6558                               R.MaxLocalUsers);
6559
6560   // Don't count the induction variable as interleaved.
6561   if (EnableIndVarRegisterHeur)
6562     IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
6563                        std::max(1U, (R.MaxLocalUsers - 1)));
6564
6565   // Clamp the interleave ranges to reasonable counts.
6566   unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
6567
6568   // Check if the user has overridden the max.
6569   if (VF == 1) {
6570     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6571       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6572   } else {
6573     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6574       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6575   }
6576
6577   // If we did not calculate the cost for VF (because the user selected the VF)
6578   // then we calculate the cost of VF here.
6579   if (LoopCost == 0)
6580     LoopCost = expectedCost(VF).first;
6581
6582   // Clamp the calculated IC to be between 1 and the max interleave count
6583   // that the target allows.
6584   if (IC > MaxInterleaveCount)
6585     IC = MaxInterleaveCount;
6586   else if (IC < 1)
6587     IC = 1;
6588
6589   // Interleave if we vectorized this loop and there is a reduction that could
6590   // benefit from interleaving.
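  //
  // For example (an illustrative sketch): with IC = 2, a reduction such as
  //
  //   sum += A[i];
  //
  // is computed with two independent partial sums in the interleaved body
  // that are combined after the loop, roughly halving the cross-iteration
  // dependence chain.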
6591   if (VF > 1 && Legal->getReductionVars()->size()) {
6592     DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6593     return IC;
6594   }
6595
6596   // Note that if we've already vectorized the loop we will have done the
6597   // runtime check and so interleaving won't require further checks.
6598   bool InterleavingRequiresRuntimePointerCheck =
6599       (VF == 1 && Legal->getRuntimePointerChecking()->Need);
6600
6601   // We want to interleave small loops in order to reduce the loop overhead and
6602   // potentially expose ILP opportunities.
6603   DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
6604   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
6605     // We assume that the cost overhead is 1 and we use the cost model
6606     // to estimate the cost of the loop and interleave until the cost of the
6607     // loop overhead is about 5% of the cost of the loop.
6608     unsigned SmallIC =
6609         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6610
6611     // Interleave until store/load ports (estimated by max interleave count) are
6612     // saturated.
6613     unsigned NumStores = Legal->getNumStores();
6614     unsigned NumLoads = Legal->getNumLoads();
6615     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6616     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6617
6618     // If we have a scalar reduction (vector reductions are already dealt with
6619     // by this point), we can increase the critical path length if the loop
6620     // we're interleaving is inside another loop. Limit this, by default, to 2,
6621     // so the critical path only gets increased by one reduction operation.
6622     if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) {
6623       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6624       SmallIC = std::min(SmallIC, F);
6625       StoresIC = std::min(StoresIC, F);
6626       LoadsIC = std::min(LoadsIC, F);
6627     }
6628
6629     if (EnableLoadStoreRuntimeInterleave &&
6630         std::max(StoresIC, LoadsIC) > SmallIC) {
6631       DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6632       return std::max(StoresIC, LoadsIC);
6633     }
6634
6635     DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6636     return SmallIC;
6637   }
6638
6639   // Interleave if this is a large loop (small loops are already dealt with by
6640   // this point) that could benefit from interleaving.
6641   bool HasReductions = (Legal->getReductionVars()->size() > 0);
6642   if (TTI.enableAggressiveInterleaving(HasReductions)) {
6643     DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6644     return IC;
6645   }
6646
6647   DEBUG(dbgs() << "LV: Not Interleaving.\n");
6648   return 1;
6649 }
6650
6651 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6652 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
6653   // This function calculates the register usage by measuring the highest number
6654   // of values that are alive at a single location. Obviously, this is a very
6655   // rough estimation. We scan the loop in topological order and
6656   // assign a number to each instruction. We use RPO to ensure that defs are
6657   // met before their users. We assume that each instruction that has in-loop
6658   // users starts an interval. We record every time that an in-loop value is
6659   // used, so we have a list of the first and last occurrences of each
6660   // instruction. Next, we transpose this data structure into a multi map that
6661   // holds the list of intervals that *end* at a specific location. This multi
6662   // map allows us to perform a linear search. We scan the instructions linearly
6663   // and record each time that a new interval starts, by placing it in a set.
6664   // If we find this value in the multi-map then we remove it from the set.
6665   // The max register usage is the maximum size of the set.
6666   // We also search for instructions that are defined outside the loop, but are
6667   // used inside the loop. We need this number separately from the max-interval
6668   // usage number because when we unroll, loop-invariant values do not take
6669   // more registers.
6670   LoopBlocksDFS DFS(TheLoop);
6671   DFS.perform(LI);
6672
6673   RegisterUsage RU;
6674   RU.NumInstructions = 0;
6675
6676   // Each 'key' in the map opens a new interval. The values
6677   // of the map are the index of the 'last seen' usage of the
6678   // instruction that is the key.
6679   typedef DenseMap<Instruction *, unsigned> IntervalMap;
6680   // Maps an index to its instruction.
6681   DenseMap<unsigned, Instruction *> IdxToInstr;
6682   // Marks the end of each interval.
6683   IntervalMap EndPoint;
6684   // Saves the instructions that are used in the loop.
6685   SmallSet<Instruction *, 8> Ends;
6686   // Saves the list of values that are used in the loop but are
6687   // defined outside the loop, such as arguments and constants.
6688   SmallPtrSet<Value *, 8> LoopInvariants;
6689
6690   unsigned Index = 0;
6691   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6692     RU.NumInstructions += BB->size();
6693     for (Instruction &I : *BB) {
6694       IdxToInstr[Index++] = &I;
6695
6696       // Save the end location of each USE.
6697       for (Value *U : I.operands()) {
6698         auto *Instr = dyn_cast<Instruction>(U);
6699
6700         // Ignore non-instruction values such as arguments, constants, etc.
6701         if (!Instr)
6702           continue;
6703
6704         // If this instruction is outside the loop then record it and continue.
6705         if (!TheLoop->contains(Instr)) {
6706           LoopInvariants.insert(Instr);
6707           continue;
6708         }
6709
6710         // Overwrite previous end points.
6711         EndPoint[Instr] = Index;
6712         Ends.insert(Instr);
6713       }
6714     }
6715   }
6716
6717   // Saves the list of intervals that end with the index in 'key'.
6718   typedef SmallVector<Instruction *, 2> InstrList;
6719   DenseMap<unsigned, InstrList> TransposeEnds;
6720
6721   // Transpose the EndPoints to a list of values that end at each index.
6722   for (auto &Interval : EndPoint)
6723     TransposeEnds[Interval.second].push_back(Interval.first);
6724
6725   SmallSet<Instruction *, 8> OpenIntervals;
6726
6727   // Get the size of the widest register.
6728   unsigned MaxSafeDepDist = -1U;
6729   if (Legal->getMaxSafeDepDistBytes() != -1U)
6730     MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
6731   unsigned WidestRegister =
6732       std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
6733   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6734
6735   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6736   SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);
6737
6738   DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6739
6740   // A lambda that gets the register usage for the given type and VF.
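  // For example (hypothetical values): with a 128-bit widest register, an
  // i64 value at VF = 4 occupies max(1, 4 * 64 / 128) = 2 registers, while
  // an i32 value at VF = 2 occupies max(1, 2 * 32 / 128) = 1.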
6741   auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
6742     if (Ty->isTokenTy())
6743       return 0U;
6744     unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
6745     return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
6746   };
6747
6748   for (unsigned int i = 0; i < Index; ++i) {
6749     Instruction *I = IdxToInstr[i];
6750
6751     // Remove all of the instructions that end at this location.
6752     InstrList &List = TransposeEnds[i];
6753     for (Instruction *ToRemove : List)
6754       OpenIntervals.erase(ToRemove);
6755
6756     // Ignore instructions that are never used within the loop.
6757     if (!Ends.count(I))
6758       continue;
6759
6760     // Skip ignored values.
6761     if (ValuesToIgnore.count(I))
6762       continue;
6763
6764     // For each VF find the maximum usage of registers.
6765     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6766       if (VFs[j] == 1) {
6767         MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size());
6768         continue;
6769       }
6770       collectUniformsAndScalars(VFs[j]);
6771       // Count the number of live intervals.
6772       unsigned RegUsage = 0;
6773       for (auto Inst : OpenIntervals) {
6774         // Skip ignored values for VF > 1.
6775         if (VecValuesToIgnore.count(Inst) ||
6776             isScalarAfterVectorization(Inst, VFs[j]))
6777           continue;
6778         RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
6779       }
6780       MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
6781     }
6782
6783     DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6784                  << OpenIntervals.size() << '\n');
6785
6786     // Add the current instruction to the list of open intervals.
6787     OpenIntervals.insert(I);
6788   }
6789
6790   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6791     unsigned Invariant = 0;
6792     if (VFs[i] == 1)
6793       Invariant = LoopInvariants.size();
6794     else {
6795       for (auto Inst : LoopInvariants)
6796         Invariant += GetRegUsage(Inst->getType(), VFs[i]);
6797     }
6798
6799     DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
6800     DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
6801     DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');
6802     DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n');
6803
6804     RU.LoopInvariantRegs = Invariant;
6805     RU.MaxLocalUsers = MaxUsages[i];
6806     RUs[i] = RU;
6807   }
6808
6809   return RUs;
6810 }
6811
6812 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
6813
6814   // If we aren't vectorizing the loop, or if we've already collected the
6815   // instructions to scalarize, there's nothing to do. Collection may already
6816   // have occurred if we have a user-selected VF and are now computing the
6817   // expected cost for interleaving.
6818   if (VF < 2 || InstsToScalarize.count(VF))
6819     return;
6820
6821   // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6822   // not profitable to scalarize any instructions, the presence of VF in the
6823   // map will indicate that we've analyzed it already.
6824   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6825
6826   // Find all the instructions that are scalar with predication in the loop and
6827   // determine if it would be better to not if-convert the blocks they are in.
6828   // If so, we also record the instructions to scalarize.
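  //
  // For example (an illustrative sketch): in an if-converted block computing
  //
  //   if (c[i]) A[i] = (x[i] + y[i]) / z[i];
  //
  // the division is scalar with predication; if scalarizing its single-use
  // feeding chain (the add) is cheaper than vectorizing it, the discount
  // computed below is non-negative and the chain is recorded for
  // scalarization.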
6829   for (BasicBlock *BB : TheLoop->blocks()) {
6830     if (!Legal->blockNeedsPredication(BB))
6831       continue;
6832     for (Instruction &I : *BB)
6833       if (Legal->isScalarWithPredication(&I)) {
6834         ScalarCostsTy ScalarCosts;
6835         if (computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6836           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6837
6838         // Remember that BB will remain after vectorization.
6839         PredicatedBBsAfterVectorization.insert(BB);
6840       }
6841   }
6842 }
6843
6844 int LoopVectorizationCostModel::computePredInstDiscount(
6845     Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
6846     unsigned VF) {
6847
6848   assert(!isUniformAfterVectorization(PredInst, VF) &&
6849          "Instruction marked uniform-after-vectorization will be predicated");
6850
6851   // Initialize the discount to zero, meaning that the scalar version and the
6852   // vector version cost the same.
6853   int Discount = 0;
6854
6855   // Holds instructions to analyze. The instructions we visit are mapped in
6856   // ScalarCosts. Those instructions are the ones that would be scalarized if
6857   // we find that the scalar version costs less.
6858   SmallVector<Instruction *, 8> Worklist;
6859
6860   // Returns true if the given instruction can be scalarized.
6861   auto canBeScalarized = [&](Instruction *I) -> bool {
6862
6863     // We only attempt to scalarize instructions forming a single-use chain
6864     // from the original predicated block that would otherwise be vectorized.
6865     // Although not strictly necessary, we give up on instructions we know will
6866     // already be scalar to avoid traversing chains that are unlikely to be
6867     // beneficial.
6868     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6869         isScalarAfterVectorization(I, VF))
6870       return false;
6871
6872     // If the instruction is scalar with predication, it will be analyzed
6873     // separately. We ignore it within the context of PredInst.
6874     if (Legal->isScalarWithPredication(I))
6875       return false;
6876
6877     // If any of the instruction's operands are uniform after vectorization,
6878     // the instruction cannot be scalarized. This prevents, for example, a
6879     // masked load from being scalarized.
6880     //
6881     // We assume we will only emit a value for lane zero of an instruction
6882     // marked uniform after vectorization, rather than VF identical values.
6883     // Thus, if we scalarize an instruction that uses a uniform, we would
6884     // create uses of values corresponding to the lanes we aren't emitting code
6885     // for. This behavior can be changed by allowing getScalarValue to clone
6886     // the lane zero values for uniforms rather than asserting.
6887     for (Use &U : I->operands())
6888       if (auto *J = dyn_cast<Instruction>(U.get()))
6889         if (isUniformAfterVectorization(J, VF))
6890           return false;
6891
6892     // Otherwise, we can scalarize the instruction.
6893     return true;
6894   };
6895
6896   // Returns true if an operand that cannot be scalarized must be extracted
6897   // from a vector. We will account for this scalarization overhead below. Note
6898   // that the non-void predicated instructions are placed in their own blocks,
6899   // and their return values are inserted into vectors. Thus, an extract would
6900   // still be required.
6901   auto needsExtract = [&](Instruction *I) -> bool {
6902     return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF);
6903   };
6904
6905   // Compute the expected cost discount from scalarizing the entire expression
6906   // feeding the predicated instruction. We currently only consider expressions
6907   // that are single-use instruction chains.
6908   Worklist.push_back(PredInst);
6909   while (!Worklist.empty()) {
6910     Instruction *I = Worklist.pop_back_val();
6911
6912     // If we've already analyzed the instruction, there's nothing to do.
6913     if (ScalarCosts.count(I))
6914       continue;
6915
6916     // Compute the cost of the vector instruction. Note that this cost already
6917     // includes the scalarization overhead of the predicated instruction.
6918     unsigned VectorCost = getInstructionCost(I, VF).first;
6919
6920     // Compute the cost of the scalarized instruction. This cost is the cost of
6921     // the instruction as if it wasn't if-converted and instead remained in the
6922     // predicated block. We will scale this cost by block probability after
6923     // computing the scalarization overhead.
6924     unsigned ScalarCost = VF * getInstructionCost(I, 1).first;
6925
6926     // Compute the scalarization overhead of needed insertelement instructions
6927     // and phi nodes.
6928     if (Legal->isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6929       ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF),
6930                                                  true, false);
6931       ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI);
6932     }
6933
6934     // Compute the scalarization overhead of needed extractelement
6935     // instructions. For each of the instruction's operands, if the operand can
6936     // be scalarized, add it to the worklist; otherwise, account for the
6937     // overhead.
6938     for (Use &U : I->operands())
6939       if (auto *J = dyn_cast<Instruction>(U.get())) {
6940         assert(VectorType::isValidElementType(J->getType()) &&
6941                "Instruction has non-scalar type");
6942         if (canBeScalarized(J))
6943           Worklist.push_back(J);
6944         else if (needsExtract(J))
6945           ScalarCost += TTI.getScalarizationOverhead(
6946               ToVectorTy(J->getType(), VF), false, true);
6947       }
6948
6949     // Scale the total scalar cost by block probability.
6950     ScalarCost /= getReciprocalPredBlockProb();
6951
6952     // Compute the discount. A non-negative discount means the vector version
6953     // of the instruction costs more, and scalarizing would be beneficial.
6954     Discount += VectorCost - ScalarCost;
6955     ScalarCosts[I] = ScalarCost;
6956   }
6957
6958   return Discount;
6959 }
6960
6961 LoopVectorizationCostModel::VectorizationCostTy
6962 LoopVectorizationCostModel::expectedCost(unsigned VF) {
6963   VectorizationCostTy Cost;
6964
6965   // Collect Uniform and Scalar instructions after vectorization with VF.
6966   collectUniformsAndScalars(VF);
6967
6968   // Collect the instructions (and their associated costs) that will be more
6969   // profitable to scalarize.
6970   collectInstsToScalarize(VF);
6971
6972   // For each block.
6973   for (BasicBlock *BB : TheLoop->blocks()) {
6974     VectorizationCostTy BlockCost;
6975
6976     // For each instruction in the old loop.
6977     for (Instruction &I : *BB) {
6978       // Skip dbg intrinsics.
6979       if (isa<DbgInfoIntrinsic>(I))
6980         continue;
6981
6982       // Skip ignored values.
6983       if (ValuesToIgnore.count(&I))
6984         continue;
6985
6986       VectorizationCostTy C = getInstructionCost(&I, VF);
6987
6988       // Check if we should override the cost.
6989 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 6990 C.first = ForceTargetInstructionCost; 6991 6992 BlockCost.first += C.first; 6993 BlockCost.second |= C.second; 6994 DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF " 6995 << VF << " For instruction: " << I << '\n'); 6996 } 6997 6998 // If we are vectorizing a predicated block, it will have been 6999 // if-converted. This means that the block's instructions (aside from 7000 // stores and instructions that may divide by zero) will now be 7001 // unconditionally executed. For the scalar case, we may not always execute 7002 // the predicated block. Thus, scale the block's cost by the probability of 7003 // executing it. 7004 if (VF == 1 && Legal->blockNeedsPredication(BB)) 7005 BlockCost.first /= getReciprocalPredBlockProb(); 7006 7007 Cost.first += BlockCost.first; 7008 Cost.second |= BlockCost.second; 7009 } 7010 7011 return Cost; 7012 } 7013 7014 /// \brief Gets the address access SCEV after verifying that the access pattern 7015 /// is loop invariant except for the induction variable dependence. 7016 /// 7017 /// This SCEV can be sent to the Target in order to estimate the address 7018 /// calculation cost. 7019 static const SCEV *getAddressAccessSCEV( 7020 Value *Ptr, 7021 LoopVectorizationLegality *Legal, 7022 ScalarEvolution *SE, 7023 const Loop *TheLoop) { 7024 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 7025 if (!Gep) 7026 return nullptr; 7027 7028 // We are looking for a gep with all loop invariant indices except for one 7029 // which should be an induction variable. 7030 unsigned NumOperands = Gep->getNumOperands(); 7031 for (unsigned i = 1; i < NumOperands; ++i) { 7032 Value *Opd = Gep->getOperand(i); 7033 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 7034 !Legal->isInductionVariable(Opd)) 7035 return nullptr; 7036 } 7037 7038 // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV. 7039 return SE->getSCEV(Ptr); 7040 } 7041 7042 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 7043 return Legal->hasStride(I->getOperand(0)) || 7044 Legal->hasStride(I->getOperand(1)); 7045 } 7046 7047 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 7048 unsigned VF) { 7049 Type *ValTy = getMemInstValueType(I); 7050 auto SE = PSE.getSE(); 7051 7052 unsigned Alignment = getMemInstAlignment(I); 7053 unsigned AS = getMemInstAddressSpace(I); 7054 Value *Ptr = getPointerOperand(I); 7055 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 7056 7057 // Figure out whether the access is strided and get the stride value 7058 // if it's known at compile time. 7059 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, SE, TheLoop); 7060 7061 // Get the cost of the scalar memory instruction and address computation. 7062 unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 7063 7064 Cost += VF * 7065 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 7066 AS, I); 7067 7068 // Get the overhead of the extractelement and insertelement instructions 7069 // we might create due to scalarization. 7070 Cost += getScalarizationOverhead(I, VF, TTI); 7071 7072 // If we have a predicated store, it may not be executed for each vector 7073 // lane. Scale the cost by the probability of executing the predicated 7074 // block.
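// Worked example (illustrative, assuming the 50% block-execution
// probability this file models via getReciprocalPredBlockProb(), which
// returns 2): if VF = 4 and the scalarized store, its address
// computation, and its insert/extract overhead add up to 8 units, the
// cost recorded below is 8 / 2 = 4 units.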
7075 if (Legal->isScalarWithPredication(I)) 7076 Cost /= getReciprocalPredBlockProb(); 7077 7078 return Cost; 7079 } 7080 7081 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 7082 unsigned VF) { 7083 Type *ValTy = getMemInstValueType(I); 7084 Type *VectorTy = ToVectorTy(ValTy, VF); 7085 unsigned Alignment = getMemInstAlignment(I); 7086 Value *Ptr = getPointerOperand(I); 7087 unsigned AS = getMemInstAddressSpace(I); 7088 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 7089 7090 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7091 "Stride should be 1 or -1 for consecutive memory access"); 7092 unsigned Cost = 0; 7093 if (Legal->isMaskRequired(I)) 7094 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 7095 else 7096 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I); 7097 7098 bool Reverse = ConsecutiveStride < 0; 7099 if (Reverse) 7100 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 7101 return Cost; 7102 } 7103 7104 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 7105 unsigned VF) { 7106 LoadInst *LI = cast<LoadInst>(I); 7107 Type *ValTy = LI->getType(); 7108 Type *VectorTy = ToVectorTy(ValTy, VF); 7109 unsigned Alignment = LI->getAlignment(); 7110 unsigned AS = LI->getPointerAddressSpace(); 7111 7112 return TTI.getAddressComputationCost(ValTy) + 7113 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) + 7114 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 7115 } 7116 7117 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 7118 unsigned VF) { 7119 Type *ValTy = getMemInstValueType(I); 7120 Type *VectorTy = ToVectorTy(ValTy, VF); 7121 unsigned Alignment = getMemInstAlignment(I); 7122 Value *Ptr = getPointerOperand(I); 7123 7124 return TTI.getAddressComputationCost(VectorTy) + 7125 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 7126 Legal->isMaskRequired(I), Alignment); 7127 } 7128 7129 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 7130 unsigned VF) { 7131 Type *ValTy = getMemInstValueType(I); 7132 Type *VectorTy = ToVectorTy(ValTy, VF); 7133 unsigned AS = getMemInstAddressSpace(I); 7134 7135 auto Group = Legal->getInterleavedAccessGroup(I); 7136 assert(Group && "Failed to get an interleaved access group."); 7137 7138 unsigned InterleaveFactor = Group->getFactor(); 7139 Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 7140 7141 // Holds the indices of existing members in an interleaved load group. 7142 // An interleaved store group doesn't need this as it doesn't allow gaps. 7143 SmallVector<unsigned, 4> Indices; 7144 if (isa<LoadInst>(I)) { 7145 for (unsigned i = 0; i < InterleaveFactor; i++) 7146 if (Group->getMember(i)) 7147 Indices.push_back(i); 7148 } 7149 7150 // Calculate the cost of the whole interleaved group. 7151 unsigned Cost = TTI.getInterleavedMemoryOpCost(I->getOpcode(), WideVecTy, 7152 Group->getFactor(), Indices, 7153 Group->getAlignment(), AS); 7154 7155 if (Group->isReverse()) 7156 Cost += Group->getNumMembers() * 7157 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 7158 return Cost; 7159 } 7160 7161 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 7162 unsigned VF) { 7163 7164 // Calculate the scalar cost only. The vectorization cost should already 7165 // be computed at this point.
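// (Summary of the two branches below: for VF == 1 we return the plain
// scalar cost, i.e. address computation plus the memory operation itself;
// for VF > 1 the widening decision and its cost were already recorded by
// setCostBasedWideningDecision and are simply looked up.)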
7166 if (VF == 1) { 7167 Type *ValTy = getMemInstValueType(I); 7168 unsigned Alignment = getMemInstAlignment(I); 7169 unsigned AS = getMemInstAddressSpace(I); 7170 7171 return TTI.getAddressComputationCost(ValTy) + 7172 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I); 7173 } 7174 return getWideningCost(I, VF); 7175 } 7176 7177 LoopVectorizationCostModel::VectorizationCostTy 7178 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 7179 // If we know that this instruction will remain uniform, check the cost of 7180 // the scalar version. 7181 if (isUniformAfterVectorization(I, VF)) 7182 VF = 1; 7183 7184 if (VF > 1 && isProfitableToScalarize(I, VF)) 7185 return VectorizationCostTy(InstsToScalarize[VF][I], false); 7186 7187 // Forced scalars do not have any scalarization overhead. 7188 if (VF > 1 && ForcedScalars.count(VF) && 7189 ForcedScalars.find(VF)->second.count(I)) 7190 return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false); 7191 7192 Type *VectorTy; 7193 unsigned C = getInstructionCost(I, VF, VectorTy); 7194 7195 bool TypeNotScalarized = 7196 VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF; 7197 return VectorizationCostTy(C, TypeNotScalarized); 7198 } 7199 7200 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) { 7201 if (VF == 1) 7202 return; 7203 for (BasicBlock *BB : TheLoop->blocks()) { 7204 // For each instruction in the old loop. 7205 for (Instruction &I : *BB) { 7206 Value *Ptr = getPointerOperand(&I); 7207 if (!Ptr) 7208 continue; 7209 7210 if (isa<LoadInst>(&I) && Legal->isUniform(Ptr)) { 7211 // Scalar load + broadcast 7212 unsigned Cost = getUniformMemOpCost(&I, VF); 7213 setWideningDecision(&I, VF, CM_Scalarize, Cost); 7214 continue; 7215 } 7216 7217 // We assume that widening is the best solution when possible. 7218 if (Legal->memoryInstructionCanBeWidened(&I, VF)) { 7219 unsigned Cost = getConsecutiveMemOpCost(&I, VF); 7220 setWideningDecision(&I, VF, CM_Widen, Cost); 7221 continue; 7222 } 7223 7224 // Choose between Interleaving, Gather/Scatter or Scalarization. 7225 unsigned InterleaveCost = UINT_MAX; 7226 unsigned NumAccesses = 1; 7227 if (Legal->isAccessInterleaved(&I)) { 7228 auto Group = Legal->getInterleavedAccessGroup(&I); 7229 assert(Group && "Failed to get an interleaved access group."); 7230 7231 // Make one decision for the whole group. 7232 if (getWideningDecision(&I, VF) != CM_Unknown) 7233 continue; 7234 7235 NumAccesses = Group->getNumMembers(); 7236 InterleaveCost = getInterleaveGroupCost(&I, VF); 7237 } 7238 7239 unsigned GatherScatterCost = 7240 Legal->isLegalGatherOrScatter(&I) 7241 ? getGatherScatterCost(&I, VF) * NumAccesses 7242 : UINT_MAX; 7243 7244 unsigned ScalarizationCost = 7245 getMemInstScalarizationCost(&I, VF) * NumAccesses; 7246 7247 // Choose the best option for the current VF, 7248 // write down this decision and use it during vectorization. 7249 unsigned Cost; 7250 InstWidening Decision; 7251 if (InterleaveCost <= GatherScatterCost && 7252 InterleaveCost < ScalarizationCost) { 7253 Decision = CM_Interleave; 7254 Cost = InterleaveCost; 7255 } else if (GatherScatterCost < ScalarizationCost) { 7256 Decision = CM_GatherScatter; 7257 Cost = GatherScatterCost; 7258 } else { 7259 Decision = CM_Scalarize; 7260 Cost = ScalarizationCost; 7261 } 7262 // If the instruction belongs to an interleave group, the whole group 7263 // receives the same decision.
The whole group receives the cost, but 7264 // the cost will actually be assigned to one instruction. 7265 if (auto Group = Legal->getInterleavedAccessGroup(&I)) 7266 setWideningDecision(Group, VF, Decision, Cost); 7267 else 7268 setWideningDecision(&I, VF, Decision, Cost); 7269 } 7270 } 7271 7272 // Make sure that any load of an address and any other address computation 7273 // remains scalar unless there is gather/scatter support. This avoids 7274 // inevitable extracts into address registers, and also has the benefit of 7275 // activating LSR more, since that pass can't optimize vectorized 7276 // addresses. 7277 if (TTI.prefersVectorizedAddressing()) 7278 return; 7279 7280 // Start with all scalar pointer uses. 7281 SmallPtrSet<Instruction *, 8> AddrDefs; 7282 for (BasicBlock *BB : TheLoop->blocks()) 7283 for (Instruction &I : *BB) { 7284 Instruction *PtrDef = 7285 dyn_cast_or_null<Instruction>(getPointerOperand(&I)); 7286 if (PtrDef && TheLoop->contains(PtrDef) && 7287 getWideningDecision(&I, VF) != CM_GatherScatter) 7288 AddrDefs.insert(PtrDef); 7289 } 7290 7291 // Add all instructions used to generate the addresses. 7292 SmallVector<Instruction *, 4> Worklist; 7293 for (auto *I : AddrDefs) 7294 Worklist.push_back(I); 7295 while (!Worklist.empty()) { 7296 Instruction *I = Worklist.pop_back_val(); 7297 for (auto &Op : I->operands()) 7298 if (auto *InstOp = dyn_cast<Instruction>(Op)) 7299 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 7300 AddrDefs.insert(InstOp).second) 7301 Worklist.push_back(InstOp); 7302 } 7303 7304 for (auto *I : AddrDefs) { 7305 if (isa<LoadInst>(I)) { 7306 // Setting the desired widening decision should ideally be handled by 7307 // the cost functions, but since this involves finding out whether the 7308 // loaded register is involved in an address computation, it is instead 7309 // changed here when we know this is the case. 7310 if (getWideningDecision(I, VF) == CM_Widen) 7311 // Scalarize a widened load of an address. 7312 setWideningDecision(I, VF, CM_Scalarize, 7313 (VF * getMemoryInstructionCost(I, 1))); 7314 else if (auto Group = Legal->getInterleavedAccessGroup(I)) { 7315 // Scalarize an interleave group of address loads. 7316 for (unsigned I = 0; I < Group->getFactor(); ++I) { 7317 if (Instruction *Member = Group->getMember(I)) 7318 setWideningDecision(Member, VF, CM_Scalarize, 7319 (VF * getMemoryInstructionCost(Member, 1))); 7320 } 7321 } 7322 } else 7323 // Make sure I gets scalarized and a cost estimate without 7324 // scalarization overhead. 7325 ForcedScalars[VF].insert(I); 7326 } 7327 } 7328 7329 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 7330 unsigned VF, 7331 Type *&VectorTy) { 7332 Type *RetTy = I->getType(); 7333 if (canTruncateToMinimalBitwidth(I, VF)) 7334 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 7335 VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF); 7336 auto SE = PSE.getSE(); 7337 7338 // TODO: We need to estimate the cost of intrinsic calls. 7339 switch (I->getOpcode()) { 7340 case Instruction::GetElementPtr: 7341 // We mark this instruction as zero-cost because the cost of GEPs in 7342 // vectorized code depends on whether the corresponding memory instruction 7343 // is scalarized or not. Therefore, we handle GEPs with the memory 7344 // instruction cost. 7345 return 0; 7346 case Instruction::Br: { 7347 // In cases of scalarized and predicated instructions, there will be VF 7348 // predicated blocks in the vectorized loop.
Each branch around these 7349 // blocks also requires an extract of its vector compare i1 element. 7350 bool ScalarPredicatedBB = false; 7351 BranchInst *BI = cast<BranchInst>(I); 7352 if (VF > 1 && BI->isConditional() && 7353 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7354 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7355 ScalarPredicatedBB = true; 7356 7357 if (ScalarPredicatedBB) { 7358 // Return cost for branches around scalarized and predicated blocks. 7359 Type *Vec_i1Ty = 7360 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7361 return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) + 7362 (TTI.getCFInstrCost(Instruction::Br) * VF)); 7363 } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1) 7364 // The back-edge branch will remain, as will all scalar branches. 7365 return TTI.getCFInstrCost(Instruction::Br); 7366 else 7367 // This branch will be eliminated by if-conversion. 7368 return 0; 7369 // Note: We currently assume zero cost for an unconditional branch inside 7370 // a predicated block since it will become a fall-through, although we 7371 // may decide in the future to call TTI for all branches. 7372 } 7373 case Instruction::PHI: { 7374 auto *Phi = cast<PHINode>(I); 7375 7376 // First-order recurrences are replaced by vector shuffles inside the loop. 7377 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 7378 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 7379 VectorTy, VF - 1, VectorTy); 7380 7381 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7382 // converted into select instructions. We require N - 1 selects per phi 7383 // node, where N is the number of incoming values. 7384 if (VF > 1 && Phi->getParent() != TheLoop->getHeader()) 7385 return (Phi->getNumIncomingValues() - 1) * 7386 TTI.getCmpSelInstrCost( 7387 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7388 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF)); 7389 7390 return TTI.getCFInstrCost(Instruction::PHI); 7391 } 7392 case Instruction::UDiv: 7393 case Instruction::SDiv: 7394 case Instruction::URem: 7395 case Instruction::SRem: 7396 // If we have a predicated instruction, it may not be executed for each 7397 // vector lane. Get the scalarization cost and scale this amount by the 7398 // probability of executing the predicated block. If the instruction is not 7399 // predicated, we fall through to the next case. 7400 if (VF > 1 && Legal->isScalarWithPredication(I)) { 7401 unsigned Cost = 0; 7402 7403 // These instructions have a non-void type, so account for the phi nodes 7404 // that we will create. This cost is likely to be zero. The phi node 7405 // cost, if any, should be scaled by the block probability because it 7406 // models a copy at the end of each predicated block. 7407 Cost += VF * TTI.getCFInstrCost(Instruction::PHI); 7408 7409 // The cost of the non-predicated instruction. 7410 Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy); 7411 7412 // The cost of insertelement and extractelement instructions needed for 7413 // scalarization. 7414 Cost += getScalarizationOverhead(I, VF, TTI); 7415 7416 // Scale the cost by the probability of executing the predicated blocks. 7417 // This assumes the predicated block for each vector lane is equally 7418 // likely.
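// Worked example (illustrative, again assuming a reciprocal block
// probability of 2): with VF = 4, a scalar divide cost of 20, a phi cost
// of 0, and an insert/extract overhead of 8, the cost returned below is
// (4 * 0 + 4 * 20 + 8) / 2 = 44.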
7419 return Cost / getReciprocalPredBlockProb(); 7420 } 7421 LLVM_FALLTHROUGH; 7422 case Instruction::Add: 7423 case Instruction::FAdd: 7424 case Instruction::Sub: 7425 case Instruction::FSub: 7426 case Instruction::Mul: 7427 case Instruction::FMul: 7428 case Instruction::FDiv: 7429 case Instruction::FRem: 7430 case Instruction::Shl: 7431 case Instruction::LShr: 7432 case Instruction::AShr: 7433 case Instruction::And: 7434 case Instruction::Or: 7435 case Instruction::Xor: { 7436 // Since we will replace the stride by 1, the multiplication should go away. 7437 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7438 return 0; 7439 // Certain instructions can be cheaper to vectorize if they have a constant 7440 // second vector operand. One example of this is shifts on x86. 7441 TargetTransformInfo::OperandValueKind Op1VK = 7442 TargetTransformInfo::OK_AnyValue; 7443 TargetTransformInfo::OperandValueKind Op2VK = 7444 TargetTransformInfo::OK_AnyValue; 7445 TargetTransformInfo::OperandValueProperties Op1VP = 7446 TargetTransformInfo::OP_None; 7447 TargetTransformInfo::OperandValueProperties Op2VP = 7448 TargetTransformInfo::OP_None; 7449 Value *Op2 = I->getOperand(1); 7450 7451 // Check for a splat or for a non-uniform vector of constants. 7452 if (isa<ConstantInt>(Op2)) { 7453 ConstantInt *CInt = cast<ConstantInt>(Op2); 7454 if (CInt->getValue().isPowerOf2()) 7455 Op2VP = TargetTransformInfo::OP_PowerOf2; 7456 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 7457 } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) { 7458 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 7459 Constant *SplatValue = cast<Constant>(Op2)->getSplatValue(); 7460 if (SplatValue) { 7461 ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue); 7462 if (CInt && CInt->getValue().isPowerOf2()) 7463 Op2VP = TargetTransformInfo::OP_PowerOf2; 7464 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 7465 } 7466 } else if (Legal->isUniform(Op2)) { 7467 Op2VK = TargetTransformInfo::OK_UniformValue; 7468 } 7469 SmallVector<const Value *, 4> Operands(I->operand_values()); 7470 unsigned N = isScalarAfterVectorization(I, VF) ?
VF : 1; 7471 return N * TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, 7472 Op2VK, Op1VP, Op2VP, Operands); 7473 } 7474 case Instruction::Select: { 7475 SelectInst *SI = cast<SelectInst>(I); 7476 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7477 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7478 Type *CondTy = SI->getCondition()->getType(); 7479 if (!ScalarCond) 7480 CondTy = VectorType::get(CondTy, VF); 7481 7482 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I); 7483 } 7484 case Instruction::ICmp: 7485 case Instruction::FCmp: { 7486 Type *ValTy = I->getOperand(0)->getType(); 7487 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7488 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7489 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7490 VectorTy = ToVectorTy(ValTy, VF); 7491 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I); 7492 } 7493 case Instruction::Store: 7494 case Instruction::Load: { 7495 unsigned Width = VF; 7496 if (Width > 1) { 7497 InstWidening Decision = getWideningDecision(I, Width); 7498 assert(Decision != CM_Unknown && 7499 "CM decision should be taken at this point"); 7500 if (Decision == CM_Scalarize) 7501 Width = 1; 7502 } 7503 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 7504 return getMemoryInstructionCost(I, VF); 7505 } 7506 case Instruction::ZExt: 7507 case Instruction::SExt: 7508 case Instruction::FPToUI: 7509 case Instruction::FPToSI: 7510 case Instruction::FPExt: 7511 case Instruction::PtrToInt: 7512 case Instruction::IntToPtr: 7513 case Instruction::SIToFP: 7514 case Instruction::UIToFP: 7515 case Instruction::Trunc: 7516 case Instruction::FPTrunc: 7517 case Instruction::BitCast: { 7518 // We optimize the truncation of induction variables having constant 7519 // integer steps. The cost of these truncations is the same as the scalar 7520 // operation. 7521 if (isOptimizableIVTruncate(I, VF)) { 7522 auto *Trunc = cast<TruncInst>(I); 7523 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7524 Trunc->getSrcTy(), Trunc); 7525 } 7526 7527 Type *SrcScalarTy = I->getOperand(0)->getType(); 7528 Type *SrcVecTy = 7529 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7530 if (canTruncateToMinimalBitwidth(I, VF)) { 7531 // This cast is going to be shrunk. This may remove the cast or it might 7532 // turn it into a slightly different cast. For example, if MinBW == 16, 7533 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7534 // 7535 // Calculate the modified src and dest types. 7536 Type *MinVecTy = VectorTy; 7537 if (I->getOpcode() == Instruction::Trunc) { 7538 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7539 VectorTy = 7540 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7541 } else if (I->getOpcode() == Instruction::ZExt || 7542 I->getOpcode() == Instruction::SExt) { 7543 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7544 VectorTy = 7545 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7546 } 7547 } 7548 7549 unsigned N = isScalarAfterVectorization(I, VF) ?
VF : 1; 7550 return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I); 7551 } 7552 case Instruction::Call: { 7553 bool NeedToScalarize; 7554 CallInst *CI = cast<CallInst>(I); 7555 unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize); 7556 if (getVectorIntrinsicIDForCall(CI, TLI)) 7557 return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI)); 7558 return CallCost; 7559 } 7560 default: 7561 // The cost of executing VF copies of the scalar instruction. This opcode 7562 // is unknown. Assume that it is the same as 'mul'. 7563 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) + 7564 getScalarizationOverhead(I, VF, TTI); 7565 } // end of switch. 7566 } 7567 7568 char LoopVectorize::ID = 0; 7569 static const char lv_name[] = "Loop Vectorization"; 7570 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7571 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7572 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7573 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7574 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7575 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7576 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7577 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7578 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7579 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7580 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7581 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7582 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7583 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7584 7585 namespace llvm { 7586 Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) { 7587 return new LoopVectorize(NoUnrolling, AlwaysVectorize); 7588 } 7589 } 7590 7591 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7592 7593 // Check if the pointer operand of a load or store instruction is 7594 // consecutive. 7595 if (auto *Ptr = getPointerOperand(Inst)) 7596 return Legal->isConsecutivePtr(Ptr); 7597 return false; 7598 } 7599 7600 void LoopVectorizationCostModel::collectValuesToIgnore() { 7601 // Ignore ephemeral values. 7602 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7603 7604 // Ignore type-promoting instructions we identified during reduction 7605 // detection. 7606 for (auto &Reduction : *Legal->getReductionVars()) { 7607 RecurrenceDescriptor &RedDes = Reduction.second; 7608 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7609 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7610 } 7611 } 7612 7613 LoopVectorizationCostModel::VectorizationFactor 7614 LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) { 7615 7616 // A width of 1 means no vectorization; a cost of 0 means uncomputed cost. 7617 const LoopVectorizationCostModel::VectorizationFactor NoVectorization = {1U, 7618 0U}; 7619 Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize); 7620 if (!MaybeMaxVF.hasValue()) // Cases considered too costly to vectorize. 7621 return NoVectorization; 7622 7623 if (UserVF) { 7624 DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 7625 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 7626 // Collect the instructions (and their associated costs) that will be more 7627 // profitable to scalarize.
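// (Illustrative note: a user-specified width, e.g. from
// #pragma clang loop vectorize_width(4), bypasses the cost-driven search
// over candidate widths; the cost model only prepares its per-instruction
// decisions for that single VF before the plan is returned.)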
7628 CM.selectUserVectorizationFactor(UserVF); 7629 return {UserVF, 0}; 7630 } 7631 7632 unsigned MaxVF = MaybeMaxVF.getValue(); 7633 assert(MaxVF != 0 && "MaxVF is zero."); 7634 if (MaxVF == 1) 7635 return NoVectorization; 7636 7637 // Select the optimal vectorization factor. 7638 return CM.selectVectorizationFactor(MaxVF); 7639 } 7640 7641 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV) { 7642 // Perform the actual loop transformation. 7643 7644 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 7645 ILV.createVectorizedLoopSkeleton(); 7646 7647 //===------------------------------------------------===// 7648 // 7649 // Notice: any optimization or new instruction that goes 7650 // into the code below should also be implemented in 7651 // the cost model. 7652 // 7653 //===------------------------------------------------===// 7654 7655 // 2. Copy and widen instructions from the old loop into the new loop. 7656 7657 // Move instructions to handle first-order recurrences. 7658 DenseMap<Instruction *, Instruction *> SinkAfter = Legal->getSinkAfter(); 7659 for (auto &Entry : SinkAfter) { 7660 Entry.first->removeFromParent(); 7661 Entry.first->insertAfter(Entry.second); 7662 DEBUG(dbgs() << "Sinking" << *Entry.first << " after" << *Entry.second 7663 << " to vectorize a 1st order recurrence.\n"); 7664 } 7665 7666 // Collect instructions from the original loop that will become trivially dead 7667 // in the vectorized loop. We don't need to vectorize these instructions. For 7668 // example, original induction update instructions can become dead because we 7669 // separately emit induction "steps" when generating code for the new loop. 7670 // Similarly, we create a new latch condition when setting up the structure 7671 // of the new loop, so the old one can become dead. 7672 SmallPtrSet<Instruction *, 4> DeadInstructions; 7673 collectTriviallyDeadInstructions(DeadInstructions); 7674 7675 // Scan the loop in a topological order to ensure that defs are vectorized 7676 // before users. 7677 LoopBlocksDFS DFS(OrigLoop); 7678 DFS.perform(LI); 7679 7680 // Vectorize all instructions in the original loop that will not become 7681 // trivially dead when vectorized. 7682 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) 7683 for (Instruction &I : *BB) 7684 if (!DeadInstructions.count(&I)) 7685 ILV.vectorizeInstruction(I); 7686 7687 // 3. Fix the vectorized code: take care of header phis, live-outs, 7688 // predication, updating analyses. 7689 ILV.fixVectorizedLoop(); 7690 } 7691 7692 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 7693 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 7694 BasicBlock *Latch = OrigLoop->getLoopLatch(); 7695 7696 // We create new control-flow for the vectorized loop, so the original 7697 // condition will be dead after vectorization if it's only used by the 7698 // branch. 7699 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 7700 if (Cmp && Cmp->hasOneUse()) 7701 DeadInstructions.insert(Cmp); 7702 7703 // We create new "steps" for induction variable updates to which the original 7704 // induction variables map. An original update instruction will be dead if 7705 // all its users except the induction variable are dead.
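// For example (illustrative IR):
//   %i      = phi i64 [ 0, %preheader ], [ %i.next, %latch ]
//   %i.next = add nuw nsw i64 %i, 1
// If %i.next is used only by the phi (the latch compare was already
// handled above), it is dead here: the vectorized loop emits fresh
// induction steps instead.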
7706 for (auto &Induction : *Legal->getInductionVars()) { 7707 PHINode *Ind = Induction.first; 7708 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 7709 if (all_of(IndUpdate->users(), [&](User *U) -> bool { 7710 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 7711 })) 7712 DeadInstructions.insert(IndUpdate); 7713 } 7714 } 7715 7716 void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) { 7717 auto *SI = dyn_cast<StoreInst>(Instr); 7718 bool IfPredicateInstr = (SI && Legal->blockNeedsPredication(SI->getParent())); 7719 7720 return scalarizeInstruction(Instr, IfPredicateInstr); 7721 } 7722 7723 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 7724 7725 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 7726 7727 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 7728 Instruction::BinaryOps BinOp) { 7729 // When unrolling and the VF is 1, we only need to add a simple scalar. 7730 Type *Ty = Val->getType(); 7731 assert(!Ty->isVectorTy() && "Val must be a scalar"); 7732 7733 if (Ty->isFloatingPointTy()) { 7734 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 7735 7736 // Floating point operations had to be 'fast' to enable the unrolling. 7737 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 7738 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 7739 } 7740 Constant *C = ConstantInt::get(Ty, StartIdx); 7741 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 7742 } 7743 7744 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 7745 SmallVector<Metadata *, 4> MDs; 7746 // Reserve first location for self reference to the LoopID metadata node. 7747 MDs.push_back(nullptr); 7748 bool IsUnrollMetadata = false; 7749 MDNode *LoopID = L->getLoopID(); 7750 if (LoopID) { 7751 // First find existing loop unrolling disable metadata. 7752 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 7753 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 7754 if (MD) { 7755 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 7756 IsUnrollMetadata = 7757 S && S->getString().startswith("llvm.loop.unroll.disable"); 7758 } 7759 MDs.push_back(LoopID->getOperand(i)); 7760 } 7761 } 7762 7763 if (!IsUnrollMetadata) { 7764 // Add runtime unroll disable metadata. 7765 LLVMContext &Context = L->getHeader()->getContext(); 7766 SmallVector<Metadata *, 1> DisableOperands; 7767 DisableOperands.push_back( 7768 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 7769 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 7770 MDs.push_back(DisableNode); 7771 MDNode *NewLoopID = MDNode::get(Context, MDs); 7772 // Set operand 0 to refer to the loop id itself. 7773 NewLoopID->replaceOperandWith(0, NewLoopID); 7774 L->setLoopID(NewLoopID); 7775 } 7776 } 7777 7778 bool LoopVectorizePass::processLoop(Loop *L) { 7779 assert(L->empty() && "Only process inner loops."); 7780 7781 #ifndef NDEBUG 7782 const std::string DebugLocStr = getDebugLocString(L); 7783 #endif /* NDEBUG */ 7784 7785 DEBUG(dbgs() << "\nLV: Checking a loop in \"" 7786 << L->getHeader()->getParent()->getName() << "\" from " 7787 << DebugLocStr << "\n"); 7788 7789 LoopVectorizeHints Hints(L, DisableUnrolling, *ORE); 7790 7791 DEBUG(dbgs() << "LV: Loop hints:" 7792 << " force=" 7793 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 7794 ? "disabled" 7795 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 7796 ? 
"enabled" 7797 : "?")) 7798 << " width=" << Hints.getWidth() 7799 << " unroll=" << Hints.getInterleave() << "\n"); 7800 7801 // Function containing loop 7802 Function *F = L->getHeader()->getParent(); 7803 7804 // Looking at the diagnostic output is the only way to determine if a loop 7805 // was vectorized (other than looking at the IR or machine code), so it 7806 // is important to generate an optimization remark for each loop. Most of 7807 // these messages are generated as OptimizationRemarkAnalysis. Remarks 7808 // generated as OptimizationRemark and OptimizationRemarkMissed are 7809 // less verbose reporting vectorized loops and unvectorized loops that may 7810 // benefit from vectorization, respectively. 7811 7812 if (!Hints.allowVectorization(F, L, AlwaysVectorize)) { 7813 DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 7814 return false; 7815 } 7816 7817 PredicatedScalarEvolution PSE(*SE, *L); 7818 7819 // Check if it is legal to vectorize the loop. 7820 LoopVectorizationRequirements Requirements(*ORE); 7821 LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE, 7822 &Requirements, &Hints); 7823 if (!LVL.canVectorize()) { 7824 DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 7825 emitMissedWarning(F, L, Hints, ORE); 7826 return false; 7827 } 7828 7829 // Check the function attributes to find out if this function should be 7830 // optimized for size. 7831 bool OptForSize = 7832 Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize(); 7833 7834 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 7835 // count by optimizing for size, to minimize overheads. 7836 unsigned ExpectedTC = SE->getSmallConstantMaxTripCount(L); 7837 bool HasExpectedTC = (ExpectedTC > 0); 7838 7839 if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) { 7840 auto EstimatedTC = getLoopEstimatedTripCount(L); 7841 if (EstimatedTC) { 7842 ExpectedTC = *EstimatedTC; 7843 HasExpectedTC = true; 7844 } 7845 } 7846 7847 if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) { 7848 DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 7849 << "This loop is worth vectorizing only if no scalar " 7850 << "iteration overheads are incurred."); 7851 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 7852 DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 7853 else { 7854 DEBUG(dbgs() << "\n"); 7855 // Loops with a very small trip count are considered for vectorization 7856 // under OptForSize, thereby making sure the cost of their loop body is 7857 // dominant, free of runtime guards and scalar iteration overheads. 7858 OptForSize = true; 7859 } 7860 } 7861 7862 // Check the function attributes to see if implicit floats are allowed. 7863 // FIXME: This check doesn't seem possibly correct -- what if the loop is 7864 // an integer loop and the vector instructions selected are purely integer 7865 // vector instructions? 7866 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 7867 DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat" 7868 "attribute is used.\n"); 7869 ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(), 7870 "NoImplicitFloat", L) 7871 << "loop not vectorized due to NoImplicitFloat attribute"); 7872 emitMissedWarning(F, L, Hints, ORE); 7873 return false; 7874 } 7875 7876 // Check if the target supports potentially unsafe FP vectorization. 
7877 // FIXME: Add a check for the type of safety issue (denormal, signaling) 7878 // for the target we're vectorizing for, to make sure none of the 7879 // additional fp-math flags can help. 7880 if (Hints.isPotentiallyUnsafe() && 7881 TTI->isFPVectorizationPotentiallyUnsafe()) { 7882 DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n"); 7883 ORE->emit( 7884 createMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L) 7885 << "loop not vectorized due to unsafe FP support."); 7886 emitMissedWarning(F, L, Hints, ORE); 7887 return false; 7888 } 7889 7890 // Use the cost model. 7891 LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F, 7892 &Hints); 7893 CM.collectValuesToIgnore(); 7894 7895 // Use the planner for vectorization. 7896 LoopVectorizationPlanner LVP(L, LI, &LVL, CM); 7897 7898 // Get the user vectorization factor. 7899 unsigned UserVF = Hints.getWidth(); 7900 7901 // Plan how to best vectorize, return the best VF and its cost. 7902 LoopVectorizationCostModel::VectorizationFactor VF = 7903 LVP.plan(OptForSize, UserVF); 7904 7905 // Select the interleave count. 7906 unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost); 7907 7908 // Get the user interleave count. 7909 unsigned UserIC = Hints.getInterleave(); 7910 7911 // Identify the diagnostic messages that should be produced. 7912 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg; 7913 bool VectorizeLoop = true, InterleaveLoop = true; 7914 if (Requirements.doesNotMeet(F, L, Hints)) { 7915 DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization " 7916 "requirements.\n"); 7917 emitMissedWarning(F, L, Hints, ORE); 7918 return false; 7919 } 7920 7921 if (VF.Width == 1) { 7922 DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); 7923 VecDiagMsg = std::make_pair( 7924 "VectorizationNotBeneficial", 7925 "the cost-model indicates that vectorization is not beneficial"); 7926 VectorizeLoop = false; 7927 } 7928 7929 if (IC == 1 && UserIC <= 1) { 7930 // Tell the user interleaving is not beneficial. 7931 DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n"); 7932 IntDiagMsg = std::make_pair( 7933 "InterleavingNotBeneficial", 7934 "the cost-model indicates that interleaving is not beneficial"); 7935 InterleaveLoop = false; 7936 if (UserIC == 1) { 7937 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled"; 7938 IntDiagMsg.second += 7939 " and is explicitly disabled or interleave count is set to 1"; 7940 } 7941 } else if (IC > 1 && UserIC == 1) { 7942 // Tell the user interleaving is beneficial, but it is explicitly disabled. 7943 DEBUG(dbgs() 7944 << "LV: Interleaving is beneficial but is explicitly disabled.\n"); 7945 IntDiagMsg = std::make_pair( 7946 "InterleavingBeneficialButDisabled", 7947 "the cost-model indicates that interleaving is beneficial " 7948 "but is explicitly disabled or interleave count is set to 1"); 7949 InterleaveLoop = false; 7950 } 7951 7952 // Override IC if the user provided an interleave count. 7953 IC = UserIC > 0 ? UserIC : IC; 7954 7955 // Emit diagnostic messages, if any. 7956 const char *VAPassName = Hints.vectorizeAnalysisPassName(); 7957 if (!VectorizeLoop && !InterleaveLoop) { 7958 // Do not vectorize or interleave the loop.
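// (Illustrative: building with clang -Rpass-missed=loop-vectorize, or
// running opt with -pass-remarks-missed=loop-vectorize, surfaces the two
// remarks emitted below at the loop's source location.)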
7959 ORE->emit(OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 7960 L->getStartLoc(), L->getHeader()) 7961 << VecDiagMsg.second); 7962 ORE->emit(OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 7963 L->getStartLoc(), L->getHeader()) 7964 << IntDiagMsg.second); 7965 return false; 7966 } else if (!VectorizeLoop && InterleaveLoop) { 7967 DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 7968 ORE->emit(OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 7969 L->getStartLoc(), L->getHeader()) 7970 << VecDiagMsg.second); 7971 } else if (VectorizeLoop && !InterleaveLoop) { 7972 DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in " 7973 << DebugLocStr << '\n'); 7974 ORE->emit(OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 7975 L->getStartLoc(), L->getHeader()) 7976 << IntDiagMsg.second); 7977 } else if (VectorizeLoop && InterleaveLoop) { 7978 DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in " 7979 << DebugLocStr << '\n'); 7980 DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 7981 } 7982 7983 using namespace ore; 7984 if (!VectorizeLoop) { 7985 assert(IC > 1 && "interleave count should not be 1 or 0"); 7986 // If we decided that it is not profitable to vectorize the loop, then 7987 // interleave it. 7988 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, 7989 &CM); 7990 LVP.executePlan(Unroller); 7991 7992 ORE->emit(OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 7993 L->getHeader()) 7994 << "interleaved loop (interleave count: " 7995 << NV("InterleaveCount", IC) << ")"); 7996 } else { 7997 // If we decided that it is *profitable* to vectorize the loop, then do it. 7998 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 7999 &LVL, &CM); 8000 LVP.executePlan(LB); 8001 ++LoopsVectorized; 8002 8003 // Add metadata to disable runtime unrolling of the scalar loop when there 8004 // are no runtime checks for strides and memory. A scalar loop that is 8005 // rarely used is not worth unrolling. 8006 if (!LB.areSafetyChecksAdded()) 8007 AddRuntimeUnrollDisableMetaData(L); 8008 8009 // Report the vectorization decision. 8010 ORE->emit(OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 8011 L->getHeader()) 8012 << "vectorized loop (vectorization width: " 8013 << NV("VectorizationFactor", VF.Width) 8014 << ", interleave count: " << NV("InterleaveCount", IC) << ")"); 8015 } 8016 8017 // Mark the loop as already vectorized to avoid vectorizing it again. 8018 Hints.setAlreadyVectorized(); 8019 8020 DEBUG(verifyFunction(*L->getHeader()->getParent())); 8021 return true; 8022 } 8023 8024 bool LoopVectorizePass::runImpl( 8025 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 8026 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 8027 DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_, 8028 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 8029 OptimizationRemarkEmitter &ORE_) { 8030 8031 SE = &SE_; 8032 LI = &LI_; 8033 TTI = &TTI_; 8034 DT = &DT_; 8035 BFI = &BFI_; 8036 TLI = TLI_; 8037 AA = &AA_; 8038 AC = &AC_; 8039 GetLAA = &GetLAA_; 8040 DB = &DB_; 8041 ORE = &ORE_; 8042 8043 // Don't attempt if 8044 // 1. the target claims to have no vector registers, and 8045 // 2. interleaving won't help ILP. 8046 // 8047 // The second condition is necessary because, even if the target has no 8048 // vector registers, loop vectorization may still enable scalar 8049 // interleaving.
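// (Illustrative: a target reporting zero vector registers but a maximum
// interleave factor of at least 2 still runs the pass, so that
// InnerLoopUnroller can interleave scalar iterations for ILP.)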
8050 if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2) 8051 return false; 8052 8053 bool Changed = false; 8054 8055 // The vectorizer requires loops to be in simplified form. 8056 // Since simplification may add new inner loops, it has to run before the 8057 // legality and profitability checks. This means running the loop vectorizer 8058 // will simplify all loops, regardless of whether anything ends up being 8059 // vectorized. 8060 for (auto &L : *LI) 8061 Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */); 8062 8063 // Build up a worklist of inner loops to vectorize. This is necessary as 8064 // the act of vectorizing or partially unrolling a loop creates new loops 8065 // and can invalidate iterators across the loops. 8066 SmallVector<Loop *, 8> Worklist; 8067 8068 for (Loop *L : *LI) 8069 addAcyclicInnerLoop(*L, Worklist); 8070 8071 LoopsAnalyzed += Worklist.size(); 8072 8073 // Now walk the identified inner loops. 8074 while (!Worklist.empty()) { 8075 Loop *L = Worklist.pop_back_val(); 8076 8077 // For the inner loops we actually process, form LCSSA to simplify the 8078 // transform. 8079 Changed |= formLCSSARecursively(*L, *DT, LI, SE); 8080 8081 Changed |= processLoop(L); 8082 } 8083 8084 // Process each loop nest in the function. 8085 return Changed; 8086 8087 } 8088 8089 8090 PreservedAnalyses LoopVectorizePass::run(Function &F, 8091 FunctionAnalysisManager &AM) { 8092 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 8093 auto &LI = AM.getResult<LoopAnalysis>(F); 8094 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 8095 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 8096 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 8097 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 8098 auto &AA = AM.getResult<AAManager>(F); 8099 auto &AC = AM.getResult<AssumptionAnalysis>(F); 8100 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 8101 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 8102 8103 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 8104 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 8105 [&](Loop &L) -> const LoopAccessInfo & { 8106 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI}; 8107 return LAM.getResult<LoopAccessAnalysis>(L, AR); 8108 }; 8109 bool Changed = 8110 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE); 8111 if (!Changed) 8112 return PreservedAnalyses::all(); 8113 PreservedAnalyses PA; 8114 PA.preserve<LoopAnalysis>(); 8115 PA.preserve<DominatorTreeAnalysis>(); 8116 PA.preserve<BasicAA>(); 8117 PA.preserve<GlobalsAA>(); 8118 return PA; 8119 } 8120
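// Usage sketch (illustrative, not part of this file): with the new pass
// manager, the pass can be added to a function pipeline directly, e.g.:
//
//   FunctionPassManager FPM;
//   FPM.addPass(LoopVectorizePass());
//
// With the legacy pass manager, use the createLoopVectorizePass(NoUnrolling,
// AlwaysVectorize) factory defined above.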