//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis
createMissedAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                     Instruction *I = nullptr) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

namespace {

class LoopVectorizationRequirements;

} // end anonymous namespace

/// A helper function for converting scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}

// FIXME: The following helper functions have multiple implementations
// in the project. They can be effectively organized in a common Load/Store
// utilities unit.

/// A helper function that returns the type of a loaded or stored value.
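/// For example, for a 'store i32 %V, i32* %P' this returns i32; for a load it
/// returns the loaded type.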
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns the alignment of a load or store
/// instruction.
static unsigned getMemInstAlignment(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlignment();
  return cast<StoreInst>(I)->getAlignment();
}

/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
static unsigned getMemInstAddressSpace(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V)) {
    FastMathFlags Flags;
    Flags.setFast();
    cast<Instruction>(V)->setFastMathFlags(Flags);
  }
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found in the loop.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}
  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop. Unlink the old loop and connect the new one.
  /// Return the pre-header block of the new loop.
  BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I);

  /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive.
  void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
                            bool IfPredicateInstr);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
  /// vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Vectorize Load and Store instructions, optionally masking the vector
  /// operations if \p BlockInMask is non-null.
  void vectorizeMemoryInstruction(Instruction *Instr,
                                  VectorParts *BlockInMask = nullptr);

  /// \brief Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// \brief The loop exit block may have single-value PHI nodes with some
  /// incoming value. While vectorizing we only handled real values
  /// that were defined inside the loop, and we should have one value for
  /// each predecessor of its parent basic block. See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Instruction *EntryVal);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
  /// latter case \p EntryVal is a TruncInst and we must not record anything for
  /// that IV, but it's error-prone to expect callers of this routine to care
  /// about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
                                             const Instruction *EntryVal,
                                             Value *VectorLoopValue,
                                             unsigned Part,
                                             unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AliasAnalysis *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle block between the vector and the scalar loops.
  BasicBlock *LoopMiddleBlock;

  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

} // end namespace llvm

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
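/// Returns \p I itself if neither \p I nor any of its instruction operands
/// carries a debug location.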
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst))
      B.SetCurrentDebugLocation(DIL->cloneWithDuplicationFactor(UF * VF));
    else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
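/// In the load group example above, the member of index 2 (A[i+2]) is such a
/// gap, and the group is still valid.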
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is relative to the leader and may be
  /// negative if \p Instr becomes the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // Make sure the largest index still fits within the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if the group contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

  /// Add metadata (e.g. alias info) from the instructions in this group to \p
  /// NewInst.
  ///
  /// FIXME: this function currently does not add noalias metadata the way
  /// addNewMetadata does. To do that we need to compute the intersection of
  /// the noalias info from all members.
  void addMetadata(Instruction *NewInst) const {
    SmallVector<Value *, 4> VL;
    std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
                   [](std::pair<int, Instruction *> p) { return p.second; });
    propagateMetadata(NewInst, VL);
  }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey = 0;
  int LargestKey = 0;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g.
  //        %even = load i32                    // Insert Position
  //        %add = add i32 %even                // Use of %even
  //        %odd = load i32
  //
  //        store i32 %even
  //        %odd = add i32                      // Def of %odd
  //        store i32 %odd                      // Insert Position
  Instruction *InsertPos;
};

} // end namespace llvm

namespace {

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise the analysis is meaningless, since vectorizing
/// interleaved accesses would be unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the members and their groups in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr does not belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

  /// \brief Initialize the LoopAccessInfo used for dependence checking.
  void setLAI(const LoopAccessInfo *Info) { LAI = Info; }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;

  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI = nullptr;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue = false;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// Holds dependences among the memory accesses in the loop. It maps a source
  /// access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor() = default;
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;

    // The scalar expression of this access.
    const SCEV *Scev = nullptr;

    // The size of the memory object.
    uint64_t Size = 0;

    // The alignment of this access.
    unsigned Align = 0;
  };

  /// \brief A type for holding instructions and their stride descriptors.
  using StrideEntry = std::pair<Instruction *, StrideDescriptor>;

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);

  /// \brief Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride) {
    unsigned Factor = std::abs(Stride);
    return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
  }

  /// \brief Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }

  /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return true if reordering is
  /// unnecessary or is known to be safe, and false if \p A and \p B may be
  /// dependent and must not be reordered.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {
    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B. Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
  }

  /// \brief Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for it.
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE, HK_ISVECTORIZED };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      case HK_ISVECTORIZED:
        return (Val == 0 || Val == 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;

  /// Vectorization interleave factor.
  Hint Interleave;

  /// Vectorization forced.
  Hint Force;

  /// Already vectorized.
  Hint IsVectorized;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe = false;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
                     OptimizationRemarkEmitter &ORE)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        IsVectorized("isvectorized", 0, HK_ISVECTORIZED), TheLoop(L), ORE(ORE) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    if (IsVectorized.Value != 1)
      // If the vectorization width and interleaving count are both 1 then
      // consider the loop to have been already vectorized because there's
      // nothing more that we can do.
      IsVectorized.Value = Width.Value == 1 && Interleave.Value == 1;
    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the 'isvectorized' hint.
  void setAlreadyVectorized() {
    IsVectorized.Value = 1;
    Hint Hints[] = {IsVectorized};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (getIsVectorized() == 1) {
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      ORE.emit([&]() {
        return OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
                                          "AllDisabled", L->getStartLoc(),
                                          L->getHeader())
               << "loop not vectorized: vectorization and interleaving are "
                  "explicitly disabled, or the loop has already been "
                  "vectorized";
      });
      return false;
    }

    return true;
  }

  /// Dumps all the hint information.
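  /// The emitted remark notes that the loop was not vectorized and, when
  /// vectorization was explicitly forced, also records the Force, Width, and
  /// Interleave hint values.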
  void emitRemarkWithHints() const {
    using namespace ore;

    ORE.emit([&]() {
      if (Force.Value == LoopVectorizeHints::FK_Disabled)
        return OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
               << "loop not vectorized: vectorization is explicitly disabled";
      else {
        OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
                                   TheLoop->getStartLoc(),
                                   TheLoop->getHeader());
        R << "loop not vectorized";
        if (Force.Value == LoopVectorizeHints::FK_Enabled) {
          R << " (Force=" << NV("Force", true);
          if (Width.Value != 0)
            R << ", Vector Width=" << NV("VectorWidth", Width.Value);
          if (Interleave.Value != 0)
            R << ", Interleave Count="
              << NV("InterleaveCount", Interleave.Value);
          R << ")";
        }
        return R;
      }
    });
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  unsigned getIsVectorized() const { return IsVectorized.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }

  /// \brief If hints are provided that force vectorization, use the AlwaysPrint
  /// pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const {
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return OptimizationRemarkAnalysis::AlwaysPrint;
  }

  bool allowReordering() const {
    // When hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations that is given by the scalar
    // loop. This is not enabled by default because it can be unsafe or
    // inefficient. For example, reordering floating-point operations will
    // change the way round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either a MDString or a MDNode with the first
      // operand a MDString.
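      // e.g. an MDNode such as !{!"llvm.loop.vectorize.width", i32 4}.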
      if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        if (!MD || MD->getNumOperands() == 0)
          continue;
        S = dyn_cast<MDString>(MD->getOperand(0));
        for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
          Args.push_back(MD->getOperand(i));
      } else {
        S = dyn_cast<MDString>(LoopID->getOperand(i));
        assert(Args.size() == 0 && "too many arguments for MDString");
      }

      if (!S)
        continue;

      // Check if the hint starts with the loop metadata prefix.
      StringRef Name = S->getString();
      if (Args.size() == 1)
        setHint(Name, Args[0]);
    }
  }

  /// Checks a string hint with one operand and sets the value if valid.
  void setHint(StringRef Name, Metadata *Arg) {
    if (!Name.startswith(Prefix()))
      return;
    Name = Name.substr(Prefix().size(), StringRef::npos);

    const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
    if (!C)
      return;
    unsigned Val = C->getZExtValue();

    Hint *Hints[] = {&Width, &Interleave, &Force, &IsVectorized};
    for (auto H : Hints) {
      if (Name == H->Name) {
        if (H->validate(Val))
          H->Value = Val;
        else
          DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
        break;
      }
    }
  }

  /// Create a new hint from name / value pair.
  MDNode *createHintMetadata(StringRef Name, unsigned V) const {
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    Metadata *MDs[] = {MDString::get(Context, Name),
                       ConstantAsMetadata::get(
                           ConstantInt::get(Type::getInt32Ty(Context), V))};
    return MDNode::get(Context, MDs);
  }

  /// Matches metadata with hint name.
  bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
    MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
    if (!Name)
      return false;

    for (auto H : HintTypes)
      if (Name->getString().endswith(H.Name))
        return true;
    return false;
  }

  /// Sets current hints into loop metadata, keeping other values intact.
  void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
    if (HintTypes.empty())
      return;

    // Reserve the first element to LoopID (see below).
    SmallVector<Metadata *, 4> MDs(1);
    // If the loop already has metadata, then ignore the existing operands.
    MDNode *LoopID = TheLoop->getLoopID();
    if (LoopID) {
      for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
        MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
        // If node in update list, ignore old value.
        if (!matchesHintMetadataName(Node, HintTypes))
          MDs.push_back(Node);
      }
    }

    // Now, add the missing hints.
    for (auto H : HintTypes)
      MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));

    // Replace current metadata node with new one.
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);

    TheLoop->setLoopID(NewLoopID);
  }

  /// The loop these hints belong to.
  const Loop *TheLoop;

  /// Interface to emit optimization remarks.
1475 OptimizationRemarkEmitter &ORE; 1476 }; 1477 1478 } // end anonymous namespace 1479 1480 static void emitMissedWarning(Function *F, Loop *L, 1481 const LoopVectorizeHints &LH, 1482 OptimizationRemarkEmitter *ORE) { 1483 LH.emitRemarkWithHints(); 1484 1485 if (LH.getForce() == LoopVectorizeHints::FK_Enabled) { 1486 if (LH.getWidth() != 1) 1487 ORE->emit(DiagnosticInfoOptimizationFailure( 1488 DEBUG_TYPE, "FailedRequestedVectorization", 1489 L->getStartLoc(), L->getHeader()) 1490 << "loop not vectorized: " 1491 << "failed explicitly specified loop vectorization"); 1492 else if (LH.getInterleave() != 1) 1493 ORE->emit(DiagnosticInfoOptimizationFailure( 1494 DEBUG_TYPE, "FailedRequestedInterleaving", L->getStartLoc(), 1495 L->getHeader()) 1496 << "loop not interleaved: " 1497 << "failed explicitly specified loop interleaving"); 1498 } 1499 } 1500 1501 namespace llvm { 1502 1503 /// LoopVectorizationLegality checks if it is legal to vectorize a loop, and 1504 /// to what vectorization factor. 1505 /// This class does not look at the profitability of vectorization, only the 1506 /// legality. This class has two main kinds of checks: 1507 /// * Memory checks - The code in canVectorizeMemory checks if vectorization 1508 /// will change the order of memory accesses in a way that will change the 1509 /// correctness of the program. 1510 /// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory 1511 /// checks for a number of different conditions, such as the availability of a 1512 /// single induction variable, that all types are supported and vectorize-able, 1513 /// etc. This code reflects the capabilities of InnerLoopVectorizer. 1514 /// This class is also used by InnerLoopVectorizer for identifying 1515 /// induction variable and the different reduction variables. 1516 class LoopVectorizationLegality { 1517 public: 1518 LoopVectorizationLegality( 1519 Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT, 1520 TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F, 1521 const TargetTransformInfo *TTI, 1522 std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI, 1523 OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R, 1524 LoopVectorizeHints *H, DemandedBits *DB, AssumptionCache *AC) 1525 : TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT), GetLAA(GetLAA), 1526 ORE(ORE), InterleaveInfo(PSE, L, DT, LI), Requirements(R), Hints(H), 1527 DB(DB), AC(AC) {} 1528 1529 /// ReductionList contains the reduction descriptors for all 1530 /// of the reductions that were found in the loop. 1531 using ReductionList = DenseMap<PHINode *, RecurrenceDescriptor>; 1532 1533 /// InductionList saves induction variables and maps them to the 1534 /// induction descriptor. 1535 using InductionList = MapVector<PHINode *, InductionDescriptor>; 1536 1537 /// RecurrenceSet contains the phi nodes that are recurrences other than 1538 /// inductions and reductions. 1539 using RecurrenceSet = SmallPtrSet<const PHINode *, 8>; 1540 1541 /// Returns true if it is legal to vectorize this loop. 1542 /// This does not mean that it is profitable to vectorize this 1543 /// loop, only that it is legal to do so. 1544 bool canVectorize(); 1545 1546 /// Returns the primary induction variable. 1547 PHINode *getPrimaryInduction() { return PrimaryInduction; } 1548 1549 /// Returns the reduction variables found in the loop. 1550 ReductionList *getReductionVars() { return &Reductions; } 1551 1552 /// Returns the induction variables found in the loop. 
1553 InductionList *getInductionVars() { return &Inductions; } 1554 1555 /// Return the first-order recurrences found in the loop. 1556 RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; } 1557 1558 /// Return the set of instructions to sink to handle first-order recurrences. 1559 DenseMap<Instruction *, Instruction *> &getSinkAfter() { return SinkAfter; } 1560 1561 /// Returns the widest induction type. 1562 Type *getWidestInductionType() { return WidestIndTy; } 1563 1564 /// Returns True if V is a Phi node of an induction variable in this loop. 1565 bool isInductionPhi(const Value *V); 1566 1567 /// Returns True if V is a cast that is part of an induction def-use chain, 1568 /// and had been proven to be redundant under a runtime guard (in other 1569 /// words, the cast has the same SCEV expression as the induction phi). 1570 bool isCastedInductionVariable(const Value *V); 1571 1572 /// Returns True if V can be considered as an induction variable in this 1573 /// loop. V can be the induction phi, or some redundant cast in the def-use 1574 /// chain of the induction phi. 1575 bool isInductionVariable(const Value *V); 1576 1577 /// Returns True if PN is a reduction variable in this loop. 1578 bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); } 1579 1580 /// Returns True if Phi is a first-order recurrence in this loop. 1581 bool isFirstOrderRecurrence(const PHINode *Phi); 1582 1583 /// Return true if the block BB needs to be predicated in order for the loop 1584 /// to be vectorized. 1585 bool blockNeedsPredication(BasicBlock *BB); 1586 1587 /// Check if this pointer is consecutive when vectorizing. This happens 1588 /// when the last index of the GEP is the induction variable, or when the 1589 /// pointer itself is an induction variable. 1590 /// This check allows us to vectorize A[idx] into a wide load/store. 1591 /// Returns: 1592 /// 0 - Stride is unknown or non-consecutive. 1593 /// 1 - Address is consecutive. 1594 /// -1 - Address is consecutive, and decreasing. 1595 /// NOTE: This method must only be used before modifying the original scalar 1596 /// loop. Do not use after invoking 'createVectorizedLoopSkeleton' (PR34965). 1597 int isConsecutivePtr(Value *Ptr); 1598 1599 /// Returns true if the value V is uniform within the loop. 1600 bool isUniform(Value *V); 1601 1602 /// Returns the information that we collected about runtime memory checks. 1603 const RuntimePointerChecking *getRuntimePointerChecking() const { 1604 return LAI->getRuntimePointerChecking(); 1605 } 1606 1607 const LoopAccessInfo *getLAI() const { return LAI; } 1608 1609 /// \brief Check if \p Instr belongs to any interleaved access group. 1610 bool isAccessInterleaved(Instruction *Instr) { 1611 return InterleaveInfo.isInterleaved(Instr); 1612 } 1613 1614 /// \brief Get the interleaved access group that \p Instr belongs to. 1615 const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) { 1616 return InterleaveInfo.getInterleaveGroup(Instr); 1617 } 1618 1619 /// \brief Returns true if an interleaved group requires a scalar iteration 1620 /// to handle accesses with gaps.
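  /// For example (illustrative), a factor-3 group that loads A[3*i] and
  /// A[3*i+1] but has no member for A[3*i+2] contains a gap; running the final
  /// iterations in scalar form avoids speculating a wide load past the end of
  /// A.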
1621 bool requiresScalarEpilogue() const { 1622 return InterleaveInfo.requiresScalarEpilogue(); 1623 } 1624 1625 unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); } 1626 1627 uint64_t getMaxSafeRegisterWidth() const { 1628 return LAI->getDepChecker().getMaxSafeRegisterWidth(); 1629 } 1630 1631 bool hasStride(Value *V) { return LAI->hasStride(V); } 1632 1633 /// Returns true if vector representation of the instruction \p I 1634 /// requires a mask. 1635 bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); } 1636 1637 unsigned getNumStores() const { return LAI->getNumStores(); } 1638 unsigned getNumLoads() const { return LAI->getNumLoads(); } 1639 1640 // Returns true if the NoNaN attribute is set on the function. 1641 bool hasFunNoNaNAttr() const { return HasFunNoNaNAttr; } 1642 1643 private: 1644 /// Check if a single basic block loop is vectorizable. 1645 /// At this point we know that this is a loop with a constant trip count 1646 /// and we only need to check individual instructions. 1647 bool canVectorizeInstrs(); 1648 1649 /// When we vectorize loops we may change the order in which 1650 /// we read and write from memory. This method checks if it is 1651 /// legal to vectorize the code, considering only memory constraints. 1652 /// Returns true if the loop is vectorizable. 1653 bool canVectorizeMemory(); 1654 1655 /// Return true if we can vectorize this loop using the IF-conversion 1656 /// transformation. 1657 bool canVectorizeWithIfConvert(); 1658 1659 /// Return true if all of the instructions in the block can be speculatively 1660 /// executed. \p SafePtrs is a list of addresses that are known to be legal 1661 /// and we know that we can read from them without segfault. 1662 bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs); 1663 1664 /// Updates the vectorization state by adding \p Phi to the inductions list. 1665 /// This can set \p Phi as the main induction of the loop if \p Phi is a 1666 /// better choice for the main induction than the existing one. 1667 void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID, 1668 SmallPtrSetImpl<Value *> &AllowedExit); 1669 1670 /// Create an analysis remark that explains why vectorization failed 1671 /// 1672 /// \p RemarkName is the identifier for the remark. If \p I is passed it is 1673 /// an instruction that prevents vectorization. Otherwise the loop is used 1674 /// for the location of the remark. \return the remark object that can be 1675 /// streamed to. 1676 OptimizationRemarkAnalysis 1677 createMissedAnalysis(StringRef RemarkName, Instruction *I = nullptr) const { 1678 return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(), 1679 RemarkName, TheLoop, I); 1680 } 1681 1682 /// \brief If an access has a symbolic stride, this maps the pointer value to 1683 /// the stride symbol. 1684 const ValueToValueMap *getSymbolicStrides() { 1685 // FIXME: Currently, the set of symbolic strides is sometimes queried before 1686 // it's collected. This happens from canVectorizeWithIfConvert, when the 1687 // pointer is checked to reference consecutive elements suitable for a 1688 // masked access. 1689 return LAI ? &LAI->getSymbolicStrides() : nullptr; 1690 } 1691 1692 /// The loop that we evaluate. 1693 Loop *TheLoop; 1694 1695 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. 1696 /// Applies dynamic knowledge to simplify SCEV expressions in the context 1697 /// of existing SCEV assumptions.
The analysis will also add a minimal set 1698 /// of new predicates if this is required to enable vectorization and 1699 /// unrolling. 1700 PredicatedScalarEvolution &PSE; 1701 1702 /// Target Library Info. 1703 TargetLibraryInfo *TLI; 1704 1705 /// Target Transform Info. 1706 const TargetTransformInfo *TTI; 1707 1708 /// Dominator Tree. 1709 DominatorTree *DT; 1710 1711 // LoopAccess analysis. 1712 std::function<const LoopAccessInfo &(Loop &)> *GetLAA; 1713 1714 // And the loop-accesses info corresponding to this loop. This pointer is 1715 // null until canVectorizeMemory sets it up. 1716 const LoopAccessInfo *LAI = nullptr; 1717 1718 /// Interface to emit optimization remarks. 1719 OptimizationRemarkEmitter *ORE; 1720 1721 /// The interleave access information contains groups of interleaved accesses 1722 /// with the same stride that are close to each other. 1723 InterleavedAccessInfo InterleaveInfo; 1724 1725 // --- vectorization state --- // 1726 1727 /// Holds the primary induction variable. This is the counter of the 1728 /// loop. 1729 PHINode *PrimaryInduction = nullptr; 1730 1731 /// Holds the reduction variables. 1732 ReductionList Reductions; 1733 1734 /// Holds all of the induction variables that we found in the loop. 1735 /// Notice that inductions don't need to start at zero and that induction 1736 /// variables can be pointers. 1737 InductionList Inductions; 1738 1739 /// Holds all the casts that participate in the update chain of the induction 1740 /// variables, and that have been proven to be redundant (possibly under a 1741 /// runtime guard). These casts can be ignored when creating the vectorized 1742 /// loop body. 1743 SmallPtrSet<Instruction *, 4> InductionCastsToIgnore; 1744 1745 /// Holds the phi nodes that are first-order recurrences. 1746 RecurrenceSet FirstOrderRecurrences; 1747 1748 /// Holds instructions that need to sink past other instructions to handle 1749 /// first-order recurrences. 1750 DenseMap<Instruction *, Instruction *> SinkAfter; 1751 1752 /// Holds the widest induction type encountered. 1753 Type *WidestIndTy = nullptr; 1754 1755 /// Allowed outside users. This holds the induction and reduction 1756 /// vars which can be accessed from outside the loop. 1757 SmallPtrSet<Value *, 4> AllowedExit; 1758 1759 /// Can we assume the absence of NaNs. 1760 bool HasFunNoNaNAttr = false; 1761 1762 /// Vectorization requirements that will go through late-evaluation. 1763 LoopVectorizationRequirements *Requirements; 1764 1765 /// Used to emit an analysis of any legality issues. 1766 LoopVectorizeHints *Hints; 1767 1768 /// The demanded bits analysis is used to compute the minimum type size in 1769 /// which a reduction can be computed. 1770 DemandedBits *DB; 1771 1772 /// The assumption cache analysis is used to compute the minimum type size in 1773 /// which a reduction can be computed. 1774 AssumptionCache *AC; 1775 1776 /// While vectorizing these instructions we have to generate a 1777 /// call to the appropriate masked intrinsic. 1778 SmallPtrSet<const Instruction *, 8> MaskedOp; 1779 }; 1780 1781 /// LoopVectorizationCostModel - estimates the expected speedups due to 1782 /// vectorization. 1783 /// In many cases vectorization is not profitable. This can happen for 1784 /// a number of reasons. In this class we mainly attempt to predict the 1785 /// expected speedup/slowdowns due to the supported instruction set. We use the 1786 /// TargetTransformInfo to query the different backends for the cost of 1787 /// different operations.
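/// For example (illustrative numbers only): if the scalar loop body costs 8
/// units per iteration and the widened body costs 20 units per vector
/// iteration at VF = 4 (i.e. 5 units per original iteration), the model would
/// consider VF = 4 profitable; if the widened cost were 40 units, it would
/// prefer to stay scalar (VF = 1).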
1788 class LoopVectorizationCostModel { 1789 public: 1790 LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE, 1791 LoopInfo *LI, LoopVectorizationLegality *Legal, 1792 const TargetTransformInfo &TTI, 1793 const TargetLibraryInfo *TLI, DemandedBits *DB, 1794 AssumptionCache *AC, 1795 OptimizationRemarkEmitter *ORE, const Function *F, 1796 const LoopVectorizeHints *Hints) 1797 : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB), 1798 AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {} 1799 1800 /// \return An upper bound for the vectorization factor, or None if 1801 /// vectorization should be avoided up front. 1802 Optional<unsigned> computeMaxVF(bool OptForSize); 1803 1804 /// \return The most profitable vectorization factor and the cost of that VF. 1805 /// This method checks every power of two up to MaxVF. If UserVF is not ZERO 1806 /// then this vectorization factor will be selected if vectorization is 1807 /// possible. 1808 VectorizationFactor selectVectorizationFactor(unsigned MaxVF); 1809 1810 /// Setup cost-based decisions for user vectorization factor. 1811 void selectUserVectorizationFactor(unsigned UserVF) { 1812 collectUniformsAndScalars(UserVF); 1813 collectInstsToScalarize(UserVF); 1814 } 1815 1816 /// \return The size (in bits) of the smallest and widest types in the code 1817 /// that needs to be vectorized. We ignore values that remain scalar such as 1818 /// 64 bit loop indices. 1819 std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); 1820 1821 /// \return The desired interleave count. 1822 /// If interleave count has been specified by metadata it will be returned. 1823 /// Otherwise, the interleave count is computed and returned. VF and LoopCost 1824 /// are the selected vectorization factor and the cost of the selected VF. 1825 unsigned selectInterleaveCount(bool OptForSize, unsigned VF, 1826 unsigned LoopCost); 1827 1828 /// Memory access instruction may be vectorized in more than one way. 1829 /// Form of instruction after vectorization depends on cost. 1830 /// This function takes cost-based decisions for Load/Store instructions 1831 /// and collects them in a map. This decisions map is used for building 1832 /// the lists of loop-uniform and loop-scalar instructions. 1833 /// The calculated cost is saved with widening decision in order to 1834 /// avoid redundant calculations. 1835 void setCostBasedWideningDecision(unsigned VF); 1836 1837 /// \brief A struct that represents some properties of the register usage 1838 /// of a loop. 1839 struct RegisterUsage { 1840 /// Holds the number of loop invariant values that are used in the loop. 1841 unsigned LoopInvariantRegs; 1842 1843 /// Holds the maximum number of concurrent live intervals in the loop. 1844 unsigned MaxLocalUsers; 1845 }; 1846 1847 /// \return Returns information about the register usages of the loop for the 1848 /// given vectorization factors. 1849 SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs); 1850 1851 /// Collect values we want to ignore in the cost model. 1852 void collectValuesToIgnore(); 1853 1854 /// \returns The smallest bitwidth each instruction can be represented with. 1855 /// The vector equivalents of these instructions should be truncated to this 1856 /// type. 1857 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { 1858 return MinBWs; 1859 } 1860 1861 /// \returns True if it is more profitable to scalarize instruction \p I for 1862 /// vectorization factor \p VF. 
1863 bool isProfitableToScalarize(Instruction *I, unsigned VF) const { 1864 assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1."); 1865 auto Scalars = InstsToScalarize.find(VF); 1866 assert(Scalars != InstsToScalarize.end() && 1867 "VF not yet analyzed for scalarization profitability"); 1868 return Scalars->second.count(I); 1869 } 1870 1871 /// Returns true if \p I is known to be uniform after vectorization. 1872 bool isUniformAfterVectorization(Instruction *I, unsigned VF) const { 1873 if (VF == 1) 1874 return true; 1875 assert(Uniforms.count(VF) && "VF not yet analyzed for uniformity"); 1876 auto UniformsPerVF = Uniforms.find(VF); 1877 return UniformsPerVF->second.count(I); 1878 } 1879 1880 /// Returns true if \p I is known to be scalar after vectorization. 1881 bool isScalarAfterVectorization(Instruction *I, unsigned VF) const { 1882 if (VF == 1) 1883 return true; 1884 assert(Scalars.count(VF) && "Scalar values are not calculated for VF"); 1885 auto ScalarsPerVF = Scalars.find(VF); 1886 return ScalarsPerVF->second.count(I); 1887 } 1888 1889 /// \returns True if instruction \p I can be truncated to a smaller bitwidth 1890 /// for vectorization factor \p VF. 1891 bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const { 1892 return VF > 1 && MinBWs.count(I) && !isProfitableToScalarize(I, VF) && 1893 !isScalarAfterVectorization(I, VF); 1894 } 1895 1896 /// Decision that was taken during cost calculation for memory instruction. 1897 enum InstWidening { 1898 CM_Unknown, 1899 CM_Widen, // For consecutive accesses with stride +1. 1900 CM_Widen_Reverse, // For consecutive accesses with stride -1. 1901 CM_Interleave, 1902 CM_GatherScatter, 1903 CM_Scalarize 1904 }; 1905 1906 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1907 /// instruction \p I and vector width \p VF. 1908 void setWideningDecision(Instruction *I, unsigned VF, InstWidening W, 1909 unsigned Cost) { 1910 assert(VF >= 2 && "Expected VF >=2"); 1911 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1912 } 1913 1914 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1915 /// interleaving group \p Grp and vector width \p VF. 1916 void setWideningDecision(const InterleaveGroup *Grp, unsigned VF, 1917 InstWidening W, unsigned Cost) { 1918 assert(VF >= 2 && "Expected VF >=2"); 1919 /// Broadcast this decision to all instructions inside the group. 1920 /// But the cost will be assigned to one instruction only. 1921 for (unsigned i = 0; i < Grp->getFactor(); ++i) { 1922 if (auto *I = Grp->getMember(i)) { 1923 if (Grp->getInsertPos() == I) 1924 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1925 else 1926 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); 1927 } 1928 } 1929 } 1930 1931 /// Return the cost model decision for the given instruction \p I and vector 1932 /// width \p VF. Return CM_Unknown if this instruction did not pass 1933 /// through the cost modeling. 1934 InstWidening getWideningDecision(Instruction *I, unsigned VF) { 1935 assert(VF >= 2 && "Expected VF >=2"); 1936 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF); 1937 auto Itr = WideningDecisions.find(InstOnVF); 1938 if (Itr == WideningDecisions.end()) 1939 return CM_Unknown; 1940 return Itr->second.first; 1941 } 1942 1943 /// Return the vectorization cost for the given instruction \p I and vector 1944 /// width \p VF.
1945 unsigned getWideningCost(Instruction *I, unsigned VF) { 1946 assert(VF >= 2 && "Expected VF >=2"); 1947 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF); 1948 assert(WideningDecisions.count(InstOnVF) && "The cost is not calculated"); 1949 return WideningDecisions[InstOnVF].second; 1950 } 1951 1952 /// Return True if instruction \p I is an optimizable truncate whose operand 1953 /// is an induction variable. Such a truncate will be removed by adding a new 1954 /// induction variable with the destination type. 1955 bool isOptimizableIVTruncate(Instruction *I, unsigned VF) { 1956 // If the instruction is not a truncate, return false. 1957 auto *Trunc = dyn_cast<TruncInst>(I); 1958 if (!Trunc) 1959 return false; 1960 1961 // Get the source and destination types of the truncate. 1962 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1963 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1964 1965 // If the truncate is free for the given types, return false. Replacing a 1966 // free truncate with an induction variable would add an induction variable 1967 // update instruction to each iteration of the loop. We exclude from this 1968 // check the primary induction variable since it will need an update 1969 // instruction regardless. 1970 Value *Op = Trunc->getOperand(0); 1971 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1972 return false; 1973 1974 // If the truncated value is not an induction variable, return false. 1975 return Legal->isInductionPhi(Op); 1976 } 1977 1978 /// Collects the instructions to scalarize for each predicated instruction in 1979 /// the loop. 1980 void collectInstsToScalarize(unsigned VF); 1981 1982 /// Collect Uniform and Scalar values for the given \p VF. 1983 /// The sets depend on CM decision for Load/Store instructions 1984 /// that may be vectorized as interleave, gather-scatter or scalarized. 1985 void collectUniformsAndScalars(unsigned VF) { 1986 // Do the analysis once. 1987 if (VF == 1 || Uniforms.count(VF)) 1988 return; 1989 setCostBasedWideningDecision(VF); 1990 collectLoopUniforms(VF); 1991 collectLoopScalars(VF); 1992 } 1993 1994 /// Returns true if the target machine supports masked store operation 1995 /// for the given \p DataType and kind of access to \p Ptr. 1996 bool isLegalMaskedStore(Type *DataType, Value *Ptr) { 1997 return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedStore(DataType); 1998 } 1999 2000 /// Returns true if the target machine supports masked load operation 2001 /// for the given \p DataType and kind of access to \p Ptr. 2002 bool isLegalMaskedLoad(Type *DataType, Value *Ptr) { 2003 return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedLoad(DataType); 2004 } 2005 2006 /// Returns true if the target machine supports masked scatter operation 2007 /// for the given \p DataType. 2008 bool isLegalMaskedScatter(Type *DataType) { 2009 return TTI.isLegalMaskedScatter(DataType); 2010 } 2011 2012 /// Returns true if the target machine supports masked gather operation 2013 /// for the given \p DataType. 2014 bool isLegalMaskedGather(Type *DataType) { 2015 return TTI.isLegalMaskedGather(DataType); 2016 } 2017 2018 /// Returns true if the target machine can represent \p V as a masked gather 2019 /// or scatter operation. 
2020 bool isLegalGatherOrScatter(Value *V) { 2021 bool LI = isa<LoadInst>(V); 2022 bool SI = isa<StoreInst>(V); 2023 if (!LI && !SI) 2024 return false; 2025 auto *Ty = getMemInstValueType(V); 2026 return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty)); 2027 } 2028 2029 /// Returns true if \p I is an instruction that will be scalarized with 2030 /// predication. Such instructions include conditional stores and 2031 /// instructions that may divide by zero. 2032 bool isScalarWithPredication(Instruction *I); 2033 2034 /// Returns true if \p I is a memory instruction with consecutive memory 2035 /// access that can be widened. 2036 bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1); 2037 2038 private: 2039 unsigned NumPredStores = 0; 2040 2041 /// \return An upper bound for the vectorization factor, larger than zero. 2042 /// One is returned if vectorization should best be avoided due to cost. 2043 unsigned computeFeasibleMaxVF(bool OptForSize, unsigned ConstTripCount); 2044 2045 /// The vectorization cost is a combination of the cost itself and a boolean 2046 /// indicating whether any of the contributing operations will actually 2047 /// operate on 2048 /// vector values after type legalization in the backend. If this latter value 2049 /// is 2050 /// false, then all operations will be scalarized (i.e. no vectorization has 2051 /// actually taken place). 2052 using VectorizationCostTy = std::pair<unsigned, bool>; 2053 2054 /// Returns the expected execution cost. The unit of the cost does 2055 /// not matter because we use the 'cost' units to compare different 2056 /// vector widths. The cost that is returned is *not* normalized by 2057 /// the factor width. 2058 VectorizationCostTy expectedCost(unsigned VF); 2059 2060 /// Returns the execution time cost of an instruction for a given vector 2061 /// width. Vector width of one means scalar. 2062 VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF); 2063 2064 /// The cost-computation logic from getInstructionCost which provides 2065 /// the vector type as an output parameter. 2066 unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy); 2067 2068 /// Calculate vectorization cost of memory instruction \p I. 2069 unsigned getMemoryInstructionCost(Instruction *I, unsigned VF); 2070 2071 /// The cost computation for a scalarized memory instruction. 2072 unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF); 2073 2074 /// The cost computation for an interleaving group of memory instructions. 2075 unsigned getInterleaveGroupCost(Instruction *I, unsigned VF); 2076 2077 /// The cost computation for a Gather/Scatter instruction. 2078 unsigned getGatherScatterCost(Instruction *I, unsigned VF); 2079 2080 /// The cost computation for widening instruction \p I with consecutive 2081 /// memory access. 2082 unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF); 2083 2084 /// The cost calculation for Load instruction \p I with uniform pointer - 2085 /// scalar load + broadcast. 2086 unsigned getUniformMemOpCost(Instruction *I, unsigned VF); 2087 2088 /// Returns whether the instruction is a load or store and will be emitted 2089 /// as a vector operation. 2090 bool isConsecutiveLoadOrStore(Instruction *I); 2091 2092 /// Returns true if an artificially high cost for emulated masked memrefs 2093 /// should be used.
2094 bool useEmulatedMaskMemRefHack(Instruction *I); 2095 2096 /// Create an analysis remark that explains why vectorization failed 2097 /// 2098 /// \p RemarkName is the identifier for the remark. \return the remark object 2099 /// that can be streamed to. 2100 OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) { 2101 return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(), 2102 RemarkName, TheLoop); 2103 } 2104 2105 /// Map of scalar integer values to the smallest bitwidth they can be legally 2106 /// represented as. The vector equivalents of these values should be truncated 2107 /// to this type. 2108 MapVector<Instruction *, uint64_t> MinBWs; 2109 2110 /// A type representing the costs for instructions if they were to be 2111 /// scalarized rather than vectorized. The entries are Instruction-Cost 2112 /// pairs. 2113 using ScalarCostsTy = DenseMap<Instruction *, unsigned>; 2114 2115 /// A set containing all BasicBlocks that are known to be present after 2116 /// vectorization as predicated blocks. 2117 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization; 2118 2119 /// A map holding scalar costs for different vectorization factors. The 2120 /// presence of a cost for an instruction in the mapping indicates that the 2121 /// instruction will be scalarized when vectorizing with the associated 2122 /// vectorization factor. The entries are VF-ScalarCostTy pairs. 2123 DenseMap<unsigned, ScalarCostsTy> InstsToScalarize; 2124 2125 /// Holds the instructions known to be uniform after vectorization. 2126 /// The data is collected per VF. 2127 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms; 2128 2129 /// Holds the instructions known to be scalar after vectorization. 2130 /// The data is collected per VF. 2131 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars; 2132 2133 /// Holds the instructions (address computations) that are forced to be 2134 /// scalarized. 2135 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars; 2136 2137 /// Returns the expected difference in cost from scalarizing the expression 2138 /// feeding a predicated instruction \p PredInst. The instructions to 2139 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 2140 /// non-negative return value implies the expression will be scalarized. 2141 /// Currently, only single-use chains are considered for scalarization. 2142 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 2143 unsigned VF); 2144 2145 /// Collect the instructions that are uniform after vectorization. An 2146 /// instruction is uniform if we represent it with a single scalar value in 2147 /// the vectorized loop corresponding to each vector iteration. Examples of 2148 /// uniform instructions include pointer operands of consecutive or 2149 /// interleaved memory accesses. Note that although uniformity implies an 2150 /// instruction will be scalar, the reverse is not true. In general, a 2151 /// scalarized instruction will be represented by VF scalar values in the 2152 /// vectorized loop, each corresponding to an iteration of the original 2153 /// scalar loop. 2154 void collectLoopUniforms(unsigned VF); 2155 2156 /// Collect the instructions that are scalar after vectorization. An 2157 /// instruction is scalar if it is known to be uniform or will be scalarized 2158 /// during vectorization.
Non-uniform scalarized instructions will be 2159 /// represented by VF values in the vectorized loop, each corresponding to an 2160 /// iteration of the original scalar loop. 2161 void collectLoopScalars(unsigned VF); 2162 2163 /// Keeps cost model vectorization decision and cost for instructions. 2164 /// Right now it is used for memory instructions only. 2165 using DecisionList = DenseMap<std::pair<Instruction *, unsigned>, 2166 std::pair<InstWidening, unsigned>>; 2167 2168 DecisionList WideningDecisions; 2169 2170 public: 2171 /// The loop that we evaluate. 2172 Loop *TheLoop; 2173 2174 /// Predicated scalar evolution analysis. 2175 PredicatedScalarEvolution &PSE; 2176 2177 /// Loop Info analysis. 2178 LoopInfo *LI; 2179 2180 /// Vectorization legality. 2181 LoopVectorizationLegality *Legal; 2182 2183 /// Vector target information. 2184 const TargetTransformInfo &TTI; 2185 2186 /// Target Library Info. 2187 const TargetLibraryInfo *TLI; 2188 2189 /// Demanded bits analysis. 2190 DemandedBits *DB; 2191 2192 /// Assumption cache. 2193 AssumptionCache *AC; 2194 2195 /// Interface to emit optimization remarks. 2196 OptimizationRemarkEmitter *ORE; 2197 2198 const Function *TheFunction; 2199 2200 /// Loop Vectorize Hint. 2201 const LoopVectorizeHints *Hints; 2202 2203 /// Values to ignore in the cost model. 2204 SmallPtrSet<const Value *, 16> ValuesToIgnore; 2205 2206 /// Values to ignore in the cost model when VF > 1. 2207 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 2208 }; 2209 2210 } // end namespace llvm 2211 2212 namespace { 2213 2214 /// \brief This holds vectorization requirements that must be verified late in 2215 /// the process. The requirements are set by the legality analysis and the cost 2216 /// model. Once vectorization has been determined to be possible and profitable, 2217 /// the requirements can be verified by looking for metadata or compiler options. 2218 /// For example, some loops require FP commutativity which is only allowed if 2219 /// vectorization is explicitly specified or if the fast-math compiler option 2220 /// has been provided. 2221 /// Late evaluation of these requirements allows helpful diagnostics to be 2222 /// composed that tell the user what needs to be done to vectorize the loop. For 2223 /// example, by specifying #pragma clang loop vectorize or -ffast-math. Late 2224 /// evaluation should be used only when diagnostics can be generated that can be 2225 /// followed by a non-expert user. 2226 class LoopVectorizationRequirements { 2227 public: 2228 LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE) : ORE(ORE) {} 2229 2230 void addUnsafeAlgebraInst(Instruction *I) { 2231 // First unsafe algebra instruction. 2232 if (!UnsafeAlgebraInst) 2233 UnsafeAlgebraInst = I; 2234 } 2235 2236 void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; } 2237 2238 bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) { 2239 const char *PassName = Hints.vectorizeAnalysisPassName(); 2240 bool Failed = false; 2241 if (UnsafeAlgebraInst && !Hints.allowReordering()) { 2242 ORE.emit([&]() { 2243 return OptimizationRemarkAnalysisFPCommute( 2244 PassName, "CantReorderFPOps", 2245 UnsafeAlgebraInst->getDebugLoc(), 2246 UnsafeAlgebraInst->getParent()) 2247 << "loop not vectorized: cannot prove it is safe to reorder " 2248 "floating-point operations"; 2249 }); 2250 Failed = true; 2251 } 2252 2253 // Test if runtime memcheck thresholds are exceeded.
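    // A loop with an explicit vectorization pragma is typically given a larger
    // runtime-check budget (PragmaVectorizeMemoryCheckThreshold) than the
    // default limit; both are command-line configurable. Exceeding the pragma
    // budget always fails here, while the default limit only matters when
    // reordering has not been explicitly allowed by the hints.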
2254 bool PragmaThresholdReached = 2255 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 2256 bool ThresholdReached = 2257 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 2258 if ((ThresholdReached && !Hints.allowReordering()) || 2259 PragmaThresholdReached) { 2260 ORE.emit([&]() { 2261 return OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps", 2262 L->getStartLoc(), 2263 L->getHeader()) 2264 << "loop not vectorized: cannot prove it is safe to reorder " 2265 "memory operations"; 2266 }); 2267 DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 2268 Failed = true; 2269 } 2270 2271 return Failed; 2272 } 2273 2274 private: 2275 unsigned NumRuntimePointerChecks = 0; 2276 Instruction *UnsafeAlgebraInst = nullptr; 2277 2278 /// Interface to emit optimization remarks. 2279 OptimizationRemarkEmitter &ORE; 2280 }; 2281 2282 } // end anonymous namespace 2283 2284 static void addAcyclicInnerLoop(Loop &L, LoopInfo &LI, 2285 SmallVectorImpl<Loop *> &V) { 2286 if (L.empty()) { 2287 LoopBlocksRPO RPOT(&L); 2288 RPOT.perform(&LI); 2289 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, LI)) 2290 V.push_back(&L); 2291 return; 2292 } 2293 for (Loop *InnerL : L) 2294 addAcyclicInnerLoop(*InnerL, LI, V); 2295 } 2296 2297 namespace { 2298 2299 /// The LoopVectorize Pass. 2300 struct LoopVectorize : public FunctionPass { 2301 /// Pass identification, replacement for typeid 2302 static char ID; 2303 2304 LoopVectorizePass Impl; 2305 2306 explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true) 2307 : FunctionPass(ID) { 2308 Impl.DisableUnrolling = NoUnrolling; 2309 Impl.AlwaysVectorize = AlwaysVectorize; 2310 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2311 } 2312 2313 bool runOnFunction(Function &F) override { 2314 if (skipFunction(F)) 2315 return false; 2316 2317 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2318 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2319 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2320 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2321 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2322 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2323 auto *TLI = TLIP ? 
&TLIP->getTLI() : nullptr; 2324 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2325 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2326 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2327 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2328 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2329 2330 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2331 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2332 2333 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2334 GetLAA, *ORE); 2335 } 2336 2337 void getAnalysisUsage(AnalysisUsage &AU) const override { 2338 AU.addRequired<AssumptionCacheTracker>(); 2339 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2340 AU.addRequired<DominatorTreeWrapperPass>(); 2341 AU.addRequired<LoopInfoWrapperPass>(); 2342 AU.addRequired<ScalarEvolutionWrapperPass>(); 2343 AU.addRequired<TargetTransformInfoWrapperPass>(); 2344 AU.addRequired<AAResultsWrapperPass>(); 2345 AU.addRequired<LoopAccessLegacyAnalysis>(); 2346 AU.addRequired<DemandedBitsWrapperPass>(); 2347 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2348 AU.addPreserved<LoopInfoWrapperPass>(); 2349 AU.addPreserved<DominatorTreeWrapperPass>(); 2350 AU.addPreserved<BasicAAWrapperPass>(); 2351 AU.addPreserved<GlobalsAAWrapperPass>(); 2352 } 2353 }; 2354 2355 } // end anonymous namespace 2356 2357 //===----------------------------------------------------------------------===// 2358 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2359 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2360 //===----------------------------------------------------------------------===// 2361 2362 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2363 // We need to place the broadcast of invariant variables outside the loop. 2364 Instruction *Instr = dyn_cast<Instruction>(V); 2365 bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody); 2366 bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr; 2367 2368 // Place the code for broadcasting invariant variables in the new preheader. 2369 IRBuilder<>::InsertPointGuard Guard(Builder); 2370 if (Invariant) 2371 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2372 2373 // Broadcast the scalar into all locations in the vector. 
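  // For VF = 4 this is roughly equivalent to emitting (illustrative IR):
  //   %broadcast.splatinsert = insertelement <4 x T> undef, T %v, i32 0
  //   %broadcast.splat = shufflevector <4 x T> %broadcast.splatinsert,
  //                                    <4 x T> undef, <4 x i32> zeroinitializer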
2374 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2375 2376 return Shuf; 2377 } 2378 2379 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 2380 const InductionDescriptor &II, Value *Step, Instruction *EntryVal) { 2381 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2382 "Expected either an induction phi-node or a truncate of it!"); 2383 Value *Start = II.getStartValue(); 2384 2385 // Construct the initial value of the vector IV in the vector loop preheader 2386 auto CurrIP = Builder.saveIP(); 2387 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2388 if (isa<TruncInst>(EntryVal)) { 2389 assert(Start->getType()->isIntegerTy() && 2390 "Truncation requires an integer type"); 2391 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2392 Step = Builder.CreateTrunc(Step, TruncType); 2393 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2394 } 2395 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2396 Value *SteppedStart = 2397 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 2398 2399 // We create vector phi nodes for both integer and floating-point induction 2400 // variables. Here, we determine the kind of arithmetic we will perform. 2401 Instruction::BinaryOps AddOp; 2402 Instruction::BinaryOps MulOp; 2403 if (Step->getType()->isIntegerTy()) { 2404 AddOp = Instruction::Add; 2405 MulOp = Instruction::Mul; 2406 } else { 2407 AddOp = II.getInductionOpcode(); 2408 MulOp = Instruction::FMul; 2409 } 2410 2411 // Multiply the vectorization factor by the step using integer or 2412 // floating-point arithmetic as appropriate. 2413 Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF); 2414 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 2415 2416 // Create a vector splat to use in the induction update. 2417 // 2418 // FIXME: If the step is non-constant, we create the vector splat with 2419 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2420 // handle a constant vector splat. 2421 Value *SplatVF = isa<Constant>(Mul) 2422 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 2423 : Builder.CreateVectorSplat(VF, Mul); 2424 Builder.restoreIP(CurrIP); 2425 2426 // We may need to add the step a number of times, depending on the unroll 2427 // factor. The last of those goes into the PHI. 2428 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2429 &*LoopVectorBody->getFirstInsertionPt()); 2430 Instruction *LastInduction = VecInd; 2431 for (unsigned Part = 0; Part < UF; ++Part) { 2432 VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction); 2433 2434 if (isa<TruncInst>(EntryVal)) 2435 addMetadata(LastInduction, EntryVal); 2436 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part); 2437 2438 LastInduction = cast<Instruction>(addFastMathFlag( 2439 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"))); 2440 } 2441 2442 // Move the last step to the end of the latch block. This ensures consistent 2443 // placement of all induction updates. 
2444 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2445 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2446 auto *ICmp = cast<Instruction>(Br->getCondition()); 2447 LastInduction->moveBefore(ICmp); 2448 LastInduction->setName("vec.ind.next"); 2449 2450 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2451 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2452 } 2453 2454 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 2455 return Cost->isScalarAfterVectorization(I, VF) || 2456 Cost->isProfitableToScalarize(I, VF); 2457 } 2458 2459 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2460 if (shouldScalarizeInstruction(IV)) 2461 return true; 2462 auto isScalarInst = [&](User *U) -> bool { 2463 auto *I = cast<Instruction>(U); 2464 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 2465 }; 2466 return llvm::any_of(IV->users(), isScalarInst); 2467 } 2468 2469 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 2470 const InductionDescriptor &ID, const Instruction *EntryVal, 2471 Value *VectorLoopVal, unsigned Part, unsigned Lane) { 2472 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2473 "Expected either an induction phi-node or a truncate of it!"); 2474 2475 // This induction variable is not the phi from the original loop but the 2476 // newly-created IV based on the proof that casted Phi is equal to the 2477 // uncasted Phi in the vectorized loop (under a runtime guard possibly). It 2478 // re-uses the same InductionDescriptor that original IV uses but we don't 2479 // have to do any recording in this case - that is done when original IV is 2480 // processed. 2481 if (isa<TruncInst>(EntryVal)) 2482 return; 2483 2484 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 2485 if (Casts.empty()) 2486 return; 2487 // Only the first Cast instruction in the Casts vector is of interest. 2488 // The rest of the Casts (if exist) have no uses outside the 2489 // induction update chain itself. 2490 Instruction *CastInst = *Casts.begin(); 2491 if (Lane < UINT_MAX) 2492 VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal); 2493 else 2494 VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal); 2495 } 2496 2497 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) { 2498 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 2499 "Primary induction variable must have an integer type"); 2500 2501 auto II = Legal->getInductionVars()->find(IV); 2502 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 2503 2504 auto ID = II->second; 2505 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2506 2507 // The scalar value to broadcast. This will be derived from the canonical 2508 // induction variable. 2509 Value *ScalarIV = nullptr; 2510 2511 // The value from the original loop to which we are mapping the new induction 2512 // variable. 2513 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2514 2515 // True if we have vectorized the induction variable. 2516 auto VectorizedIV = false; 2517 2518 // Determine if we want a scalar version of the induction variable. This is 2519 // true if the induction variable itself is not widened, or if it has at 2520 // least one user in the loop that is not widened. 2521 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal); 2522 2523 // Generate code for the induction step. 
Note that induction steps are 2524 // required to be loop-invariant 2525 assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) && 2526 "Induction step should be loop invariant"); 2527 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2528 Value *Step = nullptr; 2529 if (PSE.getSE()->isSCEVable(IV->getType())) { 2530 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2531 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 2532 LoopVectorPreHeader->getTerminator()); 2533 } else { 2534 Step = cast<SCEVUnknown>(ID.getStep())->getValue(); 2535 } 2536 2537 // Try to create a new independent vector induction variable. If we can't 2538 // create the phi node, we will splat the scalar induction variable in each 2539 // loop iteration. 2540 if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) { 2541 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 2542 VectorizedIV = true; 2543 } 2544 2545 // If we haven't yet vectorized the induction variable, or if we will create 2546 // a scalar one, we need to define the scalar induction variable and step 2547 // values. If we were given a truncation type, truncate the canonical 2548 // induction variable and step. Otherwise, derive these values from the 2549 // induction descriptor. 2550 if (!VectorizedIV || NeedsScalarIV) { 2551 ScalarIV = Induction; 2552 if (IV != OldInduction) { 2553 ScalarIV = IV->getType()->isIntegerTy() 2554 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 2555 : Builder.CreateCast(Instruction::SIToFP, Induction, 2556 IV->getType()); 2557 ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL); 2558 ScalarIV->setName("offset.idx"); 2559 } 2560 if (Trunc) { 2561 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2562 assert(Step->getType()->isIntegerTy() && 2563 "Truncation requires an integer step"); 2564 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 2565 Step = Builder.CreateTrunc(Step, TruncType); 2566 } 2567 } 2568 2569 // If we haven't yet vectorized the induction variable, splat the scalar 2570 // induction variable, and build the necessary step vectors. 2571 // TODO: Don't do it unless the vectorized IV is really required. 2572 if (!VectorizedIV) { 2573 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2574 for (unsigned Part = 0; Part < UF; ++Part) { 2575 Value *EntryPart = 2576 getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode()); 2577 VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart); 2578 if (Trunc) 2579 addMetadata(EntryPart, Trunc); 2580 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part); 2581 } 2582 } 2583 2584 // If an induction variable is only used for counting loop iterations or 2585 // calculating addresses, it doesn't need to be widened. Create scalar steps 2586 // that can be used by instructions we will later scalarize. Note that the 2587 // addition of the scalar steps will not increase the number of instructions 2588 // in the loop in the common case prior to InstCombine. We will be trading 2589 // one vector extract for each scalar step. 2590 if (NeedsScalarIV) 2591 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 2592 } 2593 2594 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2595 Instruction::BinaryOps BinOp) { 2596 // Create and check the types. 
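  // As an illustration (integer case): for a splat of 0 in Val, StartIdx = 0,
  // Step = 1 and VF = 4 the returned vector is <0, 1, 2, 3>; with StartIdx = 4
  // it is <4, 5, 6, 7>.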
2597 assert(Val->getType()->isVectorTy() && "Must be a vector"); 2598 int VLen = Val->getType()->getVectorNumElements(); 2599 2600 Type *STy = Val->getType()->getScalarType(); 2601 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2602 "Induction Step must be an integer or FP"); 2603 assert(Step->getType() == STy && "Step has wrong type"); 2604 2605 SmallVector<Constant *, 8> Indices; 2606 2607 if (STy->isIntegerTy()) { 2608 // Create a vector of consecutive numbers from zero to VF. 2609 for (int i = 0; i < VLen; ++i) 2610 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 2611 2612 // Add the consecutive indices to the vector value. 2613 Constant *Cv = ConstantVector::get(Indices); 2614 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 2615 Step = Builder.CreateVectorSplat(VLen, Step); 2616 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2617 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2618 // which can be found from the original scalar operations. 2619 Step = Builder.CreateMul(Cv, Step); 2620 return Builder.CreateAdd(Val, Step, "induction"); 2621 } 2622 2623 // Floating point induction. 2624 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2625 "Binary Opcode should be specified for FP induction"); 2626 // Create a vector of consecutive numbers from zero to VF. 2627 for (int i = 0; i < VLen; ++i) 2628 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 2629 2630 // Add the consecutive indices to the vector value. 2631 Constant *Cv = ConstantVector::get(Indices); 2632 2633 Step = Builder.CreateVectorSplat(VLen, Step); 2634 2635 // Floating point operations had to be 'fast' to enable the induction. 2636 FastMathFlags Flags; 2637 Flags.setFast(); 2638 2639 Value *MulOp = Builder.CreateFMul(Cv, Step); 2640 if (isa<Instruction>(MulOp)) 2641 // Have to check, MulOp may be a constant 2642 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 2643 2644 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2645 if (isa<Instruction>(BOp)) 2646 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2647 return BOp; 2648 } 2649 2650 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2651 Instruction *EntryVal, 2652 const InductionDescriptor &ID) { 2653 // We shouldn't have to build scalar steps if we aren't vectorizing. 2654 assert(VF > 1 && "VF should be greater than one"); 2655 2656 // Get the value type and ensure it and the step have the same integer type. 2657 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2658 assert(ScalarIVTy == Step->getType() && 2659 "Val and Step should have the same type"); 2660 2661 // We build scalar steps for both integer and floating-point induction 2662 // variables. Here, we determine the kind of arithmetic we will perform. 2663 Instruction::BinaryOps AddOp; 2664 Instruction::BinaryOps MulOp; 2665 if (ScalarIVTy->isIntegerTy()) { 2666 AddOp = Instruction::Add; 2667 MulOp = Instruction::Mul; 2668 } else { 2669 AddOp = ID.getInductionOpcode(); 2670 MulOp = Instruction::FMul; 2671 } 2672 2673 // Determine the number of scalars we need to generate for each unroll 2674 // iteration. If EntryVal is uniform, we only need to generate the first 2675 // lane. Otherwise, we generate all VF values. 2676 unsigned Lanes = 2677 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1 2678 : VF; 2679 // Compute the scalar steps and save the results in VectorLoopValueMap. 
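    // E.g. (illustrative) with UF = 2, VF = 4, an integer ScalarIV and Step = 1,
    // part 0 receives the scalars ScalarIV + 0 .. ScalarIV + 3 and part 1
    // receives ScalarIV + 4 .. ScalarIV + 7 (lane 0 only if EntryVal is
    // uniform).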
2680 for (unsigned Part = 0; Part < UF; ++Part) { 2681 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2682 auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane); 2683 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 2684 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 2685 VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add); 2686 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane); 2687 } 2688 } 2689 } 2690 2691 int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) { 2692 const ValueToValueMap &Strides = getSymbolicStrides() ? *getSymbolicStrides() : 2693 ValueToValueMap(); 2694 2695 int Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, true, false); 2696 if (Stride == 1 || Stride == -1) 2697 return Stride; 2698 return 0; 2699 } 2700 2701 bool LoopVectorizationLegality::isUniform(Value *V) { 2702 return LAI->isUniform(V); 2703 } 2704 2705 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) { 2706 assert(V != Induction && "The new induction variable should not be used."); 2707 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2708 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2709 2710 // If we have a stride that is replaced by one, do it here. 2711 if (Legal->hasStride(V)) 2712 V = ConstantInt::get(V->getType(), 1); 2713 2714 // If we have a vector mapped to this value, return it. 2715 if (VectorLoopValueMap.hasVectorValue(V, Part)) 2716 return VectorLoopValueMap.getVectorValue(V, Part); 2717 2718 // If the value has not been vectorized, check if it has been scalarized 2719 // instead. If it has been scalarized, and we actually need the value in 2720 // vector form, we will construct the vector values on demand. 2721 if (VectorLoopValueMap.hasAnyScalarValue(V)) { 2722 Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0}); 2723 2724 // If we've scalarized a value, that value should be an instruction. 2725 auto *I = cast<Instruction>(V); 2726 2727 // If we aren't vectorizing, we can just copy the scalar map values over to 2728 // the vector map. 2729 if (VF == 1) { 2730 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue); 2731 return ScalarValue; 2732 } 2733 2734 // Get the last scalar instruction we generated for V and Part. If the value 2735 // is known to be uniform after vectorization, this corresponds to lane zero 2736 // of the Part unroll iteration. Otherwise, the last instruction is the one 2737 // we created for the last vector lane of the Part unroll iteration. 2738 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1; 2739 auto *LastInst = cast<Instruction>( 2740 VectorLoopValueMap.getScalarValue(V, {Part, LastLane})); 2741 2742 // Set the insert point after the last scalarized instruction. This ensures 2743 // the insertelement sequence will directly follow the scalar definitions. 2744 auto OldIP = Builder.saveIP(); 2745 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 2746 Builder.SetInsertPoint(&*NewIP); 2747 2748 // However, if we are vectorizing, we need to construct the vector values. 2749 // If the value is known to be uniform after vectorization, we can just 2750 // broadcast the scalar value corresponding to lane zero for each unroll 2751 // iteration. Otherwise, we construct the vector values using insertelement 2752 // instructions. Since the resulting vectors are stored in 2753 // VectorLoopValueMap, we will only generate the insertelements once. 
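    // E.g. (illustrative) for VF = 4 a non-uniform scalarized value is packed
    // lane by lane:
    //   %v.0 = insertelement <4 x T> undef, T %s0, i32 0
    //   %v.1 = insertelement <4 x T> %v.0, T %s1, i32 1
    //   %v.2 = insertelement <4 x T> %v.1, T %s2, i32 2
    //   %v.3 = insertelement <4 x T> %v.2, T %s3, i32 3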
2754 Value *VectorValue = nullptr; 2755 if (Cost->isUniformAfterVectorization(I, VF)) { 2756 VectorValue = getBroadcastInstrs(ScalarValue); 2757 VectorLoopValueMap.setVectorValue(V, Part, VectorValue); 2758 } else { 2759 // Initialize packing with insertelements to start from undef. 2760 Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF)); 2761 VectorLoopValueMap.setVectorValue(V, Part, Undef); 2762 for (unsigned Lane = 0; Lane < VF; ++Lane) 2763 packScalarIntoVectorValue(V, {Part, Lane}); 2764 VectorValue = VectorLoopValueMap.getVectorValue(V, Part); 2765 } 2766 Builder.restoreIP(OldIP); 2767 return VectorValue; 2768 } 2769 2770 // If this scalar is unknown, assume that it is a constant or that it is 2771 // loop invariant. Broadcast V and save the value for future uses. 2772 Value *B = getBroadcastInstrs(V); 2773 VectorLoopValueMap.setVectorValue(V, Part, B); 2774 return B; 2775 } 2776 2777 Value * 2778 InnerLoopVectorizer::getOrCreateScalarValue(Value *V, 2779 const VPIteration &Instance) { 2780 // If the value is not an instruction contained in the loop, it should 2781 // already be scalar. 2782 if (OrigLoop->isLoopInvariant(V)) 2783 return V; 2784 2785 assert(Instance.Lane > 0 2786 ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF) 2787 : true && "Uniform values only have lane zero"); 2788 2789 // If the value from the original loop has not been vectorized, it is 2790 // represented by UF x VF scalar values in the new loop. Return the requested 2791 // scalar value. 2792 if (VectorLoopValueMap.hasScalarValue(V, Instance)) 2793 return VectorLoopValueMap.getScalarValue(V, Instance); 2794 2795 // If the value has not been scalarized, get its entry in VectorLoopValueMap 2796 // for the given unroll part. If this entry is not a vector type (i.e., the 2797 // vectorization factor is one), there is no need to generate an 2798 // extractelement instruction. 2799 auto *U = getOrCreateVectorValue(V, Instance.Part); 2800 if (!U->getType()->isVectorTy()) { 2801 assert(VF == 1 && "Value not scalarized has non-vector type"); 2802 return U; 2803 } 2804 2805 // Otherwise, the value from the original loop has been vectorized and is 2806 // represented by UF vector values. Extract and return the requested scalar 2807 // value from the appropriate vector lane. 
2808 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane)); 2809 } 2810 2811 void InnerLoopVectorizer::packScalarIntoVectorValue( 2812 Value *V, const VPIteration &Instance) { 2813 assert(V != Induction && "The new induction variable should not be used."); 2814 assert(!V->getType()->isVectorTy() && "Can't pack a vector"); 2815 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2816 2817 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance); 2818 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part); 2819 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst, 2820 Builder.getInt32(Instance.Lane)); 2821 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue); 2822 } 2823 2824 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2825 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2826 SmallVector<Constant *, 8> ShuffleMask; 2827 for (unsigned i = 0; i < VF; ++i) 2828 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 2829 2830 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 2831 ConstantVector::get(ShuffleMask), 2832 "reverse"); 2833 } 2834 2835 // Try to vectorize the interleave group that \p Instr belongs to. 2836 // 2837 // E.g. Translate following interleaved load group (factor = 3): 2838 // for (i = 0; i < N; i+=3) { 2839 // R = Pic[i]; // Member of index 0 2840 // G = Pic[i+1]; // Member of index 1 2841 // B = Pic[i+2]; // Member of index 2 2842 // ... // do something to R, G, B 2843 // } 2844 // To: 2845 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2846 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 2847 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 2848 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 2849 // 2850 // Or translate following interleaved store group (factor = 3): 2851 // for (i = 0; i < N; i+=3) { 2852 // ... do something to R, G, B 2853 // Pic[i] = R; // Member of index 0 2854 // Pic[i+1] = G; // Member of index 1 2855 // Pic[i+2] = B; // Member of index 2 2856 // } 2857 // To: 2858 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2859 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 2860 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2861 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2862 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2863 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) { 2864 const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr); 2865 assert(Group && "Fail to get an interleaved access group."); 2866 2867 // Skip if current instruction is not the insert position. 2868 if (Instr != Group->getInsertPos()) 2869 return; 2870 2871 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2872 Value *Ptr = getLoadStorePointerOperand(Instr); 2873 2874 // Prepare for the vector type of the interleaved load/store. 2875 Type *ScalarTy = getMemInstValueType(Instr); 2876 unsigned InterleaveFactor = Group->getFactor(); 2877 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF); 2878 Type *PtrTy = VecTy->getPointerTo(getMemInstAddressSpace(Instr)); 2879 2880 // Prepare for the new pointers. 2881 setDebugLocFromInst(Builder, Ptr); 2882 SmallVector<Value *, 2> NewPtrs; 2883 unsigned Index = Group->getIndex(Instr); 2884 2885 // If the group is reverse, adjust the index to refer to the last vector lane 2886 // instead of the first. 
We adjust the index from the first vector lane,
2887 // rather than directly getting the pointer for lane VF - 1, because the
2888 // pointer operand of the interleaved access is supposed to be uniform. For
2889 // uniform instructions, we're only required to generate a value for the
2890 // first vector lane in each unroll iteration.
2891 if (Group->isReverse())
2892 Index += (VF - 1) * Group->getFactor();
2893 
2894 for (unsigned Part = 0; Part < UF; Part++) {
2895 Value *NewPtr = getOrCreateScalarValue(Ptr, {Part, 0});
2896 
2897 // Note that the current instruction could be at any index. We need to
2898 // adjust the address to the member of index 0.
2899 //
2900 // E.g. a = A[i+1]; // Member of index 1 (Current instruction)
2901 // b = A[i]; // Member of index 0
2902 // The current pointer points to A[i+1]; adjust it to A[i].
2903 //
2904 // E.g. A[i+1] = a; // Member of index 1
2905 // A[i] = b; // Member of index 0
2906 // A[i+2] = c; // Member of index 2 (Current instruction)
2907 // The current pointer points to A[i+2]; adjust it to A[i].
2908 NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
2909 
2910 // Cast to the vector pointer type.
2911 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
2912 }
2913 
2914 setDebugLocFromInst(Builder, Instr);
2915 Value *UndefVec = UndefValue::get(VecTy);
2916 
2917 // Vectorize the interleaved load group.
2918 if (isa<LoadInst>(Instr)) {
2919 // For each unroll part, create a wide load for the group.
2920 SmallVector<Value *, 2> NewLoads;
2921 for (unsigned Part = 0; Part < UF; Part++) {
2922 auto *NewLoad = Builder.CreateAlignedLoad(
2923 NewPtrs[Part], Group->getAlignment(), "wide.vec");
2924 Group->addMetadata(NewLoad);
2925 NewLoads.push_back(NewLoad);
2926 }
2927 
2928 // For each member in the group, shuffle out the appropriate data from the
2929 // wide loads.
2930 for (unsigned I = 0; I < InterleaveFactor; ++I) {
2931 Instruction *Member = Group->getMember(I);
2932 
2933 // Skip the gaps in the group.
2934 if (!Member)
2935 continue;
2936 
2937 Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF);
2938 for (unsigned Part = 0; Part < UF; Part++) {
2939 Value *StridedVec = Builder.CreateShuffleVector(
2940 NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2941 
2942 // If this member has a different type, cast the result to that type.
2943 if (Member->getType() != ScalarTy) {
2944 VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2945 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2946 }
2947 
2948 if (Group->isReverse())
2949 StridedVec = reverseVector(StridedVec);
2950 
2951 VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
2952 }
2953 }
2954 return;
2955 }
2956 
2957 // The subvector type for the current instruction.
2958 VectorType *SubVT = VectorType::get(ScalarTy, VF);
2959 
2960 // Vectorize the interleaved store group.
2961 for (unsigned Part = 0; Part < UF; Part++) {
2962 // Collect the stored vector from each member.
2963 SmallVector<Value *, 4> StoredVecs;
2964 for (unsigned i = 0; i < InterleaveFactor; i++) {
2965 // An interleaved store group doesn't allow gaps, so each index has a member.
2966 Instruction *Member = Group->getMember(i);
2967 assert(Member && "Failed to get a member from an interleaved store group");
2968 
2969 Value *StoredVec = getOrCreateVectorValue(
2970 cast<StoreInst>(Member)->getValueOperand(), Part);
2971 if (Group->isReverse())
2972 StoredVec = reverseVector(StoredVec);
2973 
2974 // If this member has a different type, cast it to a unified type.
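// E.g. (illustrative) a <4 x float> member stored into an i32-typed group
// is handled below with a bitcast: bitcast <4 x float> %v to <4 x i32>.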
2975 2976 if (StoredVec->getType() != SubVT) 2977 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2978 2979 StoredVecs.push_back(StoredVec); 2980 } 2981 2982 // Concatenate all vectors into a wide vector. 2983 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2984 2985 // Interleave the elements in the wide vector. 2986 Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor); 2987 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask, 2988 "interleaved.vec"); 2989 2990 Instruction *NewStoreInstr = 2991 Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment()); 2992 2993 Group->addMetadata(NewStoreInstr); 2994 } 2995 } 2996 2997 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr, 2998 VectorParts *BlockInMask) { 2999 // Attempt to issue a wide load. 3000 LoadInst *LI = dyn_cast<LoadInst>(Instr); 3001 StoreInst *SI = dyn_cast<StoreInst>(Instr); 3002 3003 assert((LI || SI) && "Invalid Load/Store instruction"); 3004 3005 LoopVectorizationCostModel::InstWidening Decision = 3006 Cost->getWideningDecision(Instr, VF); 3007 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 3008 "CM decision should be taken at this point"); 3009 if (Decision == LoopVectorizationCostModel::CM_Interleave) 3010 return vectorizeInterleaveGroup(Instr); 3011 3012 Type *ScalarDataTy = getMemInstValueType(Instr); 3013 Type *DataTy = VectorType::get(ScalarDataTy, VF); 3014 Value *Ptr = getLoadStorePointerOperand(Instr); 3015 unsigned Alignment = getMemInstAlignment(Instr); 3016 // An alignment of 0 means target abi alignment. We need to use the scalar's 3017 // target abi alignment in such a case. 3018 const DataLayout &DL = Instr->getModule()->getDataLayout(); 3019 if (!Alignment) 3020 Alignment = DL.getABITypeAlignment(ScalarDataTy); 3021 unsigned AddressSpace = getMemInstAddressSpace(Instr); 3022 3023 // Determine if the pointer operand of the access is either consecutive or 3024 // reverse consecutive. 3025 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 3026 bool ConsecutiveStride = 3027 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 3028 bool CreateGatherScatter = 3029 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 3030 3031 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 3032 // gather/scatter. Otherwise Decision should have been to Scalarize. 3033 assert((ConsecutiveStride || CreateGatherScatter) && 3034 "The instruction should be scalarized"); 3035 3036 // Handle consecutive loads/stores. 3037 if (ConsecutiveStride) 3038 Ptr = getOrCreateScalarValue(Ptr, {0, 0}); 3039 3040 VectorParts Mask; 3041 bool isMaskRequired = BlockInMask; 3042 if (isMaskRequired) 3043 Mask = *BlockInMask; 3044 3045 // Handle Stores: 3046 if (SI) { 3047 setDebugLocFromInst(Builder, SI); 3048 3049 for (unsigned Part = 0; Part < UF; ++Part) { 3050 Instruction *NewSI = nullptr; 3051 Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part); 3052 if (CreateGatherScatter) { 3053 Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr; 3054 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 3055 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 3056 MaskPart); 3057 } else { 3058 // Calculate the pointer for the specific unroll-part. 
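// E.g. for VF = 4, unroll part 1 covers the next four consecutive elements,
// so the GEP below uses an offset of Part * VF = 4.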
3059 Value *PartPtr = 3060 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 3061 3062 if (Reverse) { 3063 // If we store to reverse consecutive memory locations, then we need 3064 // to reverse the order of elements in the stored value. 3065 StoredVal = reverseVector(StoredVal); 3066 // We don't want to update the value in the map as it might be used in 3067 // another expression. So don't call resetVectorValue(StoredVal). 3068 3069 // If the address is consecutive but reversed, then the 3070 // wide store needs to start at the last vector element. 3071 PartPtr = 3072 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 3073 PartPtr = 3074 Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 3075 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 3076 Mask[Part] = reverseVector(Mask[Part]); 3077 } 3078 3079 Value *VecPtr = 3080 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 3081 3082 if (isMaskRequired) 3083 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 3084 Mask[Part]); 3085 else 3086 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 3087 } 3088 addMetadata(NewSI, SI); 3089 } 3090 return; 3091 } 3092 3093 // Handle loads. 3094 assert(LI && "Must have a load instruction"); 3095 setDebugLocFromInst(Builder, LI); 3096 for (unsigned Part = 0; Part < UF; ++Part) { 3097 Value *NewLI; 3098 if (CreateGatherScatter) { 3099 Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr; 3100 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 3101 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 3102 nullptr, "wide.masked.gather"); 3103 addMetadata(NewLI, LI); 3104 } else { 3105 // Calculate the pointer for the specific unroll-part. 3106 Value *PartPtr = 3107 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 3108 3109 if (Reverse) { 3110 // If the address is consecutive but reversed, then the 3111 // wide load needs to start at the last vector element. 3112 PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 3113 PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 3114 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 3115 Mask[Part] = reverseVector(Mask[Part]); 3116 } 3117 3118 Value *VecPtr = 3119 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 3120 if (isMaskRequired) 3121 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 3122 UndefValue::get(DataTy), 3123 "wide.masked.load"); 3124 else 3125 NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load"); 3126 3127 // Add metadata to the load, but setVectorValue to the reverse shuffle. 3128 addMetadata(NewLI, LI); 3129 if (Reverse) 3130 NewLI = reverseVector(NewLI); 3131 } 3132 VectorLoopValueMap.setVectorValue(Instr, Part, NewLI); 3133 } 3134 } 3135 3136 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 3137 const VPIteration &Instance, 3138 bool IfPredicateInstr) { 3139 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 3140 3141 setDebugLocFromInst(Builder, Instr); 3142 3143 // Does this instruction return a value ? 3144 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 3145 3146 Instruction *Cloned = Instr->clone(); 3147 if (!IsVoidRetTy) 3148 Cloned->setName(Instr->getName() + ".cloned"); 3149 3150 // Replace the operands of the cloned instructions with their scalar 3151 // equivalents in the new loop. 
3152 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 3153 auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance); 3154 Cloned->setOperand(op, NewOp); 3155 } 3156 addNewMetadata(Cloned, Instr); 3157 3158 // Place the cloned scalar in the new loop. 3159 Builder.Insert(Cloned); 3160 3161 // Add the cloned scalar to the scalar map entry. 3162 VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned); 3163 3164 // If we just cloned a new assumption, add it the assumption cache. 3165 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 3166 if (II->getIntrinsicID() == Intrinsic::assume) 3167 AC->registerAssumption(II); 3168 3169 // End if-block. 3170 if (IfPredicateInstr) 3171 PredicatedInstructions.push_back(Cloned); 3172 } 3173 3174 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 3175 Value *End, Value *Step, 3176 Instruction *DL) { 3177 BasicBlock *Header = L->getHeader(); 3178 BasicBlock *Latch = L->getLoopLatch(); 3179 // As we're just creating this loop, it's possible no latch exists 3180 // yet. If so, use the header as this will be a single block loop. 3181 if (!Latch) 3182 Latch = Header; 3183 3184 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 3185 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 3186 setDebugLocFromInst(Builder, OldInst); 3187 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 3188 3189 Builder.SetInsertPoint(Latch->getTerminator()); 3190 setDebugLocFromInst(Builder, OldInst); 3191 3192 // Create i+1 and fill the PHINode. 3193 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 3194 Induction->addIncoming(Start, L->getLoopPreheader()); 3195 Induction->addIncoming(Next, Latch); 3196 // Create the compare. 3197 Value *ICmp = Builder.CreateICmpEQ(Next, End); 3198 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 3199 3200 // Now we have two terminators. Remove the old one from the block. 3201 Latch->getTerminator()->eraseFromParent(); 3202 3203 return Induction; 3204 } 3205 3206 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 3207 if (TripCount) 3208 return TripCount; 3209 3210 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3211 // Find the loop boundaries. 3212 ScalarEvolution *SE = PSE.getSE(); 3213 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 3214 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 3215 "Invalid loop count"); 3216 3217 Type *IdxTy = Legal->getWidestInductionType(); 3218 3219 // The exit count might have the type of i64 while the phi is i32. This can 3220 // happen if we have an induction variable that is sign extended before the 3221 // compare. The only way that we get a backedge taken count is that the 3222 // induction variable was signed and as such will not overflow. In such a case 3223 // truncation is legal. 3224 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() > 3225 IdxTy->getPrimitiveSizeInBits()) 3226 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3227 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3228 3229 // Get the total trip count from the count by adding 1. 3230 const SCEV *ExitCount = SE->getAddExpr( 3231 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3232 3233 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3234 3235 // Expand the trip count and place the new instructions in the preheader. 3236 // Notice that the pre-header does not change, only the loop body. 
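// E.g. for "for (i = 0; i != n; ++i)" the backedge-taken count is n - 1 and
// the trip count expanded below is (n - 1) + 1 == n.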
3237 SCEVExpander Exp(*SE, DL, "induction"); 3238 3239 // Count holds the overall loop count (N). 3240 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3241 L->getLoopPreheader()->getTerminator()); 3242 3243 if (TripCount->getType()->isPointerTy()) 3244 TripCount = 3245 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3246 L->getLoopPreheader()->getTerminator()); 3247 3248 return TripCount; 3249 } 3250 3251 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3252 if (VectorTripCount) 3253 return VectorTripCount; 3254 3255 Value *TC = getOrCreateTripCount(L); 3256 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3257 3258 // Now we need to generate the expression for the part of the loop that the 3259 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3260 // iterations are not required for correctness, or N - Step, otherwise. Step 3261 // is equal to the vectorization factor (number of SIMD elements) times the 3262 // unroll factor (number of SIMD instructions). 3263 Constant *Step = ConstantInt::get(TC->getType(), VF * UF); 3264 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3265 3266 // If there is a non-reversed interleaved group that may speculatively access 3267 // memory out-of-bounds, we need to ensure that there will be at least one 3268 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 3269 // the trip count, we set the remainder to be equal to the step. If the step 3270 // does not evenly divide the trip count, no adjustment is necessary since 3271 // there will already be scalar iterations. Note that the minimum iterations 3272 // check ensures that N >= Step. 3273 if (VF > 1 && Legal->requiresScalarEpilogue()) { 3274 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3275 R = Builder.CreateSelect(IsZero, Step, R); 3276 } 3277 3278 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3279 3280 return VectorTripCount; 3281 } 3282 3283 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 3284 const DataLayout &DL) { 3285 // Verify that V is a vector type with same number of elements as DstVTy. 3286 unsigned VF = DstVTy->getNumElements(); 3287 VectorType *SrcVecTy = cast<VectorType>(V->getType()); 3288 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 3289 Type *SrcElemTy = SrcVecTy->getElementType(); 3290 Type *DstElemTy = DstVTy->getElementType(); 3291 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 3292 "Vector elements must have same size"); 3293 3294 // Do a direct cast if element types are castable. 3295 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 3296 return Builder.CreateBitOrPointerCast(V, DstVTy); 3297 } 3298 // V cannot be directly casted to desired vector type. 3299 // May happen when V is a floating point vector but DstVTy is a vector of 3300 // pointers or vice-versa. Handle this using a two-step bitcast using an 3301 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 
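// E.g. (illustrative, assuming 32-bit pointers) casting <4 x float> to
// <4 x i32*> goes through an intermediate integer vector:
//   %int = bitcast <4 x float> %v to <4 x i32>
//   %ptr = inttoptr <4 x i32> %int to <4 x i32*>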
3302 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 3303 "Only one type should be a pointer type"); 3304 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 3305 "Only one type should be a floating point type"); 3306 Type *IntTy = 3307 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 3308 VectorType *VecIntTy = VectorType::get(IntTy, VF); 3309 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 3310 return Builder.CreateBitOrPointerCast(CastVal, DstVTy); 3311 } 3312 3313 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3314 BasicBlock *Bypass) { 3315 Value *Count = getOrCreateTripCount(L); 3316 BasicBlock *BB = L->getLoopPreheader(); 3317 IRBuilder<> Builder(BB->getTerminator()); 3318 3319 // Generate code to check if the loop's trip count is less than VF * UF, or 3320 // equal to it in case a scalar epilogue is required; this implies that the 3321 // vector trip count is zero. This check also covers the case where adding one 3322 // to the backedge-taken count overflowed leading to an incorrect trip count 3323 // of zero. In this case we will also jump to the scalar loop. 3324 auto P = Legal->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE 3325 : ICmpInst::ICMP_ULT; 3326 Value *CheckMinIters = Builder.CreateICmp( 3327 P, Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check"); 3328 3329 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 3330 // Update dominator tree immediately if the generated block is a 3331 // LoopBypassBlock because SCEV expansions to generate loop bypass 3332 // checks may query it before the current function is finished. 3333 DT->addNewBlock(NewBB, BB); 3334 if (L->getParentLoop()) 3335 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 3336 ReplaceInstWithInst(BB->getTerminator(), 3337 BranchInst::Create(Bypass, NewBB, CheckMinIters)); 3338 LoopBypassBlocks.push_back(BB); 3339 } 3340 3341 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 3342 BasicBlock *BB = L->getLoopPreheader(); 3343 3344 // Generate the code to check that the SCEV assumptions that we made. 3345 // We want the new basic block to start at the first instruction in a 3346 // sequence of instructions that form a check. 3347 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(), 3348 "scev.check"); 3349 Value *SCEVCheck = 3350 Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator()); 3351 3352 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck)) 3353 if (C->isZero()) 3354 return; 3355 3356 // Create a new block containing the stride check. 3357 BB->setName("vector.scevcheck"); 3358 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 3359 // Update dominator tree immediately if the generated block is a 3360 // LoopBypassBlock because SCEV expansions to generate loop bypass 3361 // checks may query it before the current function is finished. 3362 DT->addNewBlock(NewBB, BB); 3363 if (L->getParentLoop()) 3364 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 3365 ReplaceInstWithInst(BB->getTerminator(), 3366 BranchInst::Create(Bypass, NewBB, SCEVCheck)); 3367 LoopBypassBlocks.push_back(BB); 3368 AddedSafetyChecks = true; 3369 } 3370 3371 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) { 3372 BasicBlock *BB = L->getLoopPreheader(); 3373 3374 // Generate the code that checks in runtime if arrays overlap. 
We put the 3375 // checks into a separate block to make the more common case of few elements 3376 // faster. 3377 Instruction *FirstCheckInst; 3378 Instruction *MemRuntimeCheck; 3379 std::tie(FirstCheckInst, MemRuntimeCheck) = 3380 Legal->getLAI()->addRuntimeChecks(BB->getTerminator()); 3381 if (!MemRuntimeCheck) 3382 return; 3383 3384 // Create a new block containing the memory check. 3385 BB->setName("vector.memcheck"); 3386 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 3387 // Update dominator tree immediately if the generated block is a 3388 // LoopBypassBlock because SCEV expansions to generate loop bypass 3389 // checks may query it before the current function is finished. 3390 DT->addNewBlock(NewBB, BB); 3391 if (L->getParentLoop()) 3392 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 3393 ReplaceInstWithInst(BB->getTerminator(), 3394 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck)); 3395 LoopBypassBlocks.push_back(BB); 3396 AddedSafetyChecks = true; 3397 3398 // We currently don't use LoopVersioning for the actual loop cloning but we 3399 // still use it to add the noalias metadata. 3400 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 3401 PSE.getSE()); 3402 LVer->prepareNoAliasMetadata(); 3403 } 3404 3405 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3406 /* 3407 In this function we generate a new loop. The new loop will contain 3408 the vectorized instructions while the old loop will continue to run the 3409 scalar remainder. 3410 3411 [ ] <-- loop iteration number check. 3412 / | 3413 / v 3414 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3415 | / | 3416 | / v 3417 || [ ] <-- vector pre header. 3418 |/ | 3419 | v 3420 | [ ] \ 3421 | [ ]_| <-- vector loop. 3422 | | 3423 | v 3424 | -[ ] <--- middle-block. 3425 | / | 3426 | / v 3427 -|- >[ ] <--- new preheader. 3428 | | 3429 | v 3430 | [ ] \ 3431 | [ ]_| <-- old scalar loop to handle remainder. 3432 \ | 3433 \ v 3434 >[ ] <-- exit block. 3435 ... 3436 */ 3437 3438 BasicBlock *OldBasicBlock = OrigLoop->getHeader(); 3439 BasicBlock *VectorPH = OrigLoop->getLoopPreheader(); 3440 BasicBlock *ExitBlock = OrigLoop->getExitBlock(); 3441 assert(VectorPH && "Invalid loop structure"); 3442 assert(ExitBlock && "Must have an exit block"); 3443 3444 // Some loops have a single integer induction variable, while other loops 3445 // don't. One example is c++ iterators that often have multiple pointer 3446 // induction variables. In the code below we also support a case where we 3447 // don't have a single induction variable. 3448 // 3449 // We try to obtain an induction variable from the original loop as hard 3450 // as possible. However if we don't find one that: 3451 // - is an integer 3452 // - counts from zero, stepping by one 3453 // - is the size of the widest induction variable type 3454 // then we create a new one. 3455 OldInduction = Legal->getPrimaryInduction(); 3456 Type *IdxTy = Legal->getWidestInductionType(); 3457 3458 // Split the single block loop into the two loop structure described above. 3459 BasicBlock *VecBody = 3460 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body"); 3461 BasicBlock *MiddleBlock = 3462 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block"); 3463 BasicBlock *ScalarPH = 3464 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph"); 3465 3466 // Create and register the new vector loop. 
3467 Loop *Lp = LI->AllocateLoop(); 3468 Loop *ParentLoop = OrigLoop->getParentLoop(); 3469 3470 // Insert the new loop into the loop nest and register the new basic blocks 3471 // before calling any utilities such as SCEV that require valid LoopInfo. 3472 if (ParentLoop) { 3473 ParentLoop->addChildLoop(Lp); 3474 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI); 3475 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI); 3476 } else { 3477 LI->addTopLevelLoop(Lp); 3478 } 3479 Lp->addBasicBlockToLoop(VecBody, *LI); 3480 3481 // Find the loop boundaries. 3482 Value *Count = getOrCreateTripCount(Lp); 3483 3484 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3485 3486 // Now, compare the new count to zero. If it is zero skip the vector loop and 3487 // jump to the scalar loop. This check also covers the case where the 3488 // backedge-taken count is uint##_max: adding one to it will overflow leading 3489 // to an incorrect trip count of zero. In this (rare) case we will also jump 3490 // to the scalar loop. 3491 emitMinimumIterationCountCheck(Lp, ScalarPH); 3492 3493 // Generate the code to check any assumptions that we've made for SCEV 3494 // expressions. 3495 emitSCEVChecks(Lp, ScalarPH); 3496 3497 // Generate the code that checks in runtime if arrays overlap. We put the 3498 // checks into a separate block to make the more common case of few elements 3499 // faster. 3500 emitMemRuntimeChecks(Lp, ScalarPH); 3501 3502 // Generate the induction variable. 3503 // The loop step is equal to the vectorization factor (num of SIMD elements) 3504 // times the unroll factor (num of SIMD instructions). 3505 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3506 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 3507 Induction = 3508 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3509 getDebugLocFromInstOrOperands(OldInduction)); 3510 3511 // We are going to resume the execution of the scalar loop. 3512 // Go over all of the induction variables that we found and fix the 3513 // PHIs that are left in the scalar version of the loop. 3514 // The starting values of PHI nodes depend on the counter of the last 3515 // iteration in the vectorized loop. 3516 // If we come from a bypass edge then we need to start from the original 3517 // start value. 3518 3519 // This variable saves the new starting index for the scalar loop. It is used 3520 // to test if there are any tail iterations left once the vector loop has 3521 // completed. 3522 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars(); 3523 for (auto &InductionEntry : *List) { 3524 PHINode *OrigPhi = InductionEntry.first; 3525 InductionDescriptor II = InductionEntry.second; 3526 3527 // Create phi nodes to merge from the backedge-taken check block. 3528 PHINode *BCResumeVal = PHINode::Create( 3529 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator()); 3530 Value *&EndValue = IVEndValues[OrigPhi]; 3531 if (OrigPhi == OldInduction) { 3532 // We know what the end value is. 
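// For the primary induction the resume value is the vector trip count
// itself, since the vector loop executes exactly CountRoundDown of the
// original scalar iterations.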
3533 EndValue = CountRoundDown; 3534 } else { 3535 IRBuilder<> B(Lp->getLoopPreheader()->getTerminator()); 3536 Type *StepType = II.getStep()->getType(); 3537 Instruction::CastOps CastOp = 3538 CastInst::getCastOpcode(CountRoundDown, true, StepType, true); 3539 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd"); 3540 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 3541 EndValue = II.transform(B, CRD, PSE.getSE(), DL); 3542 EndValue->setName("ind.end"); 3543 } 3544 3545 // The new PHI merges the original incoming value, in case of a bypass, 3546 // or the value at the end of the vectorized loop. 3547 BCResumeVal->addIncoming(EndValue, MiddleBlock); 3548 3549 // Fix the scalar body counter (PHI node). 3550 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH); 3551 3552 // The old induction's phi node in the scalar body needs the truncated 3553 // value. 3554 for (BasicBlock *BB : LoopBypassBlocks) 3555 BCResumeVal->addIncoming(II.getStartValue(), BB); 3556 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal); 3557 } 3558 3559 // Add a check in the middle block to see if we have completed 3560 // all of the iterations in the first vector loop. 3561 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3562 Value *CmpN = 3563 CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 3564 CountRoundDown, "cmp.n", MiddleBlock->getTerminator()); 3565 ReplaceInstWithInst(MiddleBlock->getTerminator(), 3566 BranchInst::Create(ExitBlock, ScalarPH, CmpN)); 3567 3568 // Get ready to start creating new instructions into the vectorized body. 3569 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt()); 3570 3571 // Save the state. 3572 LoopVectorPreHeader = Lp->getLoopPreheader(); 3573 LoopScalarPreHeader = ScalarPH; 3574 LoopMiddleBlock = MiddleBlock; 3575 LoopExitBlock = ExitBlock; 3576 LoopVectorBody = VecBody; 3577 LoopScalarBody = OldBasicBlock; 3578 3579 // Keep all loop hints from the original loop on the vector loop (we'll 3580 // replace the vectorizer-specific hints below). 3581 if (MDNode *LID = OrigLoop->getLoopID()) 3582 Lp->setLoopID(LID); 3583 3584 LoopVectorizeHints Hints(Lp, true, *ORE); 3585 Hints.setAlreadyVectorized(); 3586 3587 return LoopVectorPreHeader; 3588 } 3589 3590 // Fix up external users of the induction variable. At this point, we are 3591 // in LCSSA form, with all external PHIs that use the IV having one input value, 3592 // coming from the remainder loop. We need those PHIs to also have a correct 3593 // value for the IV when arriving directly from the middle block. 3594 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3595 const InductionDescriptor &II, 3596 Value *CountRoundDown, Value *EndValue, 3597 BasicBlock *MiddleBlock) { 3598 // There are two kinds of external IV usages - those that use the value 3599 // computed in the last iteration (the PHI) and those that use the penultimate 3600 // value (the value that feeds into the phi from the loop latch). 3601 // We allow both, but they, obviously, have different values. 3602 3603 assert(OrigLoop->getExitBlock() && "Expected a single exit block"); 3604 3605 DenseMap<Value *, Value *> MissingVals; 3606 3607 // An external user of the last iteration's value should see the value that 3608 // the remainder loop uses to initialize its own IV. 
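// E.g. in "for (i = 0; i < n; ++i) { ... } use(i);" the LCSSA phi for i in
// the exit block must receive EndValue when control reaches the exit
// directly from the middle block (i.e. no scalar remainder iterations run).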
3609 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3610 for (User *U : PostInc->users()) { 3611 Instruction *UI = cast<Instruction>(U); 3612 if (!OrigLoop->contains(UI)) { 3613 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3614 MissingVals[UI] = EndValue; 3615 } 3616 } 3617 3618 // An external user of the penultimate value need to see EndValue - Step. 3619 // The simplest way to get this is to recompute it from the constituent SCEVs, 3620 // that is Start + (Step * (CRD - 1)). 3621 for (User *U : OrigPhi->users()) { 3622 auto *UI = cast<Instruction>(U); 3623 if (!OrigLoop->contains(UI)) { 3624 const DataLayout &DL = 3625 OrigLoop->getHeader()->getModule()->getDataLayout(); 3626 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3627 3628 IRBuilder<> B(MiddleBlock->getTerminator()); 3629 Value *CountMinusOne = B.CreateSub( 3630 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3631 Value *CMO = 3632 !II.getStep()->getType()->isIntegerTy() 3633 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3634 II.getStep()->getType()) 3635 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3636 CMO->setName("cast.cmo"); 3637 Value *Escape = II.transform(B, CMO, PSE.getSE(), DL); 3638 Escape->setName("ind.escape"); 3639 MissingVals[UI] = Escape; 3640 } 3641 } 3642 3643 for (auto &I : MissingVals) { 3644 PHINode *PHI = cast<PHINode>(I.first); 3645 // One corner case we have to handle is two IVs "chasing" each-other, 3646 // that is %IV2 = phi [...], [ %IV1, %latch ] 3647 // In this case, if IV1 has an external use, we need to avoid adding both 3648 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3649 // don't already have an incoming value for the middle block. 3650 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3651 PHI->addIncoming(I.second, MiddleBlock); 3652 } 3653 } 3654 3655 namespace { 3656 3657 struct CSEDenseMapInfo { 3658 static bool canHandle(const Instruction *I) { 3659 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3660 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3661 } 3662 3663 static inline Instruction *getEmptyKey() { 3664 return DenseMapInfo<Instruction *>::getEmptyKey(); 3665 } 3666 3667 static inline Instruction *getTombstoneKey() { 3668 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3669 } 3670 3671 static unsigned getHashValue(const Instruction *I) { 3672 assert(canHandle(I) && "Unknown instruction!"); 3673 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3674 I->value_op_end())); 3675 } 3676 3677 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3678 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3679 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3680 return LHS == RHS; 3681 return LHS->isIdenticalTo(RHS); 3682 } 3683 }; 3684 3685 } // end anonymous namespace 3686 3687 ///\brief Perform cse of induction variable instructions. 3688 static void cse(BasicBlock *BB) { 3689 // Perform simple cse. 3690 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3691 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3692 Instruction *In = &*I++; 3693 3694 if (!CSEDenseMapInfo::canHandle(In)) 3695 continue; 3696 3697 // Check if we can replace this instruction with any of the 3698 // visited instructions. 
3699 if (Instruction *V = CSEMap.lookup(In)) { 3700 In->replaceAllUsesWith(V); 3701 In->eraseFromParent(); 3702 continue; 3703 } 3704 3705 CSEMap[In] = In; 3706 } 3707 } 3708 3709 /// \brief Estimate the overhead of scalarizing an instruction. This is a 3710 /// convenience wrapper for the type-based getScalarizationOverhead API. 3711 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF, 3712 const TargetTransformInfo &TTI) { 3713 if (VF == 1) 3714 return 0; 3715 3716 unsigned Cost = 0; 3717 Type *RetTy = ToVectorTy(I->getType(), VF); 3718 if (!RetTy->isVoidTy() && 3719 (!isa<LoadInst>(I) || 3720 !TTI.supportsEfficientVectorElementLoadStore())) 3721 Cost += TTI.getScalarizationOverhead(RetTy, true, false); 3722 3723 if (CallInst *CI = dyn_cast<CallInst>(I)) { 3724 SmallVector<const Value *, 4> Operands(CI->arg_operands()); 3725 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF); 3726 } 3727 else if (!isa<StoreInst>(I) || 3728 !TTI.supportsEfficientVectorElementLoadStore()) { 3729 SmallVector<const Value *, 4> Operands(I->operand_values()); 3730 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF); 3731 } 3732 3733 return Cost; 3734 } 3735 3736 // Estimate cost of a call instruction CI if it were vectorized with factor VF. 3737 // Return the cost of the instruction, including scalarization overhead if it's 3738 // needed. The flag NeedToScalarize shows if the call needs to be scalarized - 3739 // i.e. either vector version isn't available, or is too expensive. 3740 static unsigned getVectorCallCost(CallInst *CI, unsigned VF, 3741 const TargetTransformInfo &TTI, 3742 const TargetLibraryInfo *TLI, 3743 bool &NeedToScalarize) { 3744 Function *F = CI->getCalledFunction(); 3745 StringRef FnName = CI->getCalledFunction()->getName(); 3746 Type *ScalarRetTy = CI->getType(); 3747 SmallVector<Type *, 4> Tys, ScalarTys; 3748 for (auto &ArgOp : CI->arg_operands()) 3749 ScalarTys.push_back(ArgOp->getType()); 3750 3751 // Estimate cost of scalarized vector call. The source operands are assumed 3752 // to be vectors, so we need to extract individual elements from there, 3753 // execute VF scalar calls, and then gather the result into the vector return 3754 // value. 3755 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3756 if (VF == 1) 3757 return ScalarCallCost; 3758 3759 // Compute corresponding vector type for return value and arguments. 3760 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3761 for (Type *ScalarTy : ScalarTys) 3762 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3763 3764 // Compute costs of unpacking argument values for the scalar calls and 3765 // packing the return values to a vector. 3766 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI); 3767 3768 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3769 3770 // If we can't emit a vector call for this function, then the currently found 3771 // cost is the cost we need to return. 3772 NeedToScalarize = true; 3773 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3774 return Cost; 3775 3776 // If the corresponding vector cost is cheaper, return its cost. 3777 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3778 if (VectorCallCost < Cost) { 3779 NeedToScalarize = false; 3780 return VectorCallCost; 3781 } 3782 return Cost; 3783 } 3784 3785 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3786 // factor VF. 
Return the cost of the instruction, including scalarization 3787 // overhead if it's needed. 3788 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3789 const TargetTransformInfo &TTI, 3790 const TargetLibraryInfo *TLI) { 3791 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3792 assert(ID && "Expected intrinsic call!"); 3793 3794 FastMathFlags FMF; 3795 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3796 FMF = FPMO->getFastMathFlags(); 3797 3798 SmallVector<Value *, 4> Operands(CI->arg_operands()); 3799 return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF); 3800 } 3801 3802 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3803 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3804 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3805 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3806 } 3807 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3808 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3809 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3810 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3811 } 3812 3813 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3814 // For every instruction `I` in MinBWs, truncate the operands, create a 3815 // truncated version of `I` and reextend its result. InstCombine runs 3816 // later and will remove any ext/trunc pairs. 3817 SmallPtrSet<Value *, 4> Erased; 3818 for (const auto &KV : Cost->getMinimalBitwidths()) { 3819 // If the value wasn't vectorized, we must maintain the original scalar 3820 // type. The absence of the value from VectorLoopValueMap indicates that it 3821 // wasn't vectorized. 3822 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3823 continue; 3824 for (unsigned Part = 0; Part < UF; ++Part) { 3825 Value *I = getOrCreateVectorValue(KV.first, Part); 3826 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3827 continue; 3828 Type *OriginalTy = I->getType(); 3829 Type *ScalarTruncatedTy = 3830 IntegerType::get(OriginalTy->getContext(), KV.second); 3831 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3832 OriginalTy->getVectorNumElements()); 3833 if (TruncatedTy == OriginalTy) 3834 continue; 3835 3836 IRBuilder<> B(cast<Instruction>(I)); 3837 auto ShrinkOperand = [&](Value *V) -> Value * { 3838 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3839 if (ZI->getSrcTy() == TruncatedTy) 3840 return ZI->getOperand(0); 3841 return B.CreateZExtOrTrunc(V, TruncatedTy); 3842 }; 3843 3844 // The actual instruction modification depends on the instruction type, 3845 // unfortunately. 3846 Value *NewI = nullptr; 3847 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3848 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3849 ShrinkOperand(BO->getOperand(1))); 3850 3851 // Any wrapping introduced by shrinking this operation shouldn't be 3852 // considered undefined behavior. So, we can't unconditionally copy 3853 // arithmetic wrapping flags to NewI. 
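// E.g. an "add nuw nsw i32" narrowed to i16 may wrap in the narrow type, so
// nuw/nsw are not copied here while the remaining IR flags are preserved.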
3854 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3855 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3856 NewI = 3857 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3858 ShrinkOperand(CI->getOperand(1))); 3859 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3860 NewI = B.CreateSelect(SI->getCondition(), 3861 ShrinkOperand(SI->getTrueValue()), 3862 ShrinkOperand(SI->getFalseValue())); 3863 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3864 switch (CI->getOpcode()) { 3865 default: 3866 llvm_unreachable("Unhandled cast!"); 3867 case Instruction::Trunc: 3868 NewI = ShrinkOperand(CI->getOperand(0)); 3869 break; 3870 case Instruction::SExt: 3871 NewI = B.CreateSExtOrTrunc( 3872 CI->getOperand(0), 3873 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3874 break; 3875 case Instruction::ZExt: 3876 NewI = B.CreateZExtOrTrunc( 3877 CI->getOperand(0), 3878 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3879 break; 3880 } 3881 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3882 auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements(); 3883 auto *O0 = B.CreateZExtOrTrunc( 3884 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3885 auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements(); 3886 auto *O1 = B.CreateZExtOrTrunc( 3887 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3888 3889 NewI = B.CreateShuffleVector(O0, O1, SI->getMask()); 3890 } else if (isa<LoadInst>(I)) { 3891 // Don't do anything with the operands, just extend the result. 3892 continue; 3893 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3894 auto Elements = IE->getOperand(0)->getType()->getVectorNumElements(); 3895 auto *O0 = B.CreateZExtOrTrunc( 3896 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3897 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3898 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3899 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3900 auto Elements = EE->getOperand(0)->getType()->getVectorNumElements(); 3901 auto *O0 = B.CreateZExtOrTrunc( 3902 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3903 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3904 } else { 3905 llvm_unreachable("Unhandled instruction type!"); 3906 } 3907 3908 // Lastly, extend the result. 3909 NewI->takeName(cast<Instruction>(I)); 3910 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3911 I->replaceAllUsesWith(Res); 3912 cast<Instruction>(I)->eraseFromParent(); 3913 Erased.insert(I); 3914 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res); 3915 } 3916 } 3917 3918 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3919 for (const auto &KV : Cost->getMinimalBitwidths()) { 3920 // If the value wasn't vectorized, we must maintain the original scalar 3921 // type. The absence of the value from VectorLoopValueMap indicates that it 3922 // wasn't vectorized. 
3923 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3924 continue; 3925 for (unsigned Part = 0; Part < UF; ++Part) { 3926 Value *I = getOrCreateVectorValue(KV.first, Part); 3927 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3928 if (Inst && Inst->use_empty()) { 3929 Value *NewI = Inst->getOperand(0); 3930 Inst->eraseFromParent(); 3931 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI); 3932 } 3933 } 3934 } 3935 } 3936 3937 void InnerLoopVectorizer::fixVectorizedLoop() { 3938 // Insert truncates and extends for any truncated instructions as hints to 3939 // InstCombine. 3940 if (VF > 1) 3941 truncateToMinimalBitwidths(); 3942 3943 // At this point every instruction in the original loop is widened to a 3944 // vector form. Now we need to fix the recurrences in the loop. These PHI 3945 // nodes are currently empty because we did not want to introduce cycles. 3946 // This is the second stage of vectorizing recurrences. 3947 fixCrossIterationPHIs(); 3948 3949 // Update the dominator tree. 3950 // 3951 // FIXME: After creating the structure of the new loop, the dominator tree is 3952 // no longer up-to-date, and it remains that way until we update it 3953 // here. An out-of-date dominator tree is problematic for SCEV, 3954 // because SCEVExpander uses it to guide code generation. The 3955 // vectorizer use SCEVExpanders in several places. Instead, we should 3956 // keep the dominator tree up-to-date as we go. 3957 updateAnalysis(); 3958 3959 // Fix-up external users of the induction variables. 3960 for (auto &Entry : *Legal->getInductionVars()) 3961 fixupIVUsers(Entry.first, Entry.second, 3962 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 3963 IVEndValues[Entry.first], LoopMiddleBlock); 3964 3965 fixLCSSAPHIs(); 3966 for (Instruction *PI : PredicatedInstructions) 3967 sinkScalarOperands(&*PI); 3968 3969 // Remove redundant induction instructions. 3970 cse(LoopVectorBody); 3971 } 3972 3973 void InnerLoopVectorizer::fixCrossIterationPHIs() { 3974 // In order to support recurrences we need to be able to vectorize Phi nodes. 3975 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3976 // stage #2: We now need to fix the recurrences by adding incoming edges to 3977 // the currently empty PHI nodes. At this point every instruction in the 3978 // original loop is widened to a vector form so we can use them to construct 3979 // the incoming edges. 3980 for (PHINode &Phi : OrigLoop->getHeader()->phis()) { 3981 // Handle first-order recurrences and reductions that need to be fixed. 3982 if (Legal->isFirstOrderRecurrence(&Phi)) 3983 fixFirstOrderRecurrence(&Phi); 3984 else if (Legal->isReductionVariable(&Phi)) 3985 fixReduction(&Phi); 3986 } 3987 } 3988 3989 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) { 3990 // This is the second phase of vectorizing first-order recurrences. An 3991 // overview of the transformation is described below. Suppose we have the 3992 // following loop. 3993 // 3994 // for (int i = 0; i < n; ++i) 3995 // b[i] = a[i] - a[i - 1]; 3996 // 3997 // There is a first-order recurrence on "a". For this loop, the shorthand 3998 // scalar IR looks like: 3999 // 4000 // scalar.ph: 4001 // s_init = a[-1] 4002 // br scalar.body 4003 // 4004 // scalar.body: 4005 // i = phi [0, scalar.ph], [i+1, scalar.body] 4006 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 4007 // s2 = a[i] 4008 // b[i] = s2 - s1 4009 // br cond, scalar.body, ... 
4010 //
4011 // In this example, s1 is a recurrence because its value depends on the
4012 // previous iteration. In the first phase of vectorization, we created a
4013 // temporary value for s1. We now complete the vectorization and produce the
4014 // shorthand vector IR shown below (for VF = 4, UF = 1).
4015 //
4016 // vector.ph:
4017 // v_init = vector(..., ..., ..., a[-1])
4018 // br vector.body
4019 //
4020 // vector.body:
4021 // i = phi [0, vector.ph], [i+4, vector.body]
4022 // v1 = phi [v_init, vector.ph], [v2, vector.body]
4023 // v2 = a[i, i+1, i+2, i+3];
4024 // v3 = vector(v1(3), v2(0, 1, 2))
4025 // b[i, i+1, i+2, i+3] = v2 - v3
4026 // br cond, vector.body, middle.block
4027 //
4028 // middle.block:
4029 // x = v2(3)
4030 // br scalar.ph
4031 //
4032 // scalar.ph:
4033 // s_init = phi [x, middle.block], [a[-1], otherwise]
4034 // br scalar.body
4035 //
4036 // After the vector loop completes execution, we extract the next value of
4037 // the recurrence (x) to use as the initial value in the scalar loop.
4038 
4039 // Get the original loop preheader and single loop latch.
4040 auto *Preheader = OrigLoop->getLoopPreheader();
4041 auto *Latch = OrigLoop->getLoopLatch();
4042 
4043 // Get the initial and previous values of the scalar recurrence.
4044 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4045 auto *Previous = Phi->getIncomingValueForBlock(Latch);
4046 
4047 // Create a vector from the initial value.
4048 auto *VectorInit = ScalarInit;
4049 if (VF > 1) {
4050 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4051 VectorInit = Builder.CreateInsertElement(
4052 UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
4053 Builder.getInt32(VF - 1), "vector.recur.init");
4054 }
4055 
4056 // We constructed a temporary phi node in the first phase of vectorization.
4057 // This phi node will eventually be deleted.
4058 Builder.SetInsertPoint(
4059 cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
4060 
4061 // Create a phi node for the new recurrence. The current value will either be
4062 // the initial value inserted into a vector or the loop-varying vector value.
4063 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4064 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4065 
4066 // Get the vectorized previous value of the last part UF - 1. It appears last
4067 // among all unrolled iterations, due to the order of their construction.
4068 Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
4069 
4070 // Set the insertion point after the previous value if it is an instruction.
4071 // Note that the previous value may have been constant-folded so it is not
4072 // guaranteed to be an instruction in the vector loop. Also, if the previous
4073 // value is a phi node, we should insert after all the phi nodes to avoid
4074 // breaking basic block verification.
4075 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) ||
4076 isa<PHINode>(PreviousLastPart))
4077 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
4078 else
4079 Builder.SetInsertPoint(
4080 &*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart)));
4081 
4082 // We will construct a vector for the recurrence by combining the values for
4083 // the current and previous iterations. This is the required shuffle mask.
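// E.g. for VF = 4 the mask below is <3, 4, 5, 6>: the last lane of the
// incoming vector followed by the first three lanes of the previous part.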
4084 SmallVector<Constant *, 8> ShuffleMask(VF); 4085 ShuffleMask[0] = Builder.getInt32(VF - 1); 4086 for (unsigned I = 1; I < VF; ++I) 4087 ShuffleMask[I] = Builder.getInt32(I + VF - 1); 4088 4089 // The vector from which to take the initial value for the current iteration 4090 // (actual or unrolled). Initially, this is the vector phi node. 4091 Value *Incoming = VecPhi; 4092 4093 // Shuffle the current and previous vector and update the vector parts. 4094 for (unsigned Part = 0; Part < UF; ++Part) { 4095 Value *PreviousPart = getOrCreateVectorValue(Previous, Part); 4096 Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part); 4097 auto *Shuffle = 4098 VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart, 4099 ConstantVector::get(ShuffleMask)) 4100 : Incoming; 4101 PhiPart->replaceAllUsesWith(Shuffle); 4102 cast<Instruction>(PhiPart)->eraseFromParent(); 4103 VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle); 4104 Incoming = PreviousPart; 4105 } 4106 4107 // Fix the latch value of the new recurrence in the vector loop. 4108 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4109 4110 // Extract the last vector element in the middle block. This will be the 4111 // initial value for the recurrence when jumping to the scalar loop. 4112 auto *ExtractForScalar = Incoming; 4113 if (VF > 1) { 4114 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4115 ExtractForScalar = Builder.CreateExtractElement( 4116 ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract"); 4117 } 4118 // Extract the second last element in the middle block if the 4119 // Phi is used outside the loop. We need to extract the phi itself 4120 // and not the last element (the phi update in the current iteration). This 4121 // will be the value when jumping to the exit block from the LoopMiddleBlock, 4122 // when the scalar loop is not run at all. 4123 Value *ExtractForPhiUsedOutsideLoop = nullptr; 4124 if (VF > 1) 4125 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 4126 Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi"); 4127 // When loop is unrolled without vectorizing, initialize 4128 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value of 4129 // `Incoming`. This is analogous to the vectorized case above: extracting the 4130 // second last element when VF > 1. 4131 else if (UF > 1) 4132 ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2); 4133 4134 // Fix the initial value of the original recurrence in the scalar loop. 4135 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 4136 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 4137 for (auto *BB : predecessors(LoopScalarPreHeader)) { 4138 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 4139 Start->addIncoming(Incoming, BB); 4140 } 4141 4142 Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start); 4143 Phi->setName("scalar.recur"); 4144 4145 // Finally, fix users of the recurrence outside the loop. The users will need 4146 // either the last value of the scalar recurrence or the last value of the 4147 // vector recurrence we extracted in the middle block. Since the loop is in 4148 // LCSSA form, we just need to find the phi node for the original scalar 4149 // recurrence in the exit block, and then add an edge for the middle block. 
4150   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4151     if (LCSSAPhi.getIncomingValue(0) == Phi) {
4152       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4153       break;
4154     }
4155   }
4156 }
4157 
4158 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
4159   Constant *Zero = Builder.getInt32(0);
4160 
4161   // Get its reduction variable descriptor.
4162   assert(Legal->isReductionVariable(Phi) &&
4163          "Unable to find the reduction variable");
4164   RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
4165 
4166   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
4167   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4168   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4169   RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
4170       RdxDesc.getMinMaxRecurrenceKind();
4171   setDebugLocFromInst(Builder, ReductionStartValue);
4172 
4173   // We need to generate a reduction vector from the incoming scalar.
4174   // To do so, we need to generate the 'identity' vector and override
4175   // one of the elements with the incoming scalar reduction. We need
4176   // to do it in the vector-loop preheader.
4177   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4178 
4179   // This is the vector-clone of the value that leaves the loop.
4180   Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
4181 
4182   // Find the reduction identity value: zero for addition, or and xor; one for
4183   // multiplication; all-ones for and.
4184   Value *Identity;
4185   Value *VectorStart;
4186   if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
4187       RK == RecurrenceDescriptor::RK_FloatMinMax) {
4188     // MinMax reductions have the start value as their identity.
4189     if (VF == 1) {
4190       VectorStart = Identity = ReductionStartValue;
4191     } else {
4192       VectorStart = Identity =
4193           Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
4194     }
4195   } else {
4196     // Handle other reduction kinds:
4197     Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
4198         RK, VecTy->getScalarType());
4199     if (VF == 1) {
4200       Identity = Iden;
4201       // This vector is the Identity vector where the first element is the
4202       // incoming scalar reduction.
4203       VectorStart = ReductionStartValue;
4204     } else {
4205       Identity = ConstantVector::getSplat(VF, Iden);
4206 
4207       // This vector is the Identity vector where the first element is the
4208       // incoming scalar reduction.
4209       VectorStart =
4210           Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
4211     }
4212   }
4213 
4214   // Fix the vector-loop phi.
4215 
4216   // Reductions do not have to start at zero. They can start with
4217   // any loop-invariant value.
4218   BasicBlock *Latch = OrigLoop->getLoopLatch();
4219   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
4220   for (unsigned Part = 0; Part < UF; ++Part) {
4221     Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
4222     Value *Val = getOrCreateVectorValue(LoopVal, Part);
4223     // Make sure to add the reduction start value only to the
4224     // first unroll part.
4225     Value *StartVal = (Part == 0) ? VectorStart : Identity;
4226     cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
4227     cast<PHINode>(VecRdxPhi)
4228         ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4229   }
4230 
4231   // Before each round, move the insertion point right between
4232   // the PHIs and the values we are going to write.
4233   // This allows us to write both PHINodes and the extractelement
4234   // instructions.
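  // (Aside, for illustration of the start values chosen above - a sketch, not
  // verbatim output: for an integer add reduction with start value %s and
  // VF = 4, Identity is <0, 0, 0, 0> and VectorStart is <%s, 0, 0, 0>. Only
  // the phi of unroll part 0 starts at VectorStart; the phis of the remaining
  // parts start at Identity, so %s is accumulated exactly once.)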
4235 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4236 4237 setDebugLocFromInst(Builder, LoopExitInst); 4238 4239 // If the vector reduction can be performed in a smaller type, we truncate 4240 // then extend the loop exit value to enable InstCombine to evaluate the 4241 // entire expression in the smaller type. 4242 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) { 4243 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4244 Builder.SetInsertPoint( 4245 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4246 VectorParts RdxParts(UF); 4247 for (unsigned Part = 0; Part < UF; ++Part) { 4248 RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 4249 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4250 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 4251 : Builder.CreateZExt(Trunc, VecTy); 4252 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 4253 UI != RdxParts[Part]->user_end();) 4254 if (*UI != Trunc) { 4255 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 4256 RdxParts[Part] = Extnd; 4257 } else { 4258 ++UI; 4259 } 4260 } 4261 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4262 for (unsigned Part = 0; Part < UF; ++Part) { 4263 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4264 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]); 4265 } 4266 } 4267 4268 // Reduce all of the unrolled parts into a single vector. 4269 Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0); 4270 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK); 4271 setDebugLocFromInst(Builder, ReducedPartRdx); 4272 for (unsigned Part = 1; Part < UF; ++Part) { 4273 Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 4274 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 4275 // Floating point operations had to be 'fast' to enable the reduction. 4276 ReducedPartRdx = addFastMathFlag( 4277 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart, 4278 ReducedPartRdx, "bin.rdx")); 4279 else 4280 ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp( 4281 Builder, MinMaxKind, ReducedPartRdx, RdxPart); 4282 } 4283 4284 if (VF > 1) { 4285 bool NoNaN = Legal->hasFunNoNaNAttr(); 4286 ReducedPartRdx = 4287 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN); 4288 // If the reduction can be performed in a smaller type, we need to extend 4289 // the reduction to the wider type before we branch to the original loop. 4290 if (Phi->getType() != RdxDesc.getRecurrenceType()) 4291 ReducedPartRdx = 4292 RdxDesc.isSigned() 4293 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 4294 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 4295 } 4296 4297 // Create a phi node that merges control-flow from the backedge-taken check 4298 // block and the middle block. 4299 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 4300 LoopScalarPreHeader->getTerminator()); 4301 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 4302 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 4303 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4304 4305 // Now, we need to fix the users of the reduction variable 4306 // inside and outside of the scalar remainder loop. 4307 // We know that the loop is in LCSSA form. We need to update the 4308 // PHI nodes in the exit blocks. 
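  // (Aside, for illustration of the epilogue built above - a sketch: for an
  // add reduction with UF = 2, the two unrolled parts are first combined with
  // a single binary operator,
  //   %bin.rdx = add <4 x i32> %part1, %part0
  // and the surviving vector is then reduced across its lanes by
  // createTargetReduction, using whatever form the target prefers.)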
4309 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4310 // All PHINodes need to have a single entry edge, or two if 4311 // we already fixed them. 4312 assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 4313 4314 // We found a reduction value exit-PHI. Update it with the 4315 // incoming bypass edge. 4316 if (LCSSAPhi.getIncomingValue(0) == LoopExitInst) 4317 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4318 } // end of the LCSSA phi scan. 4319 4320 // Fix the scalar loop reduction variable with the incoming reduction sum 4321 // from the vector body and from the backedge value. 4322 int IncomingEdgeBlockIdx = 4323 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4324 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4325 // Pick the other block. 4326 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 4327 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4328 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4329 } 4330 4331 void InnerLoopVectorizer::fixLCSSAPHIs() { 4332 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4333 if (LCSSAPhi.getNumIncomingValues() == 1) { 4334 assert(OrigLoop->isLoopInvariant(LCSSAPhi.getIncomingValue(0)) && 4335 "Incoming value isn't loop invariant"); 4336 LCSSAPhi.addIncoming(LCSSAPhi.getIncomingValue(0), LoopMiddleBlock); 4337 } 4338 } 4339 } 4340 4341 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4342 // The basic block and loop containing the predicated instruction. 4343 auto *PredBB = PredInst->getParent(); 4344 auto *VectorLoop = LI->getLoopFor(PredBB); 4345 4346 // Initialize a worklist with the operands of the predicated instruction. 4347 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4348 4349 // Holds instructions that we need to analyze again. An instruction may be 4350 // reanalyzed if we don't yet know if we can sink it or not. 4351 SmallVector<Instruction *, 8> InstsToReanalyze; 4352 4353 // Returns true if a given use occurs in the predicated block. Phi nodes use 4354 // their operands in their corresponding predecessor blocks. 4355 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4356 auto *I = cast<Instruction>(U.getUser()); 4357 BasicBlock *BB = I->getParent(); 4358 if (auto *Phi = dyn_cast<PHINode>(I)) 4359 BB = Phi->getIncomingBlock( 4360 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4361 return BB == PredBB; 4362 }; 4363 4364 // Iteratively sink the scalarized operands of the predicated instruction 4365 // into the block we created for it. When an instruction is sunk, it's 4366 // operands are then added to the worklist. The algorithm ends after one pass 4367 // through the worklist doesn't sink a single instruction. 4368 bool Changed; 4369 do { 4370 // Add the instructions that need to be reanalyzed to the worklist, and 4371 // reset the changed indicator. 4372 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4373 InstsToReanalyze.clear(); 4374 Changed = false; 4375 4376 while (!Worklist.empty()) { 4377 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4378 4379 // We can't sink an instruction if it is a phi node, is already in the 4380 // predicated block, is not in the loop, or may have side effects. 4381 if (!I || isa<PHINode>(I) || I->getParent() == PredBB || 4382 !VectorLoop->contains(I) || I->mayHaveSideEffects()) 4383 continue; 4384 4385 // It's legal to sink the instruction if all its uses occur in the 4386 // predicated block. 
Otherwise, there's nothing to do yet, and we may 4387 // need to reanalyze the instruction. 4388 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4389 InstsToReanalyze.push_back(I); 4390 continue; 4391 } 4392 4393 // Move the instruction to the beginning of the predicated block, and add 4394 // it's operands to the worklist. 4395 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4396 Worklist.insert(I->op_begin(), I->op_end()); 4397 4398 // The sinking may have enabled other instructions to be sunk, so we will 4399 // need to iterate. 4400 Changed = true; 4401 } 4402 } while (Changed); 4403 } 4404 4405 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF, 4406 unsigned VF) { 4407 assert(PN->getParent() == OrigLoop->getHeader() && 4408 "Non-header phis should have been handled elsewhere"); 4409 4410 PHINode *P = cast<PHINode>(PN); 4411 // In order to support recurrences we need to be able to vectorize Phi nodes. 4412 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4413 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4414 // this value when we vectorize all of the instructions that use the PHI. 4415 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) { 4416 for (unsigned Part = 0; Part < UF; ++Part) { 4417 // This is phase one of vectorizing PHIs. 4418 Type *VecTy = 4419 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF); 4420 Value *EntryPart = PHINode::Create( 4421 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 4422 VectorLoopValueMap.setVectorValue(P, Part, EntryPart); 4423 } 4424 return; 4425 } 4426 4427 setDebugLocFromInst(Builder, P); 4428 4429 // This PHINode must be an induction variable. 4430 // Make sure that we know about it. 4431 assert(Legal->getInductionVars()->count(P) && "Not an induction variable"); 4432 4433 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 4434 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4435 4436 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4437 // which can be found from the original scalar operations. 4438 switch (II.getKind()) { 4439 case InductionDescriptor::IK_NoInduction: 4440 llvm_unreachable("Unknown induction"); 4441 case InductionDescriptor::IK_IntInduction: 4442 case InductionDescriptor::IK_FpInduction: 4443 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4444 case InductionDescriptor::IK_PtrInduction: { 4445 // Handle the pointer induction variable case. 4446 assert(P->getType()->isPointerTy() && "Unexpected type."); 4447 // This is the normalized GEP that starts counting at zero. 4448 Value *PtrInd = Induction; 4449 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType()); 4450 // Determine the number of scalars we need to generate for each unroll 4451 // iteration. If the instruction is uniform, we only need to generate the 4452 // first lane. Otherwise, we generate all VF values. 4453 unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF; 4454 // These are the scalar results. Notice that we don't generate vector GEPs 4455 // because scalar GEPs result in better code. 
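    // For illustration (hypothetical values, not verbatim output): with
    // VF = 4, UF = 2 and a non-uniform pointer induction, the loop below
    // emits eight scalar "next.gep" values, one for each induction index
    // PtrInd + 0 through PtrInd + 7. If the pointer is uniform after
    // vectorization, only lane 0 of each part (PtrInd + 0 and PtrInd + 4) is
    // generated.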
4456 for (unsigned Part = 0; Part < UF; ++Part) { 4457 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4458 Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF); 4459 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4460 Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL); 4461 SclrGep->setName("next.gep"); 4462 VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep); 4463 } 4464 } 4465 return; 4466 } 4467 } 4468 } 4469 4470 /// A helper function for checking whether an integer division-related 4471 /// instruction may divide by zero (in which case it must be predicated if 4472 /// executed conditionally in the scalar code). 4473 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4474 /// Non-zero divisors that are non compile-time constants will not be 4475 /// converted into multiplication, so we will still end up scalarizing 4476 /// the division, but can do so w/o predication. 4477 static bool mayDivideByZero(Instruction &I) { 4478 assert((I.getOpcode() == Instruction::UDiv || 4479 I.getOpcode() == Instruction::SDiv || 4480 I.getOpcode() == Instruction::URem || 4481 I.getOpcode() == Instruction::SRem) && 4482 "Unexpected instruction"); 4483 Value *Divisor = I.getOperand(1); 4484 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4485 return !CInt || CInt->isZero(); 4486 } 4487 4488 void InnerLoopVectorizer::widenInstruction(Instruction &I) { 4489 switch (I.getOpcode()) { 4490 case Instruction::Br: 4491 case Instruction::PHI: 4492 llvm_unreachable("This instruction is handled by a different recipe."); 4493 case Instruction::GetElementPtr: { 4494 // Construct a vector GEP by widening the operands of the scalar GEP as 4495 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 4496 // results in a vector of pointers when at least one operand of the GEP 4497 // is vector-typed. Thus, to keep the representation compact, we only use 4498 // vector-typed operands for loop-varying values. 4499 auto *GEP = cast<GetElementPtrInst>(&I); 4500 4501 if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) { 4502 // If we are vectorizing, but the GEP has only loop-invariant operands, 4503 // the GEP we build (by only using vector-typed operands for 4504 // loop-varying values) would be a scalar pointer. Thus, to ensure we 4505 // produce a vector of pointers, we need to either arbitrarily pick an 4506 // operand to broadcast, or broadcast a clone of the original GEP. 4507 // Here, we broadcast a clone of the original. 4508 // 4509 // TODO: If at some point we decide to scalarize instructions having 4510 // loop-invariant operands, this special case will no longer be 4511 // required. We would add the scalarization decision to 4512 // collectLoopScalars() and teach getVectorValue() to broadcast 4513 // the lane-zero scalar value. 4514 auto *Clone = Builder.Insert(GEP->clone()); 4515 for (unsigned Part = 0; Part < UF; ++Part) { 4516 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 4517 VectorLoopValueMap.setVectorValue(&I, Part, EntryPart); 4518 addMetadata(EntryPart, GEP); 4519 } 4520 } else { 4521 // If the GEP has at least one loop-varying operand, we are sure to 4522 // produce a vector of pointers. But if we are only unrolling, we want 4523 // to produce a scalar GEP for each unroll part. Thus, the GEP we 4524 // produce with the code below will be scalar (if VF == 1) or vector 4525 // (otherwise). 
Note that for the unroll-only case, we still maintain 4526 // values in the vector mapping with initVector, as we do for other 4527 // instructions. 4528 for (unsigned Part = 0; Part < UF; ++Part) { 4529 // The pointer operand of the new GEP. If it's loop-invariant, we 4530 // won't broadcast it. 4531 auto *Ptr = 4532 OrigLoop->isLoopInvariant(GEP->getPointerOperand()) 4533 ? GEP->getPointerOperand() 4534 : getOrCreateVectorValue(GEP->getPointerOperand(), Part); 4535 4536 // Collect all the indices for the new GEP. If any index is 4537 // loop-invariant, we won't broadcast it. 4538 SmallVector<Value *, 4> Indices; 4539 for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) { 4540 if (OrigLoop->isLoopInvariant(U.get())) 4541 Indices.push_back(U.get()); 4542 else 4543 Indices.push_back(getOrCreateVectorValue(U.get(), Part)); 4544 } 4545 4546 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 4547 // but it should be a vector, otherwise. 4548 auto *NewGEP = GEP->isInBounds() 4549 ? Builder.CreateInBoundsGEP(Ptr, Indices) 4550 : Builder.CreateGEP(Ptr, Indices); 4551 assert((VF == 1 || NewGEP->getType()->isVectorTy()) && 4552 "NewGEP is not a pointer vector"); 4553 VectorLoopValueMap.setVectorValue(&I, Part, NewGEP); 4554 addMetadata(NewGEP, GEP); 4555 } 4556 } 4557 4558 break; 4559 } 4560 case Instruction::UDiv: 4561 case Instruction::SDiv: 4562 case Instruction::SRem: 4563 case Instruction::URem: 4564 case Instruction::Add: 4565 case Instruction::FAdd: 4566 case Instruction::Sub: 4567 case Instruction::FSub: 4568 case Instruction::Mul: 4569 case Instruction::FMul: 4570 case Instruction::FDiv: 4571 case Instruction::FRem: 4572 case Instruction::Shl: 4573 case Instruction::LShr: 4574 case Instruction::AShr: 4575 case Instruction::And: 4576 case Instruction::Or: 4577 case Instruction::Xor: { 4578 // Just widen binops. 4579 auto *BinOp = cast<BinaryOperator>(&I); 4580 setDebugLocFromInst(Builder, BinOp); 4581 4582 for (unsigned Part = 0; Part < UF; ++Part) { 4583 Value *A = getOrCreateVectorValue(BinOp->getOperand(0), Part); 4584 Value *B = getOrCreateVectorValue(BinOp->getOperand(1), Part); 4585 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B); 4586 4587 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V)) 4588 VecOp->copyIRFlags(BinOp); 4589 4590 // Use this vector value for all users of the original instruction. 4591 VectorLoopValueMap.setVectorValue(&I, Part, V); 4592 addMetadata(V, BinOp); 4593 } 4594 4595 break; 4596 } 4597 case Instruction::Select: { 4598 // Widen selects. 4599 // If the selector is loop invariant we can create a select 4600 // instruction with a scalar condition. Otherwise, use vector-select. 4601 auto *SE = PSE.getSE(); 4602 bool InvariantCond = 4603 SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop); 4604 setDebugLocFromInst(Builder, &I); 4605 4606 // The condition can be loop invariant but still defined inside the 4607 // loop. This means that we can't just use the original 'cond' value. 4608 // We have to take the 'vectorized' value and pick the first lane. 4609 // Instcombine will make this a no-op. 4610 4611 auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0}); 4612 4613 for (unsigned Part = 0; Part < UF; ++Part) { 4614 Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part); 4615 Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part); 4616 Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part); 4617 Value *Sel = 4618 Builder.CreateSelect(InvariantCond ? 
ScalarCond : Cond, Op0, Op1); 4619 VectorLoopValueMap.setVectorValue(&I, Part, Sel); 4620 addMetadata(Sel, &I); 4621 } 4622 4623 break; 4624 } 4625 4626 case Instruction::ICmp: 4627 case Instruction::FCmp: { 4628 // Widen compares. Generate vector compares. 4629 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4630 auto *Cmp = dyn_cast<CmpInst>(&I); 4631 setDebugLocFromInst(Builder, Cmp); 4632 for (unsigned Part = 0; Part < UF; ++Part) { 4633 Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part); 4634 Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part); 4635 Value *C = nullptr; 4636 if (FCmp) { 4637 // Propagate fast math flags. 4638 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4639 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4640 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4641 } else { 4642 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4643 } 4644 VectorLoopValueMap.setVectorValue(&I, Part, C); 4645 addMetadata(C, &I); 4646 } 4647 4648 break; 4649 } 4650 4651 case Instruction::ZExt: 4652 case Instruction::SExt: 4653 case Instruction::FPToUI: 4654 case Instruction::FPToSI: 4655 case Instruction::FPExt: 4656 case Instruction::PtrToInt: 4657 case Instruction::IntToPtr: 4658 case Instruction::SIToFP: 4659 case Instruction::UIToFP: 4660 case Instruction::Trunc: 4661 case Instruction::FPTrunc: 4662 case Instruction::BitCast: { 4663 auto *CI = dyn_cast<CastInst>(&I); 4664 setDebugLocFromInst(Builder, CI); 4665 4666 /// Vectorize casts. 4667 Type *DestTy = 4668 (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF); 4669 4670 for (unsigned Part = 0; Part < UF; ++Part) { 4671 Value *A = getOrCreateVectorValue(CI->getOperand(0), Part); 4672 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4673 VectorLoopValueMap.setVectorValue(&I, Part, Cast); 4674 addMetadata(Cast, &I); 4675 } 4676 break; 4677 } 4678 4679 case Instruction::Call: { 4680 // Ignore dbg intrinsics. 4681 if (isa<DbgInfoIntrinsic>(I)) 4682 break; 4683 setDebugLocFromInst(Builder, &I); 4684 4685 Module *M = I.getParent()->getParent()->getParent(); 4686 auto *CI = cast<CallInst>(&I); 4687 4688 StringRef FnName = CI->getCalledFunction()->getName(); 4689 Function *F = CI->getCalledFunction(); 4690 Type *RetTy = ToVectorTy(CI->getType(), VF); 4691 SmallVector<Type *, 4> Tys; 4692 for (Value *ArgOperand : CI->arg_operands()) 4693 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 4694 4695 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4696 4697 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4698 // version of the instruction. 4699 // Is it beneficial to perform intrinsic call compared to lib call? 4700 bool NeedToScalarize; 4701 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 4702 bool UseVectorIntrinsic = 4703 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 4704 assert((UseVectorIntrinsic || !NeedToScalarize) && 4705 "Instruction should be scalarized elsewhere."); 4706 4707 for (unsigned Part = 0; Part < UF; ++Part) { 4708 SmallVector<Value *, 4> Args; 4709 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) { 4710 Value *Arg = CI->getArgOperand(i); 4711 // Some intrinsics have a scalar argument - don't replace it with a 4712 // vector. 
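        // (For example, the exponent operand of llvm.powi stays scalar even
        // when the call itself is widened; the legality check in
        // canVectorizeInstrs already required such operands to be
        // loop-invariant.)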
4713 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) 4714 Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part); 4715 Args.push_back(Arg); 4716 } 4717 4718 Function *VectorF; 4719 if (UseVectorIntrinsic) { 4720 // Use vector version of the intrinsic. 4721 Type *TysForDecl[] = {CI->getType()}; 4722 if (VF > 1) 4723 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4724 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4725 } else { 4726 // Use vector version of the library call. 4727 StringRef VFnName = TLI->getVectorizedFunction(FnName, VF); 4728 assert(!VFnName.empty() && "Vector function name is empty."); 4729 VectorF = M->getFunction(VFnName); 4730 if (!VectorF) { 4731 // Generate a declaration 4732 FunctionType *FTy = FunctionType::get(RetTy, Tys, false); 4733 VectorF = 4734 Function::Create(FTy, Function::ExternalLinkage, VFnName, M); 4735 VectorF->copyAttributesFrom(F); 4736 } 4737 } 4738 assert(VectorF && "Can't create vector function."); 4739 4740 SmallVector<OperandBundleDef, 1> OpBundles; 4741 CI->getOperandBundlesAsDefs(OpBundles); 4742 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4743 4744 if (isa<FPMathOperator>(V)) 4745 V->copyFastMathFlags(CI); 4746 4747 VectorLoopValueMap.setVectorValue(&I, Part, V); 4748 addMetadata(V, &I); 4749 } 4750 4751 break; 4752 } 4753 4754 default: 4755 // This instruction is not vectorized by simple widening. 4756 DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4757 llvm_unreachable("Unhandled instruction!"); 4758 } // end of switch. 4759 } 4760 4761 void InnerLoopVectorizer::updateAnalysis() { 4762 // Forget the original basic block. 4763 PSE.getSE()->forgetLoop(OrigLoop); 4764 4765 // Update the dominator tree information. 4766 assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) && 4767 "Entry does not dominate exit."); 4768 4769 DT->addNewBlock(LoopMiddleBlock, 4770 LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4771 DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]); 4772 DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader); 4773 DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]); 4774 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 4775 } 4776 4777 /// \brief Check whether it is safe to if-convert this phi node. 4778 /// 4779 /// Phi nodes with constant expressions that can trap are not safe to if 4780 /// convert. 4781 static bool canIfConvertPHINodes(BasicBlock *BB) { 4782 for (PHINode &Phi : BB->phis()) { 4783 for (Value *V : Phi.incoming_values()) 4784 if (auto *C = dyn_cast<Constant>(V)) 4785 if (C->canTrap()) 4786 return false; 4787 } 4788 return true; 4789 } 4790 4791 bool LoopVectorizationLegality::canVectorizeWithIfConvert() { 4792 if (!EnableIfConversion) { 4793 ORE->emit(createMissedAnalysis("IfConversionDisabled") 4794 << "if-conversion is disabled"); 4795 return false; 4796 } 4797 4798 assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable"); 4799 4800 // A list of pointers that we can safely read and write to. 4801 SmallPtrSet<Value *, 8> SafePointes; 4802 4803 // Collect safe addresses. 4804 for (BasicBlock *BB : TheLoop->blocks()) { 4805 if (blockNeedsPredication(BB)) 4806 continue; 4807 4808 for (Instruction &I : *BB) 4809 if (auto *Ptr = getLoadStorePointerOperand(&I)) 4810 SafePointes.insert(Ptr); 4811 } 4812 4813 // Collect the blocks that need predication. 
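  // For illustration only (a sketch of what if-conversion later enables, not
  // code emitted here): a guarded update such as
  //   if (cond) x = y + 1;
  // can be rewritten as an unconditional computation plus a select,
  //   t = y + 1; x = cond ? t : x;
  // which is why every predicated block must be convertible to this form.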
4814 BasicBlock *Header = TheLoop->getHeader(); 4815 for (BasicBlock *BB : TheLoop->blocks()) { 4816 // We don't support switch statements inside loops. 4817 if (!isa<BranchInst>(BB->getTerminator())) { 4818 ORE->emit(createMissedAnalysis("LoopContainsSwitch", BB->getTerminator()) 4819 << "loop contains a switch statement"); 4820 return false; 4821 } 4822 4823 // We must be able to predicate all blocks that need to be predicated. 4824 if (blockNeedsPredication(BB)) { 4825 if (!blockCanBePredicated(BB, SafePointes)) { 4826 ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator()) 4827 << "control flow cannot be substituted for a select"); 4828 return false; 4829 } 4830 } else if (BB != Header && !canIfConvertPHINodes(BB)) { 4831 ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator()) 4832 << "control flow cannot be substituted for a select"); 4833 return false; 4834 } 4835 } 4836 4837 // We can if-convert this loop. 4838 return true; 4839 } 4840 4841 bool LoopVectorizationLegality::canVectorize() { 4842 // Store the result and return it at the end instead of exiting early, in case 4843 // allowExtraAnalysis is used to report multiple reasons for not vectorizing. 4844 bool Result = true; 4845 4846 bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE); 4847 // We must have a loop in canonical form. Loops with indirectbr in them cannot 4848 // be canonicalized. 4849 if (!TheLoop->getLoopPreheader()) { 4850 DEBUG(dbgs() << "LV: Loop doesn't have a legal pre-header.\n"); 4851 ORE->emit(createMissedAnalysis("CFGNotUnderstood") 4852 << "loop control flow is not understood by vectorizer"); 4853 if (DoExtraAnalysis) 4854 Result = false; 4855 else 4856 return false; 4857 } 4858 4859 // FIXME: The code is currently dead, since the loop gets sent to 4860 // LoopVectorizationLegality is already an innermost loop. 4861 // 4862 // We can only vectorize innermost loops. 4863 if (!TheLoop->empty()) { 4864 ORE->emit(createMissedAnalysis("NotInnermostLoop") 4865 << "loop is not the innermost loop"); 4866 if (DoExtraAnalysis) 4867 Result = false; 4868 else 4869 return false; 4870 } 4871 4872 // We must have a single backedge. 4873 if (TheLoop->getNumBackEdges() != 1) { 4874 ORE->emit(createMissedAnalysis("CFGNotUnderstood") 4875 << "loop control flow is not understood by vectorizer"); 4876 if (DoExtraAnalysis) 4877 Result = false; 4878 else 4879 return false; 4880 } 4881 4882 // We must have a single exiting block. 4883 if (!TheLoop->getExitingBlock()) { 4884 ORE->emit(createMissedAnalysis("CFGNotUnderstood") 4885 << "loop control flow is not understood by vectorizer"); 4886 if (DoExtraAnalysis) 4887 Result = false; 4888 else 4889 return false; 4890 } 4891 4892 // We only handle bottom-tested loops, i.e. loop in which the condition is 4893 // checked at the end of each iteration. With that we can assume that all 4894 // instructions in the loop are executed the same number of times. 4895 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 4896 ORE->emit(createMissedAnalysis("CFGNotUnderstood") 4897 << "loop control flow is not understood by vectorizer"); 4898 if (DoExtraAnalysis) 4899 Result = false; 4900 else 4901 return false; 4902 } 4903 4904 // We need to have a loop header. 4905 DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName() 4906 << '\n'); 4907 4908 // Check if we can if-convert non-single-bb loops. 
4909 unsigned NumBlocks = TheLoop->getNumBlocks(); 4910 if (NumBlocks != 1 && !canVectorizeWithIfConvert()) { 4911 DEBUG(dbgs() << "LV: Can't if-convert the loop.\n"); 4912 if (DoExtraAnalysis) 4913 Result = false; 4914 else 4915 return false; 4916 } 4917 4918 // Check if we can vectorize the instructions and CFG in this loop. 4919 if (!canVectorizeInstrs()) { 4920 DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n"); 4921 if (DoExtraAnalysis) 4922 Result = false; 4923 else 4924 return false; 4925 } 4926 4927 // Go over each instruction and look at memory deps. 4928 if (!canVectorizeMemory()) { 4929 DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n"); 4930 if (DoExtraAnalysis) 4931 Result = false; 4932 else 4933 return false; 4934 } 4935 4936 DEBUG(dbgs() << "LV: We can vectorize this loop" 4937 << (LAI->getRuntimePointerChecking()->Need 4938 ? " (with a runtime bound check)" 4939 : "") 4940 << "!\n"); 4941 4942 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 4943 4944 // If an override option has been passed in for interleaved accesses, use it. 4945 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 4946 UseInterleaved = EnableInterleavedMemAccesses; 4947 4948 // Analyze interleaved memory accesses. 4949 if (UseInterleaved) 4950 InterleaveInfo.analyzeInterleaving(*getSymbolicStrides()); 4951 4952 unsigned SCEVThreshold = VectorizeSCEVCheckThreshold; 4953 if (Hints->getForce() == LoopVectorizeHints::FK_Enabled) 4954 SCEVThreshold = PragmaVectorizeSCEVCheckThreshold; 4955 4956 if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) { 4957 ORE->emit(createMissedAnalysis("TooManySCEVRunTimeChecks") 4958 << "Too many SCEV assumptions need to be made and checked " 4959 << "at runtime"); 4960 DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n"); 4961 if (DoExtraAnalysis) 4962 Result = false; 4963 else 4964 return false; 4965 } 4966 4967 // Okay! We've done all the tests. If any have failed, return false. Otherwise 4968 // we can vectorize, and at this point we don't have any other mem analysis 4969 // which may limit our maximum vectorization factor, so just return true with 4970 // no restrictions. 4971 return Result; 4972 } 4973 4974 static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) { 4975 if (Ty->isPointerTy()) 4976 return DL.getIntPtrType(Ty); 4977 4978 // It is possible that char's or short's overflow when we ask for the loop's 4979 // trip count, work around this by changing the type size. 4980 if (Ty->getScalarSizeInBits() < 32) 4981 return Type::getInt32Ty(Ty->getContext()); 4982 4983 return Ty; 4984 } 4985 4986 static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) { 4987 Ty0 = convertPointerToIntegerType(DL, Ty0); 4988 Ty1 = convertPointerToIntegerType(DL, Ty1); 4989 if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits()) 4990 return Ty0; 4991 return Ty1; 4992 } 4993 4994 /// \brief Check that the instruction has outside loop users and is not an 4995 /// identified reduction variable. 4996 static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst, 4997 SmallPtrSetImpl<Value *> &AllowedExit) { 4998 // Reduction and Induction instructions are allowed to have exit users. All 4999 // other instructions must not have external users. 5000 if (!AllowedExit.count(Inst)) 5001 // Check that all of the users of the loop are inside the BB. 5002 for (User *U : Inst->users()) { 5003 Instruction *UI = cast<Instruction>(U); 5004 // This user may be a reduction exit value. 
5005 if (!TheLoop->contains(UI)) { 5006 DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n'); 5007 return true; 5008 } 5009 } 5010 return false; 5011 } 5012 5013 void LoopVectorizationLegality::addInductionPhi( 5014 PHINode *Phi, const InductionDescriptor &ID, 5015 SmallPtrSetImpl<Value *> &AllowedExit) { 5016 Inductions[Phi] = ID; 5017 5018 // In case this induction also comes with casts that we know we can ignore 5019 // in the vectorized loop body, record them here. All casts could be recorded 5020 // here for ignoring, but suffices to record only the first (as it is the 5021 // only one that may bw used outside the cast sequence). 5022 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 5023 if (!Casts.empty()) 5024 InductionCastsToIgnore.insert(*Casts.begin()); 5025 5026 Type *PhiTy = Phi->getType(); 5027 const DataLayout &DL = Phi->getModule()->getDataLayout(); 5028 5029 // Get the widest type. 5030 if (!PhiTy->isFloatingPointTy()) { 5031 if (!WidestIndTy) 5032 WidestIndTy = convertPointerToIntegerType(DL, PhiTy); 5033 else 5034 WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy); 5035 } 5036 5037 // Int inductions are special because we only allow one IV. 5038 if (ID.getKind() == InductionDescriptor::IK_IntInduction && 5039 ID.getConstIntStepValue() && 5040 ID.getConstIntStepValue()->isOne() && 5041 isa<Constant>(ID.getStartValue()) && 5042 cast<Constant>(ID.getStartValue())->isNullValue()) { 5043 5044 // Use the phi node with the widest type as induction. Use the last 5045 // one if there are multiple (no good reason for doing this other 5046 // than it is expedient). We've checked that it begins at zero and 5047 // steps by one, so this is a canonical induction variable. 5048 if (!PrimaryInduction || PhiTy == WidestIndTy) 5049 PrimaryInduction = Phi; 5050 } 5051 5052 // Both the PHI node itself, and the "post-increment" value feeding 5053 // back into the PHI node may have external users. 5054 // We can allow those uses, except if the SCEVs we have for them rely 5055 // on predicates that only hold within the loop, since allowing the exit 5056 // currently means re-using this SCEV outside the loop. 5057 if (PSE.getUnionPredicate().isAlwaysTrue()) { 5058 AllowedExit.insert(Phi); 5059 AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch())); 5060 } 5061 5062 DEBUG(dbgs() << "LV: Found an induction variable.\n"); 5063 } 5064 5065 bool LoopVectorizationLegality::canVectorizeInstrs() { 5066 BasicBlock *Header = TheLoop->getHeader(); 5067 5068 // Look for the attribute signaling the absence of NaNs. 5069 Function &F = *Header->getParent(); 5070 HasFunNoNaNAttr = 5071 F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true"; 5072 5073 // For each block in the loop. 5074 for (BasicBlock *BB : TheLoop->blocks()) { 5075 // Scan the instructions in the block and look for hazards. 5076 for (Instruction &I : *BB) { 5077 if (auto *Phi = dyn_cast<PHINode>(&I)) { 5078 Type *PhiTy = Phi->getType(); 5079 // Check that this PHI type is allowed. 5080 if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() && 5081 !PhiTy->isPointerTy()) { 5082 ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi) 5083 << "loop control flow is not understood by vectorizer"); 5084 DEBUG(dbgs() << "LV: Found an non-int non-pointer PHI.\n"); 5085 return false; 5086 } 5087 5088 // If this PHINode is not in the header block, then we know that we 5089 // can convert it to select during if-conversion. 
No need to check if 5090 // the PHIs in this block are induction or reduction variables. 5091 if (BB != Header) { 5092 // Check that this instruction has no outside users or is an 5093 // identified reduction value with an outside user. 5094 if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit)) 5095 continue; 5096 ORE->emit(createMissedAnalysis("NeitherInductionNorReduction", Phi) 5097 << "value could not be identified as " 5098 "an induction or reduction variable"); 5099 return false; 5100 } 5101 5102 // We only allow if-converted PHIs with exactly two incoming values. 5103 if (Phi->getNumIncomingValues() != 2) { 5104 ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi) 5105 << "control flow not understood by vectorizer"); 5106 DEBUG(dbgs() << "LV: Found an invalid PHI.\n"); 5107 return false; 5108 } 5109 5110 RecurrenceDescriptor RedDes; 5111 if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes, DB, AC, 5112 DT)) { 5113 if (RedDes.hasUnsafeAlgebra()) 5114 Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst()); 5115 AllowedExit.insert(RedDes.getLoopExitInstr()); 5116 Reductions[Phi] = RedDes; 5117 continue; 5118 } 5119 5120 InductionDescriptor ID; 5121 if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) { 5122 addInductionPhi(Phi, ID, AllowedExit); 5123 if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr) 5124 Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst()); 5125 continue; 5126 } 5127 5128 if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, 5129 SinkAfter, DT)) { 5130 FirstOrderRecurrences.insert(Phi); 5131 continue; 5132 } 5133 5134 // As a last resort, coerce the PHI to a AddRec expression 5135 // and re-try classifying it a an induction PHI. 5136 if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) { 5137 addInductionPhi(Phi, ID, AllowedExit); 5138 continue; 5139 } 5140 5141 ORE->emit(createMissedAnalysis("NonReductionValueUsedOutsideLoop", Phi) 5142 << "value that could not be identified as " 5143 "reduction is used outside the loop"); 5144 DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n"); 5145 return false; 5146 } // end of PHI handling 5147 5148 // We handle calls that: 5149 // * Are debug info intrinsics. 5150 // * Have a mapping to an IR intrinsic. 5151 // * Have a vector version available. 5152 auto *CI = dyn_cast<CallInst>(&I); 5153 if (CI && !getVectorIntrinsicIDForCall(CI, TLI) && 5154 !isa<DbgInfoIntrinsic>(CI) && 5155 !(CI->getCalledFunction() && TLI && 5156 TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) { 5157 ORE->emit(createMissedAnalysis("CantVectorizeCall", CI) 5158 << "call instruction cannot be vectorized"); 5159 DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n"); 5160 return false; 5161 } 5162 5163 // Intrinsics such as powi,cttz and ctlz are legal to vectorize if the 5164 // second argument is the same (i.e. loop invariant) 5165 if (CI && hasVectorInstrinsicScalarOpd( 5166 getVectorIntrinsicIDForCall(CI, TLI), 1)) { 5167 auto *SE = PSE.getSE(); 5168 if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) { 5169 ORE->emit(createMissedAnalysis("CantVectorizeIntrinsic", CI) 5170 << "intrinsic instruction cannot be vectorized"); 5171 DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n"); 5172 return false; 5173 } 5174 } 5175 5176 // Check that the instruction return type is vectorizable. 5177 // Also, we can't vectorize extractelement instructions. 
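      // (For example, a call or load producing a struct or other aggregate
      // value has no legal vector form and is rejected here.)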
5178 if ((!VectorType::isValidElementType(I.getType()) && 5179 !I.getType()->isVoidTy()) || 5180 isa<ExtractElementInst>(I)) { 5181 ORE->emit(createMissedAnalysis("CantVectorizeInstructionReturnType", &I) 5182 << "instruction return type cannot be vectorized"); 5183 DEBUG(dbgs() << "LV: Found unvectorizable type.\n"); 5184 return false; 5185 } 5186 5187 // Check that the stored type is vectorizable. 5188 if (auto *ST = dyn_cast<StoreInst>(&I)) { 5189 Type *T = ST->getValueOperand()->getType(); 5190 if (!VectorType::isValidElementType(T)) { 5191 ORE->emit(createMissedAnalysis("CantVectorizeStore", ST) 5192 << "store instruction cannot be vectorized"); 5193 return false; 5194 } 5195 5196 // FP instructions can allow unsafe algebra, thus vectorizable by 5197 // non-IEEE-754 compliant SIMD units. 5198 // This applies to floating-point math operations and calls, not memory 5199 // operations, shuffles, or casts, as they don't change precision or 5200 // semantics. 5201 } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) && 5202 !I.isFast()) { 5203 DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n"); 5204 Hints->setPotentiallyUnsafe(); 5205 } 5206 5207 // Reduction instructions are allowed to have exit users. 5208 // All other instructions must not have external users. 5209 if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) { 5210 ORE->emit(createMissedAnalysis("ValueUsedOutsideLoop", &I) 5211 << "value cannot be used outside the loop"); 5212 return false; 5213 } 5214 } // next instr. 5215 } 5216 5217 if (!PrimaryInduction) { 5218 DEBUG(dbgs() << "LV: Did not find one integer induction var.\n"); 5219 if (Inductions.empty()) { 5220 ORE->emit(createMissedAnalysis("NoInductionVariable") 5221 << "loop induction variable could not be identified"); 5222 return false; 5223 } 5224 } 5225 5226 // Now we know the widest induction type, check if our found induction 5227 // is the same size. If it's not, unset it here and InnerLoopVectorizer 5228 // will create another. 5229 if (PrimaryInduction && WidestIndTy != PrimaryInduction->getType()) 5230 PrimaryInduction = nullptr; 5231 5232 return true; 5233 } 5234 5235 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) { 5236 // We should not collect Scalars more than once per VF. Right now, this 5237 // function is called from collectUniformsAndScalars(), which already does 5238 // this check. Collecting Scalars for VF=1 does not make any sense. 5239 assert(VF >= 2 && !Scalars.count(VF) && 5240 "This function should not be visited twice for the same VF"); 5241 5242 SmallSetVector<Instruction *, 8> Worklist; 5243 5244 // These sets are used to seed the analysis with pointers used by memory 5245 // accesses that will remain scalar. 5246 SmallSetVector<Instruction *, 8> ScalarPtrs; 5247 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 5248 5249 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 5250 // The pointer operands of loads and stores will be scalar as long as the 5251 // memory access is not a gather or scatter operation. The value operand of a 5252 // store will remain scalar if the store is scalarized. 
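  // For illustration (a hypothetical case): in
  //   store i32* %gep.b, i32** %gep.a
  // %gep.a is a scalar use unless the store becomes a scatter, whereas the
  // stored pointer %gep.b is a scalar use only if the store is scalarized.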
5253 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 5254 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 5255 assert(WideningDecision != CM_Unknown && 5256 "Widening decision should be ready at this moment"); 5257 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 5258 if (Ptr == Store->getValueOperand()) 5259 return WideningDecision == CM_Scalarize; 5260 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 5261 "Ptr is neither a value or pointer operand"); 5262 return WideningDecision != CM_GatherScatter; 5263 }; 5264 5265 // A helper that returns true if the given value is a bitcast or 5266 // getelementptr instruction contained in the loop. 5267 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 5268 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 5269 isa<GetElementPtrInst>(V)) && 5270 !TheLoop->isLoopInvariant(V); 5271 }; 5272 5273 // A helper that evaluates a memory access's use of a pointer. If the use 5274 // will be a scalar use, and the pointer is only used by memory accesses, we 5275 // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in 5276 // PossibleNonScalarPtrs. 5277 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 5278 // We only care about bitcast and getelementptr instructions contained in 5279 // the loop. 5280 if (!isLoopVaryingBitCastOrGEP(Ptr)) 5281 return; 5282 5283 // If the pointer has already been identified as scalar (e.g., if it was 5284 // also identified as uniform), there's nothing to do. 5285 auto *I = cast<Instruction>(Ptr); 5286 if (Worklist.count(I)) 5287 return; 5288 5289 // If the use of the pointer will be a scalar use, and all users of the 5290 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 5291 // place the pointer in PossibleNonScalarPtrs. 5292 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 5293 return isa<LoadInst>(U) || isa<StoreInst>(U); 5294 })) 5295 ScalarPtrs.insert(I); 5296 else 5297 PossibleNonScalarPtrs.insert(I); 5298 }; 5299 5300 // We seed the scalars analysis with three classes of instructions: (1) 5301 // instructions marked uniform-after-vectorization, (2) bitcast and 5302 // getelementptr instructions used by memory accesses requiring a scalar use, 5303 // and (3) pointer induction variables and their update instructions (we 5304 // currently only scalarize these). 5305 // 5306 // (1) Add to the worklist all instructions that have been identified as 5307 // uniform-after-vectorization. 5308 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 5309 5310 // (2) Add to the worklist all bitcast and getelementptr instructions used by 5311 // memory accesses requiring a scalar use. The pointer operands of loads and 5312 // stores will be scalar as long as the memory accesses is not a gather or 5313 // scatter operation. The value operand of a store will remain scalar if the 5314 // store is scalarized. 
5315 for (auto *BB : TheLoop->blocks()) 5316 for (auto &I : *BB) { 5317 if (auto *Load = dyn_cast<LoadInst>(&I)) { 5318 evaluatePtrUse(Load, Load->getPointerOperand()); 5319 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 5320 evaluatePtrUse(Store, Store->getPointerOperand()); 5321 evaluatePtrUse(Store, Store->getValueOperand()); 5322 } 5323 } 5324 for (auto *I : ScalarPtrs) 5325 if (!PossibleNonScalarPtrs.count(I)) { 5326 DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 5327 Worklist.insert(I); 5328 } 5329 5330 // (3) Add to the worklist all pointer induction variables and their update 5331 // instructions. 5332 // 5333 // TODO: Once we are able to vectorize pointer induction variables we should 5334 // no longer insert them into the worklist here. 5335 auto *Latch = TheLoop->getLoopLatch(); 5336 for (auto &Induction : *Legal->getInductionVars()) { 5337 auto *Ind = Induction.first; 5338 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5339 if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction) 5340 continue; 5341 Worklist.insert(Ind); 5342 Worklist.insert(IndUpdate); 5343 DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 5344 DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n"); 5345 } 5346 5347 // Insert the forced scalars. 5348 // FIXME: Currently widenPHIInstruction() often creates a dead vector 5349 // induction variable when the PHI user is scalarized. 5350 if (ForcedScalars.count(VF)) 5351 for (auto *I : ForcedScalars.find(VF)->second) 5352 Worklist.insert(I); 5353 5354 // Expand the worklist by looking through any bitcasts and getelementptr 5355 // instructions we've already identified as scalar. This is similar to the 5356 // expansion step in collectLoopUniforms(); however, here we're only 5357 // expanding to include additional bitcasts and getelementptr instructions. 5358 unsigned Idx = 0; 5359 while (Idx != Worklist.size()) { 5360 Instruction *Dst = Worklist[Idx++]; 5361 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 5362 continue; 5363 auto *Src = cast<Instruction>(Dst->getOperand(0)); 5364 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 5365 auto *J = cast<Instruction>(U); 5366 return !TheLoop->contains(J) || Worklist.count(J) || 5367 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 5368 isScalarUse(J, Src)); 5369 })) { 5370 Worklist.insert(Src); 5371 DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 5372 } 5373 } 5374 5375 // An induction variable will remain scalar if all users of the induction 5376 // variable and induction variable update remain scalar. 5377 for (auto &Induction : *Legal->getInductionVars()) { 5378 auto *Ind = Induction.first; 5379 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5380 5381 // We already considered pointer induction variables, so there's no reason 5382 // to look at their users again. 5383 // 5384 // TODO: Once we are able to vectorize pointer induction variables we 5385 // should no longer skip over them here. 5386 if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction) 5387 continue; 5388 5389 // Determine if all users of the induction variable are scalar after 5390 // vectorization. 
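    // (For example - a sketch of the intent: an induction %i whose only users
    // are its own update %i.next and GEPs that we have already decided to
    // keep scalar will itself remain scalar; if any user needs a vector
    // value, a vector induction is created instead.)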
5391 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5392 auto *I = cast<Instruction>(U); 5393 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 5394 }); 5395 if (!ScalarInd) 5396 continue; 5397 5398 // Determine if all users of the induction variable update instruction are 5399 // scalar after vectorization. 5400 auto ScalarIndUpdate = 5401 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5402 auto *I = cast<Instruction>(U); 5403 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 5404 }); 5405 if (!ScalarIndUpdate) 5406 continue; 5407 5408 // The induction variable and its update instruction will remain scalar. 5409 Worklist.insert(Ind); 5410 Worklist.insert(IndUpdate); 5411 DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 5412 DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n"); 5413 } 5414 5415 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 5416 } 5417 5418 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) { 5419 if (!Legal->blockNeedsPredication(I->getParent())) 5420 return false; 5421 switch(I->getOpcode()) { 5422 default: 5423 break; 5424 case Instruction::Load: 5425 case Instruction::Store: { 5426 if (!Legal->isMaskRequired(I)) 5427 return false; 5428 auto *Ptr = getLoadStorePointerOperand(I); 5429 auto *Ty = getMemInstValueType(I); 5430 return isa<LoadInst>(I) ? 5431 !(isLegalMaskedLoad(Ty, Ptr) || isLegalMaskedGather(Ty)) 5432 : !(isLegalMaskedStore(Ty, Ptr) || isLegalMaskedScatter(Ty)); 5433 } 5434 case Instruction::UDiv: 5435 case Instruction::SDiv: 5436 case Instruction::SRem: 5437 case Instruction::URem: 5438 return mayDivideByZero(*I); 5439 } 5440 return false; 5441 } 5442 5443 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I, 5444 unsigned VF) { 5445 // Get and ensure we have a valid memory instruction. 5446 LoadInst *LI = dyn_cast<LoadInst>(I); 5447 StoreInst *SI = dyn_cast<StoreInst>(I); 5448 assert((LI || SI) && "Invalid memory instruction"); 5449 5450 auto *Ptr = getLoadStorePointerOperand(I); 5451 5452 // In order to be widened, the pointer should be consecutive, first of all. 5453 if (!Legal->isConsecutivePtr(Ptr)) 5454 return false; 5455 5456 // If the instruction is a store located in a predicated block, it will be 5457 // scalarized. 5458 if (isScalarWithPredication(I)) 5459 return false; 5460 5461 // If the instruction's allocated size doesn't equal it's type size, it 5462 // requires padding and will be scalarized. 5463 auto &DL = I->getModule()->getDataLayout(); 5464 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 5465 if (hasIrregularType(ScalarTy, DL, VF)) 5466 return false; 5467 5468 return true; 5469 } 5470 5471 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) { 5472 // We should not collect Uniforms more than once per VF. Right now, 5473 // this function is called from collectUniformsAndScalars(), which 5474 // already does this check. Collecting Uniforms for VF=1 does not make any 5475 // sense. 5476 5477 assert(VF >= 2 && !Uniforms.count(VF) && 5478 "This function should not be visited twice for the same VF"); 5479 5480 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 5481 // not analyze again. Uniforms.count(VF) will return 1. 5482 Uniforms[VF].clear(); 5483 5484 // We now know that the loop is vectorizable! 5485 // Collect instructions inside the loop that will remain uniform after 5486 // vectorization. 
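  // For illustration (a sketch): in a loop that only computes a[i] = b[i] + 1
  // with consecutive accesses, the compare feeding the latch branch and the
  // address computations of a[i] and b[i] remain uniform - one scalar copy
  // per unrolled part suffices - while the loaded values and the add are
  // widened per lane.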
5487 5488 // Global values, params and instructions outside of current loop are out of 5489 // scope. 5490 auto isOutOfScope = [&](Value *V) -> bool { 5491 Instruction *I = dyn_cast<Instruction>(V); 5492 return (!I || !TheLoop->contains(I)); 5493 }; 5494 5495 SetVector<Instruction *> Worklist; 5496 BasicBlock *Latch = TheLoop->getLoopLatch(); 5497 5498 // Start with the conditional branch. If the branch condition is an 5499 // instruction contained in the loop that is only used by the branch, it is 5500 // uniform. 5501 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 5502 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) { 5503 Worklist.insert(Cmp); 5504 DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n"); 5505 } 5506 5507 // Holds consecutive and consecutive-like pointers. Consecutive-like pointers 5508 // are pointers that are treated like consecutive pointers during 5509 // vectorization. The pointer operands of interleaved accesses are an 5510 // example. 5511 SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs; 5512 5513 // Holds pointer operands of instructions that are possibly non-uniform. 5514 SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs; 5515 5516 auto isUniformDecision = [&](Instruction *I, unsigned VF) { 5517 InstWidening WideningDecision = getWideningDecision(I, VF); 5518 assert(WideningDecision != CM_Unknown && 5519 "Widening decision should be ready at this moment"); 5520 5521 return (WideningDecision == CM_Widen || 5522 WideningDecision == CM_Widen_Reverse || 5523 WideningDecision == CM_Interleave); 5524 }; 5525 // Iterate over the instructions in the loop, and collect all 5526 // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible 5527 // that a consecutive-like pointer operand will be scalarized, we collect it 5528 // in PossibleNonUniformPtrs instead. We use two sets here because a single 5529 // getelementptr instruction can be used by both vectorized and scalarized 5530 // memory instructions. For example, if a loop loads and stores from the same 5531 // location, but the store is conditional, the store will be scalarized, and 5532 // the getelementptr won't remain uniform. 5533 for (auto *BB : TheLoop->blocks()) 5534 for (auto &I : *BB) { 5535 // If there's no pointer operand, there's nothing to do. 5536 auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 5537 if (!Ptr) 5538 continue; 5539 5540 // True if all users of Ptr are memory accesses that have Ptr as their 5541 // pointer operand. 5542 auto UsersAreMemAccesses = 5543 llvm::all_of(Ptr->users(), [&](User *U) -> bool { 5544 return getLoadStorePointerOperand(U) == Ptr; 5545 }); 5546 5547 // Ensure the memory instruction will not be scalarized or used by 5548 // gather/scatter, making its pointer operand non-uniform. If the pointer 5549 // operand is used by any instruction other than a memory access, we 5550 // conservatively assume the pointer operand may be non-uniform. 5551 if (!UsersAreMemAccesses || !isUniformDecision(&I, VF)) 5552 PossibleNonUniformPtrs.insert(Ptr); 5553 5554 // If the memory instruction will be vectorized and its pointer operand 5555 // is consecutive-like, or interleaving - the pointer operand should 5556 // remain uniform. 5557 else 5558 ConsecutiveLikePtrs.insert(Ptr); 5559 } 5560 5561 // Add to the Worklist all consecutive and consecutive-like pointers that 5562 // aren't also identified as possibly non-uniform. 
5563 for (auto *V : ConsecutiveLikePtrs) 5564 if (!PossibleNonUniformPtrs.count(V)) { 5565 DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n"); 5566 Worklist.insert(V); 5567 } 5568 5569 // Expand Worklist in topological order: whenever a new instruction 5570 // is added , its users should be either already inside Worklist, or 5571 // out of scope. It ensures a uniform instruction will only be used 5572 // by uniform instructions or out of scope instructions. 5573 unsigned idx = 0; 5574 while (idx != Worklist.size()) { 5575 Instruction *I = Worklist[idx++]; 5576 5577 for (auto OV : I->operand_values()) { 5578 if (isOutOfScope(OV)) 5579 continue; 5580 auto *OI = cast<Instruction>(OV); 5581 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 5582 auto *J = cast<Instruction>(U); 5583 return !TheLoop->contains(J) || Worklist.count(J) || 5584 (OI == getLoadStorePointerOperand(J) && 5585 isUniformDecision(J, VF)); 5586 })) { 5587 Worklist.insert(OI); 5588 DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n"); 5589 } 5590 } 5591 } 5592 5593 // Returns true if Ptr is the pointer operand of a memory access instruction 5594 // I, and I is known to not require scalarization. 5595 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 5596 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 5597 }; 5598 5599 // For an instruction to be added into Worklist above, all its users inside 5600 // the loop should also be in Worklist. However, this condition cannot be 5601 // true for phi nodes that form a cyclic dependence. We must process phi 5602 // nodes separately. An induction variable will remain uniform if all users 5603 // of the induction variable and induction variable update remain uniform. 5604 // The code below handles both pointer and non-pointer induction variables. 5605 for (auto &Induction : *Legal->getInductionVars()) { 5606 auto *Ind = Induction.first; 5607 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5608 5609 // Determine if all users of the induction variable are uniform after 5610 // vectorization. 5611 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5612 auto *I = cast<Instruction>(U); 5613 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 5614 isVectorizedMemAccessUse(I, Ind); 5615 }); 5616 if (!UniformInd) 5617 continue; 5618 5619 // Determine if all users of the induction variable update instruction are 5620 // uniform after vectorization. 5621 auto UniformIndUpdate = 5622 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5623 auto *I = cast<Instruction>(U); 5624 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 5625 isVectorizedMemAccessUse(I, IndUpdate); 5626 }); 5627 if (!UniformIndUpdate) 5628 continue; 5629 5630 // The induction variable and its update instruction will remain uniform. 
5631 Worklist.insert(Ind); 5632 Worklist.insert(IndUpdate); 5633 DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n"); 5634 DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n"); 5635 } 5636 5637 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 5638 } 5639 5640 bool LoopVectorizationLegality::canVectorizeMemory() { 5641 LAI = &(*GetLAA)(*TheLoop); 5642 InterleaveInfo.setLAI(LAI); 5643 const OptimizationRemarkAnalysis *LAR = LAI->getReport(); 5644 if (LAR) { 5645 ORE->emit([&]() { 5646 return OptimizationRemarkAnalysis(Hints->vectorizeAnalysisPassName(), 5647 "loop not vectorized: ", *LAR); 5648 }); 5649 } 5650 if (!LAI->canVectorizeMemory()) 5651 return false; 5652 5653 if (LAI->hasStoreToLoopInvariantAddress()) { 5654 ORE->emit(createMissedAnalysis("CantVectorizeStoreToLoopInvariantAddress") 5655 << "write to a loop invariant address could not be vectorized"); 5656 DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n"); 5657 return false; 5658 } 5659 5660 Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks()); 5661 PSE.addPredicate(LAI->getPSE().getUnionPredicate()); 5662 5663 return true; 5664 } 5665 5666 bool LoopVectorizationLegality::isInductionPhi(const Value *V) { 5667 Value *In0 = const_cast<Value *>(V); 5668 PHINode *PN = dyn_cast_or_null<PHINode>(In0); 5669 if (!PN) 5670 return false; 5671 5672 return Inductions.count(PN); 5673 } 5674 5675 bool LoopVectorizationLegality::isCastedInductionVariable(const Value *V) { 5676 auto *Inst = dyn_cast<Instruction>(V); 5677 return (Inst && InductionCastsToIgnore.count(Inst)); 5678 } 5679 5680 bool LoopVectorizationLegality::isInductionVariable(const Value *V) { 5681 return isInductionPhi(V) || isCastedInductionVariable(V); 5682 } 5683 5684 bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) { 5685 return FirstOrderRecurrences.count(Phi); 5686 } 5687 5688 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) { 5689 return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT); 5690 } 5691 5692 bool LoopVectorizationLegality::blockCanBePredicated( 5693 BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) { 5694 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel(); 5695 5696 for (Instruction &I : *BB) { 5697 // Check that we don't have a constant expression that can trap as operand. 5698 for (Value *Operand : I.operands()) { 5699 if (auto *C = dyn_cast<Constant>(Operand)) 5700 if (C->canTrap()) 5701 return false; 5702 } 5703 // We might be able to hoist the load. 5704 if (I.mayReadFromMemory()) { 5705 auto *LI = dyn_cast<LoadInst>(&I); 5706 if (!LI) 5707 return false; 5708 if (!SafePtrs.count(LI->getPointerOperand())) { 5709 // !llvm.mem.parallel_loop_access implies if-conversion safety. 5710 // Otherwise, record that the load needs (real or emulated) masking 5711 // and let the cost model decide. 5712 if (!IsAnnotatedParallel) 5713 MaskedOp.insert(LI); 5714 continue; 5715 } 5716 } 5717 5718 if (I.mayWriteToMemory()) { 5719 auto *SI = dyn_cast<StoreInst>(&I); 5720 if (!SI) 5721 return false; 5722 // Predicated store requires some form of masking: 5723 // 1) masked store HW instruction, 5724 // 2) emulation via load-blend-store (only if safe and legal to do so, 5725 // be aware on the race conditions), or 5726 // 3) element-by-element predicate check and scalar store. 
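// As a rough sketch of option 2 (illustration only, not the code emitted
// here), a predicated store "if (m) A[i] = x" can be emulated per lane as
//
//   old  = A[i];
//   A[i] = m ? x : old;
//
// which is only safe when A[i] is known to be dereferenceable and no other
// thread can write the masked-off lanes concurrently.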
5727 MaskedOp.insert(SI); 5728 continue; 5729 } 5730 if (I.mayThrow()) 5731 return false; 5732 } 5733 5734 return true; 5735 } 5736 5737 void InterleavedAccessInfo::collectConstStrideAccesses( 5738 MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo, 5739 const ValueToValueMap &Strides) { 5740 auto &DL = TheLoop->getHeader()->getModule()->getDataLayout(); 5741 5742 // Since it's desired that the load/store instructions be maintained in 5743 // "program order" for the interleaved access analysis, we have to visit the 5744 // blocks in the loop in reverse postorder (i.e., in a topological order). 5745 // Such an ordering will ensure that any load/store that may be executed 5746 // before a second load/store will precede the second load/store in 5747 // AccessStrideInfo. 5748 LoopBlocksDFS DFS(TheLoop); 5749 DFS.perform(LI); 5750 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) 5751 for (auto &I : *BB) { 5752 auto *LI = dyn_cast<LoadInst>(&I); 5753 auto *SI = dyn_cast<StoreInst>(&I); 5754 if (!LI && !SI) 5755 continue; 5756 5757 Value *Ptr = getLoadStorePointerOperand(&I); 5758 // We don't check wrapping here because we don't know yet if Ptr will be 5759 // part of a full group or a group with gaps. Checking wrapping for all 5760 // pointers (even those that end up in groups with no gaps) will be overly 5761 // conservative. For full groups, wrapping should be ok since if we would 5762 // wrap around the address space we would do a memory access at nullptr 5763 // even without the transformation. The wrapping checks are therefore 5764 // deferred until after we've formed the interleaved groups. 5765 int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, 5766 /*Assume=*/true, /*ShouldCheckWrap=*/false); 5767 5768 const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 5769 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); 5770 uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType()); 5771 5772 // An alignment of 0 means target ABI alignment. 5773 unsigned Align = getMemInstAlignment(&I); 5774 if (!Align) 5775 Align = DL.getABITypeAlignment(PtrTy->getElementType()); 5776 5777 AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align); 5778 } 5779 } 5780 5781 // Analyze interleaved accesses and collect them into interleaved load and 5782 // store groups. 5783 // 5784 // When generating code for an interleaved load group, we effectively hoist all 5785 // loads in the group to the location of the first load in program order. When 5786 // generating code for an interleaved store group, we sink all stores to the 5787 // location of the last store. This code motion can change the order of load 5788 // and store instructions and may break dependences. 5789 // 5790 // The code generation strategy mentioned above ensures that we won't violate 5791 // any write-after-read (WAR) dependences. 5792 // 5793 // E.g., for the WAR dependence: a = A[i]; // (1) 5794 // A[i] = b; // (2) 5795 // 5796 // The store group of (2) is always inserted at or below (2), and the load 5797 // group of (1) is always inserted at or above (1). Thus, the instructions will 5798 // never be reordered. All other dependences are checked to ensure the 5799 // correctness of the instruction reordering. 5800 // 5801 // The algorithm visits all memory accesses in the loop in bottom-up program 5802 // order. Program order is established by traversing the blocks in the loop in 5803 // reverse postorder when collecting the accesses. 
5804 // 5805 // We visit the memory accesses in bottom-up order because it can simplify the 5806 // construction of store groups in the presence of write-after-write (WAW) 5807 // dependences. 5808 // 5809 // E.g., for the WAW dependence: A[i] = a; // (1) 5810 // A[i] = b; // (2) 5811 // A[i + 1] = c; // (3) 5812 // 5813 // We will first create a store group with (3) and (2). (1) can't be added to 5814 // this group because it and (2) are dependent. However, (1) can be grouped 5815 // with other accesses that may precede it in program order. Note that a 5816 // bottom-up order does not imply that WAW dependences should not be checked. 5817 void InterleavedAccessInfo::analyzeInterleaving( 5818 const ValueToValueMap &Strides) { 5819 DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n"); 5820 5821 // Holds all accesses with a constant stride. 5822 MapVector<Instruction *, StrideDescriptor> AccessStrideInfo; 5823 collectConstStrideAccesses(AccessStrideInfo, Strides); 5824 5825 if (AccessStrideInfo.empty()) 5826 return; 5827 5828 // Collect the dependences in the loop. 5829 collectDependences(); 5830 5831 // Holds all interleaved store groups temporarily. 5832 SmallSetVector<InterleaveGroup *, 4> StoreGroups; 5833 // Holds all interleaved load groups temporarily. 5834 SmallSetVector<InterleaveGroup *, 4> LoadGroups; 5835 5836 // Search in bottom-up program order for pairs of accesses (A and B) that can 5837 // form interleaved load or store groups. In the algorithm below, access A 5838 // precedes access B in program order. We initialize a group for B in the 5839 // outer loop of the algorithm, and then in the inner loop, we attempt to 5840 // insert each A into B's group if: 5841 // 5842 // 1. A and B have the same stride, 5843 // 2. A and B have the same memory object size, and 5844 // 3. A belongs in B's group according to its distance from B. 5845 // 5846 // Special care is taken to ensure group formation will not break any 5847 // dependences. 5848 for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend(); 5849 BI != E; ++BI) { 5850 Instruction *B = BI->first; 5851 StrideDescriptor DesB = BI->second; 5852 5853 // Initialize a group for B if it has an allowable stride. Even if we don't 5854 // create a group for B, we continue with the bottom-up algorithm to ensure 5855 // we don't break any of B's dependences. 5856 InterleaveGroup *Group = nullptr; 5857 if (isStrided(DesB.Stride)) { 5858 Group = getInterleaveGroup(B); 5859 if (!Group) { 5860 DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n'); 5861 Group = createInterleaveGroup(B, DesB.Stride, DesB.Align); 5862 } 5863 if (B->mayWriteToMemory()) 5864 StoreGroups.insert(Group); 5865 else 5866 LoadGroups.insert(Group); 5867 } 5868 5869 for (auto AI = std::next(BI); AI != E; ++AI) { 5870 Instruction *A = AI->first; 5871 StrideDescriptor DesA = AI->second; 5872 5873 // Our code motion strategy implies that we can't have dependences 5874 // between accesses in an interleaved group and other accesses located 5875 // between the first and last member of the group. Note that this also 5876 // means that a group can't have more than one member at a given offset. 5877 // The accesses in a group can have dependences with other accesses, but 5878 // we must ensure we don't extend the boundaries of the group such that 5879 // we encompass those dependent accesses. 
5880 // 5881 // For example, assume we have the sequence of accesses shown below in a 5882 // stride-2 loop: 5883 // 5884 // (1, 2) is a group | A[i] = a; // (1) 5885 // | A[i-1] = b; // (2) | 5886 // A[i-3] = c; // (3) 5887 // A[i] = d; // (4) | (2, 4) is not a group 5888 // 5889 // Because accesses (2) and (3) are dependent, we can group (2) with (1) 5890 // but not with (4). If we did, the dependent access (3) would be within 5891 // the boundaries of the (2, 4) group. 5892 if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) { 5893 // If a dependence exists and A is already in a group, we know that A 5894 // must be a store since A precedes B and WAR dependences are allowed. 5895 // Thus, A would be sunk below B. We release A's group to prevent this 5896 // illegal code motion. A will then be free to form another group with 5897 // instructions that precede it. 5898 if (isInterleaved(A)) { 5899 InterleaveGroup *StoreGroup = getInterleaveGroup(A); 5900 StoreGroups.remove(StoreGroup); 5901 releaseGroup(StoreGroup); 5902 } 5903 5904 // If a dependence exists and A is not already in a group (or it was 5905 // and we just released it), B might be hoisted above A (if B is a 5906 // load) or another store might be sunk below A (if B is a store). In 5907 // either case, we can't add additional instructions to B's group. B 5908 // will only form a group with instructions that it precedes. 5909 break; 5910 } 5911 5912 // At this point, we've checked for illegal code motion. If either A or B 5913 // isn't strided, there's nothing left to do. 5914 if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride)) 5915 continue; 5916 5917 // Ignore A if it's already in a group or isn't the same kind of memory 5918 // operation as B. 5919 // Note that mayReadFromMemory() isn't mutually exclusive to mayWriteToMemory 5920 // in the case of atomic loads. We shouldn't see those here, canVectorizeMemory() 5921 // should have returned false - except for the case we asked for optimization 5922 // remarks. 5923 if (isInterleaved(A) || (A->mayReadFromMemory() != B->mayReadFromMemory()) 5924 || (A->mayWriteToMemory() != B->mayWriteToMemory())) 5925 continue; 5926 5927 // Check rules 1 and 2. Ignore A if its stride or size is different from 5928 // that of B. 5929 if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size) 5930 continue; 5931 5932 // Ignore A if the memory object of A and B don't belong to the same 5933 // address space 5934 if (getMemInstAddressSpace(A) != getMemInstAddressSpace(B)) 5935 continue; 5936 5937 // Calculate the distance from A to B. 5938 const SCEVConstant *DistToB = dyn_cast<SCEVConstant>( 5939 PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev)); 5940 if (!DistToB) 5941 continue; 5942 int64_t DistanceToB = DistToB->getAPInt().getSExtValue(); 5943 5944 // Check rule 3. Ignore A if its distance to B is not a multiple of the 5945 // size. 5946 if (DistanceToB % static_cast<int64_t>(DesB.Size)) 5947 continue; 5948 5949 // Ignore A if either A or B is in a predicated block. Although we 5950 // currently prevent group formation for predicated accesses, we may be 5951 // able to relax this limitation in the future once we handle more 5952 // complicated blocks. 5953 if (isPredicated(A->getParent()) || isPredicated(B->getParent())) 5954 continue; 5955 5956 // The index of A is the index of B plus A's distance to B in multiples 5957 // of the size. 5958 int IndexA = 5959 Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size); 5960 5961 // Try to insert A into B's group. 
5962       if (Group->insertMember(A, IndexA, DesA.Align)) {
5963         DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
5964                      << " into the interleave group with" << *B << '\n');
5965         InterleaveGroupMap[A] = Group;
5966
5967         // Set the first load in program order as the insert position.
5968         if (A->mayReadFromMemory())
5969           Group->setInsertPos(A);
5970       }
5971     } // Iteration over A accesses.
5972   } // Iteration over B accesses.
5973
5974   // Remove interleaved store groups with gaps.
5975   for (InterleaveGroup *Group : StoreGroups)
5976     if (Group->getNumMembers() != Group->getFactor()) {
5977       DEBUG(dbgs() << "LV: Invalidate candidate interleaved store group due "
5978                       "to gaps.\n");
5979       releaseGroup(Group);
5980     }
5981   // Remove interleaved groups with gaps (currently only loads) whose memory
5982   // accesses may wrap around. We have to revisit the getPtrStride analysis,
5983   // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
5984   // not check wrapping (see documentation there).
5985   // FORNOW we use Assume=false;
5986   // TODO: Change to Assume=true while making sure we don't exceed the threshold
5987   // of runtime SCEV assumptions checks (thereby potentially failing to
5988   // vectorize altogether).
5989   // Additional optional optimizations:
5990   // TODO: If we are peeling the loop and we know that the first pointer doesn't
5991   // wrap then we can deduce that all pointers in the group don't wrap.
5992   // This means that we can forcefully peel the loop in order to only have to
5993   // check the first pointer for no-wrap. Once we change to Assume=true,
5994   // we'll only need at most one runtime check per interleaved group.
5995   for (InterleaveGroup *Group : LoadGroups) {
5996     // Case 1: A full group. We can skip the checks; for full groups, if the
5997     // wide load would wrap around the address space we would do a memory
5998     // access at nullptr even without the transformation.
5999     if (Group->getNumMembers() == Group->getFactor())
6000       continue;
6001
6002     // Case 2: If the first and last members of the group don't wrap, this
6003     // implies that all the pointers in the group don't wrap.
6004     // So we check only group member 0 (which is always guaranteed to exist),
6005     // and group member Factor - 1; if the latter doesn't exist, we rely on
6006     // peeling (if it is a non-reversed access -- see Case 3).
6007     Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
6008     if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
6009                       /*ShouldCheckWrap=*/true)) {
6010       DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
6011                       "first group member potentially pointer-wrapping.\n");
6012       releaseGroup(Group);
6013       continue;
6014     }
6015     Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
6016     if (LastMember) {
6017       Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
6018       if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
6019                         /*ShouldCheckWrap=*/true)) {
6020         DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
6021                         "last group member potentially pointer-wrapping.\n");
6022         releaseGroup(Group);
6023       }
6024     } else {
6025       // Case 3: A non-reversed interleaved load group with gaps: We need
6026       // to execute at least one scalar epilogue iteration. This will ensure
6027       // we don't speculatively access memory out-of-bounds. We only need
6028       // to look for a member at index factor - 1, since every group must have
6029       // a member at index zero.
6030 if (Group->isReverse()) { 6031 DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to " 6032 "a reverse access with gaps.\n"); 6033 releaseGroup(Group); 6034 continue; 6035 } 6036 DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n"); 6037 RequiresScalarEpilogue = true; 6038 } 6039 } 6040 } 6041 6042 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) { 6043 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 6044 // TODO: It may by useful to do since it's still likely to be dynamically 6045 // uniform if the target can skip. 6046 DEBUG(dbgs() << "LV: Not inserting runtime ptr check for divergent target"); 6047 6048 ORE->emit( 6049 createMissedAnalysis("CantVersionLoopWithDivergentTarget") 6050 << "runtime pointer checks needed. Not enabled for divergent target"); 6051 6052 return None; 6053 } 6054 6055 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 6056 if (!OptForSize) // Remaining checks deal with scalar loop when OptForSize. 6057 return computeFeasibleMaxVF(OptForSize, TC); 6058 6059 if (Legal->getRuntimePointerChecking()->Need) { 6060 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 6061 << "runtime pointer checks needed. Enable vectorization of this " 6062 "loop with '#pragma clang loop vectorize(enable)' when " 6063 "compiling with -Os/-Oz"); 6064 DEBUG(dbgs() 6065 << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); 6066 return None; 6067 } 6068 6069 // If we optimize the program for size, avoid creating the tail loop. 6070 DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 6071 6072 // If we don't know the precise trip count, don't try to vectorize. 6073 if (TC < 2) { 6074 ORE->emit( 6075 createMissedAnalysis("UnknownLoopCountComplexCFG") 6076 << "unable to calculate the loop count due to complex control flow"); 6077 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); 6078 return None; 6079 } 6080 6081 unsigned MaxVF = computeFeasibleMaxVF(OptForSize, TC); 6082 6083 if (TC % MaxVF != 0) { 6084 // If the trip count that we found modulo the vectorization factor is not 6085 // zero then we require a tail. 6086 // FIXME: look for a smaller MaxVF that does divide TC rather than give up. 6087 // FIXME: return None if loop requiresScalarEpilog(<MaxVF>), or look for a 6088 // smaller MaxVF that does not require a scalar epilog. 6089 6090 ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize") 6091 << "cannot optimize for size and vectorize at the " 6092 "same time. Enable vectorization of this loop " 6093 "with '#pragma clang loop vectorize(enable)' " 6094 "when compiling with -Os/-Oz"); 6095 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); 6096 return None; 6097 } 6098 6099 return MaxVF; 6100 } 6101 6102 unsigned 6103 LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize, 6104 unsigned ConstTripCount) { 6105 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 6106 unsigned SmallestType, WidestType; 6107 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 6108 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 6109 6110 // Get the maximum safe dependence distance in bits computed by LAA. 6111 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 6112 // the memory accesses that is most restrictive (involved in the smallest 6113 // dependence distance). 
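// Illustrative arithmetic (hypothetical numbers): if the most restrictive
// accesses operate on i32 and LAA computed a maximum safe VF of 8, then
// MaxSafeRegisterWidth = 8 * 4 bytes * 8 = 256 bits, and a wider target
// register is clamped down to 256 bits below.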
6114 unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth(); 6115 6116 WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth); 6117 6118 unsigned MaxVectorSize = WidestRegister / WidestType; 6119 6120 DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / " 6121 << WidestType << " bits.\n"); 6122 DEBUG(dbgs() << "LV: The Widest register safe to use is: " << WidestRegister 6123 << " bits.\n"); 6124 6125 assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements" 6126 " into one vector!"); 6127 if (MaxVectorSize == 0) { 6128 DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 6129 MaxVectorSize = 1; 6130 return MaxVectorSize; 6131 } else if (ConstTripCount && ConstTripCount < MaxVectorSize && 6132 isPowerOf2_32(ConstTripCount)) { 6133 // We need to clamp the VF to be the ConstTripCount. There is no point in 6134 // choosing a higher viable VF as done in the loop below. 6135 DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 6136 << ConstTripCount << "\n"); 6137 MaxVectorSize = ConstTripCount; 6138 return MaxVectorSize; 6139 } 6140 6141 unsigned MaxVF = MaxVectorSize; 6142 if (TTI.shouldMaximizeVectorBandwidth(OptForSize) || 6143 (MaximizeBandwidth && !OptForSize)) { 6144 // Collect all viable vectorization factors larger than the default MaxVF 6145 // (i.e. MaxVectorSize). 6146 SmallVector<unsigned, 8> VFs; 6147 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 6148 for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2) 6149 VFs.push_back(VS); 6150 6151 // For each VF calculate its register usage. 6152 auto RUs = calculateRegisterUsage(VFs); 6153 6154 // Select the largest VF which doesn't require more registers than existing 6155 // ones. 6156 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true); 6157 for (int i = RUs.size() - 1; i >= 0; --i) { 6158 if (RUs[i].MaxLocalUsers <= TargetNumRegisters) { 6159 MaxVF = VFs[i]; 6160 break; 6161 } 6162 } 6163 } 6164 return MaxVF; 6165 } 6166 6167 VectorizationFactor 6168 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) { 6169 float Cost = expectedCost(1).first; 6170 const float ScalarCost = Cost; 6171 unsigned Width = 1; 6172 DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 6173 6174 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 6175 // Ignore scalar width, because the user explicitly wants vectorization. 6176 if (ForceVectorization && MaxVF > 1) { 6177 Width = 2; 6178 Cost = expectedCost(Width).first / (float)Width; 6179 } 6180 6181 for (unsigned i = 2; i <= MaxVF; i *= 2) { 6182 // Notice that the vector loop needs to be executed less times, so 6183 // we need to divide the cost of the vector loops by the width of 6184 // the vector elements. 6185 VectorizationCostTy C = expectedCost(i); 6186 float VectorCost = C.first / (float)i; 6187 DEBUG(dbgs() << "LV: Vector loop of width " << i 6188 << " costs: " << (int)VectorCost << ".\n"); 6189 if (!C.second && !ForceVectorization) { 6190 DEBUG( 6191 dbgs() << "LV: Not considering vector loop of width " << i 6192 << " because it will not generate any vector instructions.\n"); 6193 continue; 6194 } 6195 if (VectorCost < Cost) { 6196 Cost = VectorCost; 6197 Width = i; 6198 } 6199 } 6200 6201 if (!EnableCondStoresVectorization && NumPredStores) { 6202 ORE->emit(createMissedAnalysis("ConditionalStore") 6203 << "store that is conditionally executed prevents vectorization"); 6204 DEBUG(dbgs() << "LV: No vectorization. 
There are conditional stores.\n"); 6205 Width = 1; 6206 Cost = ScalarCost; 6207 } 6208 6209 DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 6210 << "LV: Vectorization seems to be not beneficial, " 6211 << "but was forced by a user.\n"); 6212 DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 6213 VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)}; 6214 return Factor; 6215 } 6216 6217 std::pair<unsigned, unsigned> 6218 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 6219 unsigned MinWidth = -1U; 6220 unsigned MaxWidth = 8; 6221 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 6222 6223 // For each block. 6224 for (BasicBlock *BB : TheLoop->blocks()) { 6225 // For each instruction in the loop. 6226 for (Instruction &I : *BB) { 6227 Type *T = I.getType(); 6228 6229 // Skip ignored values. 6230 if (ValuesToIgnore.count(&I)) 6231 continue; 6232 6233 // Only examine Loads, Stores and PHINodes. 6234 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 6235 continue; 6236 6237 // Examine PHI nodes that are reduction variables. Update the type to 6238 // account for the recurrence type. 6239 if (auto *PN = dyn_cast<PHINode>(&I)) { 6240 if (!Legal->isReductionVariable(PN)) 6241 continue; 6242 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN]; 6243 T = RdxDesc.getRecurrenceType(); 6244 } 6245 6246 // Examine the stored values. 6247 if (auto *ST = dyn_cast<StoreInst>(&I)) 6248 T = ST->getValueOperand()->getType(); 6249 6250 // Ignore loaded pointer types and stored pointer types that are not 6251 // vectorizable. 6252 // 6253 // FIXME: The check here attempts to predict whether a load or store will 6254 // be vectorized. We only know this for certain after a VF has 6255 // been selected. Here, we assume that if an access can be 6256 // vectorized, it will be. We should also look at extending this 6257 // optimization to non-pointer types. 6258 // 6259 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 6260 !Legal->isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 6261 continue; 6262 6263 MinWidth = std::min(MinWidth, 6264 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 6265 MaxWidth = std::max(MaxWidth, 6266 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 6267 } 6268 } 6269 6270 return {MinWidth, MaxWidth}; 6271 } 6272 6273 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, 6274 unsigned VF, 6275 unsigned LoopCost) { 6276 // -- The interleave heuristics -- 6277 // We interleave the loop in order to expose ILP and reduce the loop overhead. 6278 // There are many micro-architectural considerations that we can't predict 6279 // at this level. For example, frontend pressure (on decode or fetch) due to 6280 // code size, or the number and capabilities of the execution ports. 6281 // 6282 // We use the following heuristics to select the interleave count: 6283 // 1. If the code has reductions, then we interleave to break the cross 6284 // iteration dependency. 6285 // 2. If the loop is really small, then we interleave to reduce the loop 6286 // overhead. 6287 // 3. We don't interleave if we think that we will spill registers to memory 6288 // due to the increased register pressure. 6289 6290 // When we optimize for size, we don't interleave. 6291 if (OptForSize) 6292 return 1; 6293 6294 // We used the distance for the interleave count. 
6295 if (Legal->getMaxSafeDepDistBytes() != -1U) 6296 return 1; 6297 6298 // Do not interleave loops with a relatively small trip count. 6299 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 6300 if (TC > 1 && TC < TinyTripCountInterleaveThreshold) 6301 return 1; 6302 6303 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1); 6304 DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 6305 << " registers\n"); 6306 6307 if (VF == 1) { 6308 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 6309 TargetNumRegisters = ForceTargetNumScalarRegs; 6310 } else { 6311 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 6312 TargetNumRegisters = ForceTargetNumVectorRegs; 6313 } 6314 6315 RegisterUsage R = calculateRegisterUsage({VF})[0]; 6316 // We divide by these constants so assume that we have at least one 6317 // instruction that uses at least one register. 6318 R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U); 6319 6320 // We calculate the interleave count using the following formula. 6321 // Subtract the number of loop invariants from the number of available 6322 // registers. These registers are used by all of the interleaved instances. 6323 // Next, divide the remaining registers by the number of registers that is 6324 // required by the loop, in order to estimate how many parallel instances 6325 // fit without causing spills. All of this is rounded down if necessary to be 6326 // a power of two. We want power of two interleave count to simplify any 6327 // addressing operations or alignment considerations. 6328 unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) / 6329 R.MaxLocalUsers); 6330 6331 // Don't count the induction variable as interleaved. 6332 if (EnableIndVarRegisterHeur) 6333 IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) / 6334 std::max(1U, (R.MaxLocalUsers - 1))); 6335 6336 // Clamp the interleave ranges to reasonable counts. 6337 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF); 6338 6339 // Check if the user has overridden the max. 6340 if (VF == 1) { 6341 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 6342 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 6343 } else { 6344 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 6345 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 6346 } 6347 6348 // If we did not calculate the cost for VF (because the user selected the VF) 6349 // then we calculate the cost of VF here. 6350 if (LoopCost == 0) 6351 LoopCost = expectedCost(VF).first; 6352 6353 // Clamp the calculated IC to be between the 1 and the max interleave count 6354 // that the target allows. 6355 if (IC > MaxInterleaveCount) 6356 IC = MaxInterleaveCount; 6357 else if (IC < 1) 6358 IC = 1; 6359 6360 // Interleave if we vectorized this loop and there is a reduction that could 6361 // benefit from interleaving. 6362 if (VF > 1 && !Legal->getReductionVars()->empty()) { 6363 DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6364 return IC; 6365 } 6366 6367 // Note that if we've already vectorized the loop we will have done the 6368 // runtime check and so interleaving won't require further checks. 6369 bool InterleavingRequiresRuntimePointerCheck = 6370 (VF == 1 && Legal->getRuntimePointerChecking()->Need); 6371 6372 // We want to interleave small loops in order to reduce the loop overhead and 6373 // potentially expose ILP opportunities. 
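// As a rough illustration (hypothetical numbers): with 16 target registers,
// 2 loop-invariant values and at most 3 values live at once, the formula
// above gives IC = PowerOf2Floor((16 - 2) / 3) = 4; if the loop cost is 5
// and the small-loop threshold is 20, the clamp below keeps
// min(4, PowerOf2Floor(20 / 5)) = 4.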
6374   DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
6375   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
6376     // We assume that the loop overhead cost is 1. We use the cost model to
6377     // estimate the total loop cost and interleave until the loop overhead
6378     // is about 5% of the cost of the loop.
6379     unsigned SmallIC =
6380         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6381
6382     // Interleave until the store/load ports (estimated by the max interleave
6383     // count) are saturated.
6384     unsigned NumStores = Legal->getNumStores();
6385     unsigned NumLoads = Legal->getNumLoads();
6386     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6387     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6388
6389     // If we have a scalar reduction (vector reductions are already dealt with
6390     // by this point), we can increase the critical path length if the loop
6391     // we're interleaving is inside another loop. Limit it, by default, to 2
6392     // so that the critical path only gets increased by one reduction operation.
6393     if (!Legal->getReductionVars()->empty() && TheLoop->getLoopDepth() > 1) {
6394       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6395       SmallIC = std::min(SmallIC, F);
6396       StoresIC = std::min(StoresIC, F);
6397       LoadsIC = std::min(LoadsIC, F);
6398     }
6399
6400     if (EnableLoadStoreRuntimeInterleave &&
6401         std::max(StoresIC, LoadsIC) > SmallIC) {
6402       DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6403       return std::max(StoresIC, LoadsIC);
6404     }
6405
6406     DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6407     return SmallIC;
6408   }
6409
6410   // Interleave if this is a large loop (small loops are already dealt with by
6411   // this point) that could benefit from interleaving.
6412   bool HasReductions = !Legal->getReductionVars()->empty();
6413   if (TTI.enableAggressiveInterleaving(HasReductions)) {
6414     DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6415     return IC;
6416   }
6417
6418   DEBUG(dbgs() << "LV: Not Interleaving.\n");
6419   return 1;
6420 }
6421
6422 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6423 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
6424   // This function calculates the register usage by measuring the highest
6425   // number of values that are alive at a single location. Obviously, this is
6426   // a very rough estimation. We scan the loop in topological order and
6427   // assign a number to each instruction. We use RPO to ensure that defs are
6428   // met before their users. We assume that each instruction that has in-loop
6429   // users starts an interval. We record every time that an in-loop value is
6430   // used, so we have a list of the first and last occurrences of each
6431   // instruction. Next, we transpose this data structure into a multi-map that
6432   // holds the list of intervals that *end* at a specific location. This
6433   // multi-map allows us to perform a linear search. We scan the instructions
6434   // linearly and record each time that a new interval starts, by placing it
6435   // in a set. If we find this value in the multi-map then we remove it from
6436   // the set. The max register usage is the maximum size of the set.
6437   // We also search for instructions that are defined outside the loop, but
6438   // are used inside the loop. We need this number separately from the
6439   // max-interval usage number because when we unroll, loop-invariant values
6440   // do not take up more registers.
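// Small illustration of the interval idea (hypothetical source, exact
// bookkeeping omitted): in
//
//   a = A[i];    // last used by the add
//   b = B[i];    // last used by the add
//   c = a + b;   // last used by the store
//   C[i] = c;
//
// the intervals of 'a' and 'b' overlap, so at least two values are live at
// the same time, while loop-invariant operands are counted separately in
// LoopInvariantRegs.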
6441 LoopBlocksDFS DFS(TheLoop); 6442 DFS.perform(LI); 6443 6444 RegisterUsage RU; 6445 6446 // Each 'key' in the map opens a new interval. The values 6447 // of the map are the index of the 'last seen' usage of the 6448 // instruction that is the key. 6449 using IntervalMap = DenseMap<Instruction *, unsigned>; 6450 6451 // Maps instruction to its index. 6452 DenseMap<unsigned, Instruction *> IdxToInstr; 6453 // Marks the end of each interval. 6454 IntervalMap EndPoint; 6455 // Saves the list of instruction indices that are used in the loop. 6456 SmallSet<Instruction *, 8> Ends; 6457 // Saves the list of values that are used in the loop but are 6458 // defined outside the loop, such as arguments and constants. 6459 SmallPtrSet<Value *, 8> LoopInvariants; 6460 6461 unsigned Index = 0; 6462 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6463 for (Instruction &I : *BB) { 6464 IdxToInstr[Index++] = &I; 6465 6466 // Save the end location of each USE. 6467 for (Value *U : I.operands()) { 6468 auto *Instr = dyn_cast<Instruction>(U); 6469 6470 // Ignore non-instruction values such as arguments, constants, etc. 6471 if (!Instr) 6472 continue; 6473 6474 // If this instruction is outside the loop then record it and continue. 6475 if (!TheLoop->contains(Instr)) { 6476 LoopInvariants.insert(Instr); 6477 continue; 6478 } 6479 6480 // Overwrite previous end points. 6481 EndPoint[Instr] = Index; 6482 Ends.insert(Instr); 6483 } 6484 } 6485 } 6486 6487 // Saves the list of intervals that end with the index in 'key'. 6488 using InstrList = SmallVector<Instruction *, 2>; 6489 DenseMap<unsigned, InstrList> TransposeEnds; 6490 6491 // Transpose the EndPoints to a list of values that end at each index. 6492 for (auto &Interval : EndPoint) 6493 TransposeEnds[Interval.second].push_back(Interval.first); 6494 6495 SmallSet<Instruction *, 8> OpenIntervals; 6496 6497 // Get the size of the widest register. 6498 unsigned MaxSafeDepDist = -1U; 6499 if (Legal->getMaxSafeDepDistBytes() != -1U) 6500 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 6501 unsigned WidestRegister = 6502 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist); 6503 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 6504 6505 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6506 SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0); 6507 6508 DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6509 6510 // A lambda that gets the register usage for the given type and VF. 6511 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 6512 if (Ty->isTokenTy()) 6513 return 0U; 6514 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 6515 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 6516 }; 6517 6518 for (unsigned int i = 0; i < Index; ++i) { 6519 Instruction *I = IdxToInstr[i]; 6520 6521 // Remove all of the instructions that end at this location. 6522 InstrList &List = TransposeEnds[i]; 6523 for (Instruction *ToRemove : List) 6524 OpenIntervals.erase(ToRemove); 6525 6526 // Ignore instructions that are never used within the loop. 6527 if (!Ends.count(I)) 6528 continue; 6529 6530 // Skip ignored values. 6531 if (ValuesToIgnore.count(I)) 6532 continue; 6533 6534 // For each VF find the maximum usage of registers. 6535 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6536 if (VFs[j] == 1) { 6537 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 6538 continue; 6539 } 6540 collectUniformsAndScalars(VFs[j]); 6541 // Count the number of live intervals. 
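// Illustrative arithmetic for GetRegUsage above (hypothetical target): with
// a 128-bit widest register, an open interval of type i32 at VF = 8 counts
// as max(1, 8 * 32 / 128) = 2 vector registers.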
6542 unsigned RegUsage = 0; 6543 for (auto Inst : OpenIntervals) { 6544 // Skip ignored values for VF > 1. 6545 if (VecValuesToIgnore.count(Inst) || 6546 isScalarAfterVectorization(Inst, VFs[j])) 6547 continue; 6548 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 6549 } 6550 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 6551 } 6552 6553 DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6554 << OpenIntervals.size() << '\n'); 6555 6556 // Add the current instruction to the list of open intervals. 6557 OpenIntervals.insert(I); 6558 } 6559 6560 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6561 unsigned Invariant = 0; 6562 if (VFs[i] == 1) 6563 Invariant = LoopInvariants.size(); 6564 else { 6565 for (auto Inst : LoopInvariants) 6566 Invariant += GetRegUsage(Inst->getType(), VFs[i]); 6567 } 6568 6569 DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); 6570 DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); 6571 DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n'); 6572 6573 RU.LoopInvariantRegs = Invariant; 6574 RU.MaxLocalUsers = MaxUsages[i]; 6575 RUs[i] = RU; 6576 } 6577 6578 return RUs; 6579 } 6580 6581 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 6582 // TODO: Cost model for emulated masked load/store is completely 6583 // broken. This hack guides the cost model to use an artificially 6584 // high enough value to practically disable vectorization with such 6585 // operations, except where previously deployed legality hack allowed 6586 // using very low cost values. This is to avoid regressions coming simply 6587 // from moving "masked load/store" check from legality to cost model. 6588 // Masked Load/Gather emulation was previously never allowed. 6589 // Limited number of Masked Store/Scatter emulation was allowed. 6590 assert(isScalarWithPredication(I) && 6591 "Expecting a scalar emulated instruction"); 6592 return isa<LoadInst>(I) || 6593 (isa<StoreInst>(I) && 6594 NumPredStores > NumberOfStoresToPredicate); 6595 } 6596 6597 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) { 6598 // If we aren't vectorizing the loop, or if we've already collected the 6599 // instructions to scalarize, there's nothing to do. Collection may already 6600 // have occurred if we have a user-selected VF and are now computing the 6601 // expected cost for interleaving. 6602 if (VF < 2 || InstsToScalarize.count(VF)) 6603 return; 6604 6605 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 6606 // not profitable to scalarize any instructions, the presence of VF in the 6607 // map will indicate that we've analyzed it already. 6608 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6609 6610 // Find all the instructions that are scalar with predication in the loop and 6611 // determine if it would be better to not if-convert the blocks they are in. 6612 // If so, we also record the instructions to scalarize. 6613 for (BasicBlock *BB : TheLoop->blocks()) { 6614 if (!Legal->blockNeedsPredication(BB)) 6615 continue; 6616 for (Instruction &I : *BB) 6617 if (isScalarWithPredication(&I)) { 6618 ScalarCostsTy ScalarCosts; 6619 // Do not apply discount logic if hacked cost is needed 6620 // for emulated masked memrefs. 6621 if (!useEmulatedMaskMemRefHack(&I) && 6622 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6623 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6624 // Remember that BB will remain after vectorization. 
6625 PredicatedBBsAfterVectorization.insert(BB); 6626 } 6627 } 6628 } 6629 6630 int LoopVectorizationCostModel::computePredInstDiscount( 6631 Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts, 6632 unsigned VF) { 6633 assert(!isUniformAfterVectorization(PredInst, VF) && 6634 "Instruction marked uniform-after-vectorization will be predicated"); 6635 6636 // Initialize the discount to zero, meaning that the scalar version and the 6637 // vector version cost the same. 6638 int Discount = 0; 6639 6640 // Holds instructions to analyze. The instructions we visit are mapped in 6641 // ScalarCosts. Those instructions are the ones that would be scalarized if 6642 // we find that the scalar version costs less. 6643 SmallVector<Instruction *, 8> Worklist; 6644 6645 // Returns true if the given instruction can be scalarized. 6646 auto canBeScalarized = [&](Instruction *I) -> bool { 6647 // We only attempt to scalarize instructions forming a single-use chain 6648 // from the original predicated block that would otherwise be vectorized. 6649 // Although not strictly necessary, we give up on instructions we know will 6650 // already be scalar to avoid traversing chains that are unlikely to be 6651 // beneficial. 6652 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6653 isScalarAfterVectorization(I, VF)) 6654 return false; 6655 6656 // If the instruction is scalar with predication, it will be analyzed 6657 // separately. We ignore it within the context of PredInst. 6658 if (isScalarWithPredication(I)) 6659 return false; 6660 6661 // If any of the instruction's operands are uniform after vectorization, 6662 // the instruction cannot be scalarized. This prevents, for example, a 6663 // masked load from being scalarized. 6664 // 6665 // We assume we will only emit a value for lane zero of an instruction 6666 // marked uniform after vectorization, rather than VF identical values. 6667 // Thus, if we scalarize an instruction that uses a uniform, we would 6668 // create uses of values corresponding to the lanes we aren't emitting code 6669 // for. This behavior can be changed by allowing getScalarValue to clone 6670 // the lane zero values for uniforms rather than asserting. 6671 for (Use &U : I->operands()) 6672 if (auto *J = dyn_cast<Instruction>(U.get())) 6673 if (isUniformAfterVectorization(J, VF)) 6674 return false; 6675 6676 // Otherwise, we can scalarize the instruction. 6677 return true; 6678 }; 6679 6680 // Returns true if an operand that cannot be scalarized must be extracted 6681 // from a vector. We will account for this scalarization overhead below. Note 6682 // that the non-void predicated instructions are placed in their own blocks, 6683 // and their return values are inserted into vectors. Thus, an extract would 6684 // still be required. 6685 auto needsExtract = [&](Instruction *I) -> bool { 6686 return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF); 6687 }; 6688 6689 // Compute the expected cost discount from scalarizing the entire expression 6690 // feeding the predicated instruction. We currently only consider expressions 6691 // that are single-use instruction chains. 6692 Worklist.push_back(PredInst); 6693 while (!Worklist.empty()) { 6694 Instruction *I = Worklist.pop_back_val(); 6695 6696 // If we've already analyzed the instruction, there's nothing to do. 6697 if (ScalarCosts.count(I)) 6698 continue; 6699 6700 // Compute the cost of the vector instruction. 
Note that this cost already 6701 // includes the scalarization overhead of the predicated instruction. 6702 unsigned VectorCost = getInstructionCost(I, VF).first; 6703 6704 // Compute the cost of the scalarized instruction. This cost is the cost of 6705 // the instruction as if it wasn't if-converted and instead remained in the 6706 // predicated block. We will scale this cost by block probability after 6707 // computing the scalarization overhead. 6708 unsigned ScalarCost = VF * getInstructionCost(I, 1).first; 6709 6710 // Compute the scalarization overhead of needed insertelement instructions 6711 // and phi nodes. 6712 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 6713 ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF), 6714 true, false); 6715 ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI); 6716 } 6717 6718 // Compute the scalarization overhead of needed extractelement 6719 // instructions. For each of the instruction's operands, if the operand can 6720 // be scalarized, add it to the worklist; otherwise, account for the 6721 // overhead. 6722 for (Use &U : I->operands()) 6723 if (auto *J = dyn_cast<Instruction>(U.get())) { 6724 assert(VectorType::isValidElementType(J->getType()) && 6725 "Instruction has non-scalar type"); 6726 if (canBeScalarized(J)) 6727 Worklist.push_back(J); 6728 else if (needsExtract(J)) 6729 ScalarCost += TTI.getScalarizationOverhead( 6730 ToVectorTy(J->getType(),VF), false, true); 6731 } 6732 6733 // Scale the total scalar cost by block probability. 6734 ScalarCost /= getReciprocalPredBlockProb(); 6735 6736 // Compute the discount. A non-negative discount means the vector version 6737 // of the instruction costs more, and scalarizing would be beneficial. 6738 Discount += VectorCost - ScalarCost; 6739 ScalarCosts[I] = ScalarCost; 6740 } 6741 6742 return Discount; 6743 } 6744 6745 LoopVectorizationCostModel::VectorizationCostTy 6746 LoopVectorizationCostModel::expectedCost(unsigned VF) { 6747 VectorizationCostTy Cost; 6748 6749 // For each block. 6750 for (BasicBlock *BB : TheLoop->blocks()) { 6751 VectorizationCostTy BlockCost; 6752 6753 // For each instruction in the old loop. 6754 for (Instruction &I : *BB) { 6755 // Skip dbg intrinsics. 6756 if (isa<DbgInfoIntrinsic>(I)) 6757 continue; 6758 6759 // Skip ignored values. 6760 if (ValuesToIgnore.count(&I) || 6761 (VF > 1 && VecValuesToIgnore.count(&I))) 6762 continue; 6763 6764 VectorizationCostTy C = getInstructionCost(&I, VF); 6765 6766 // Check if we should override the cost. 6767 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 6768 C.first = ForceTargetInstructionCost; 6769 6770 BlockCost.first += C.first; 6771 BlockCost.second |= C.second; 6772 DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF " 6773 << VF << " For instruction: " << I << '\n'); 6774 } 6775 6776 // If we are vectorizing a predicated block, it will have been 6777 // if-converted. This means that the block's instructions (aside from 6778 // stores and instructions that may divide by zero) will now be 6779 // unconditionally executed. For the scalar case, we may not always execute 6780 // the predicated block. Thus, scale the block's cost by the probability of 6781 // executing it. 
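// For example (illustrative numbers): with a reciprocal block probability
// of 2, a predicated block whose instructions sum to a scalar cost of 8
// contributes only 8 / 2 = 4 to the scalar loop cost, reflecting that the
// block is expected to execute on only some of the iterations.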
6782 if (VF == 1 && Legal->blockNeedsPredication(BB)) 6783 BlockCost.first /= getReciprocalPredBlockProb(); 6784 6785 Cost.first += BlockCost.first; 6786 Cost.second |= BlockCost.second; 6787 } 6788 6789 return Cost; 6790 } 6791 6792 /// \brief Gets Address Access SCEV after verifying that the access pattern 6793 /// is loop invariant except the induction variable dependence. 6794 /// 6795 /// This SCEV can be sent to the Target in order to estimate the address 6796 /// calculation cost. 6797 static const SCEV *getAddressAccessSCEV( 6798 Value *Ptr, 6799 LoopVectorizationLegality *Legal, 6800 PredicatedScalarEvolution &PSE, 6801 const Loop *TheLoop) { 6802 6803 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6804 if (!Gep) 6805 return nullptr; 6806 6807 // We are looking for a gep with all loop invariant indices except for one 6808 // which should be an induction variable. 6809 auto SE = PSE.getSE(); 6810 unsigned NumOperands = Gep->getNumOperands(); 6811 for (unsigned i = 1; i < NumOperands; ++i) { 6812 Value *Opd = Gep->getOperand(i); 6813 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6814 !Legal->isInductionVariable(Opd)) 6815 return nullptr; 6816 } 6817 6818 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6819 return PSE.getSCEV(Ptr); 6820 } 6821 6822 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6823 return Legal->hasStride(I->getOperand(0)) || 6824 Legal->hasStride(I->getOperand(1)); 6825 } 6826 6827 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6828 unsigned VF) { 6829 Type *ValTy = getMemInstValueType(I); 6830 auto SE = PSE.getSE(); 6831 6832 unsigned Alignment = getMemInstAlignment(I); 6833 unsigned AS = getMemInstAddressSpace(I); 6834 Value *Ptr = getLoadStorePointerOperand(I); 6835 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6836 6837 // Figure out whether the access is strided and get the stride value 6838 // if it's known in compile time 6839 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6840 6841 // Get the cost of the scalar memory instruction and address computation. 6842 unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6843 6844 Cost += VF * 6845 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6846 AS, I); 6847 6848 // Get the overhead of the extractelement and insertelement instructions 6849 // we might create due to scalarization. 6850 Cost += getScalarizationOverhead(I, VF, TTI); 6851 6852 // If we have a predicated store, it may not be executed for each vector 6853 // lane. Scale the cost by the probability of executing the predicated 6854 // block. 6855 if (isScalarWithPredication(I)) { 6856 Cost /= getReciprocalPredBlockProb(); 6857 6858 if (useEmulatedMaskMemRefHack(I)) 6859 // Artificially setting to a high enough value to practically disable 6860 // vectorization with such operations. 
6861 Cost = 3000000; 6862 } 6863 6864 return Cost; 6865 } 6866 6867 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6868 unsigned VF) { 6869 Type *ValTy = getMemInstValueType(I); 6870 Type *VectorTy = ToVectorTy(ValTy, VF); 6871 unsigned Alignment = getMemInstAlignment(I); 6872 Value *Ptr = getLoadStorePointerOperand(I); 6873 unsigned AS = getMemInstAddressSpace(I); 6874 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 6875 6876 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6877 "Stride should be 1 or -1 for consecutive memory access"); 6878 unsigned Cost = 0; 6879 if (Legal->isMaskRequired(I)) 6880 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 6881 else 6882 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I); 6883 6884 bool Reverse = ConsecutiveStride < 0; 6885 if (Reverse) 6886 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6887 return Cost; 6888 } 6889 6890 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6891 unsigned VF) { 6892 LoadInst *LI = cast<LoadInst>(I); 6893 Type *ValTy = LI->getType(); 6894 Type *VectorTy = ToVectorTy(ValTy, VF); 6895 unsigned Alignment = LI->getAlignment(); 6896 unsigned AS = LI->getPointerAddressSpace(); 6897 6898 return TTI.getAddressComputationCost(ValTy) + 6899 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) + 6900 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6901 } 6902 6903 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 6904 unsigned VF) { 6905 Type *ValTy = getMemInstValueType(I); 6906 Type *VectorTy = ToVectorTy(ValTy, VF); 6907 unsigned Alignment = getMemInstAlignment(I); 6908 Value *Ptr = getLoadStorePointerOperand(I); 6909 6910 return TTI.getAddressComputationCost(VectorTy) + 6911 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 6912 Legal->isMaskRequired(I), Alignment); 6913 } 6914 6915 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 6916 unsigned VF) { 6917 Type *ValTy = getMemInstValueType(I); 6918 Type *VectorTy = ToVectorTy(ValTy, VF); 6919 unsigned AS = getMemInstAddressSpace(I); 6920 6921 auto Group = Legal->getInterleavedAccessGroup(I); 6922 assert(Group && "Fail to get an interleaved access group."); 6923 6924 unsigned InterleaveFactor = Group->getFactor(); 6925 Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 6926 6927 // Holds the indices of existing members in an interleaved load group. 6928 // An interleaved store group doesn't need this as it doesn't allow gaps. 6929 SmallVector<unsigned, 4> Indices; 6930 if (isa<LoadInst>(I)) { 6931 for (unsigned i = 0; i < InterleaveFactor; i++) 6932 if (Group->getMember(i)) 6933 Indices.push_back(i); 6934 } 6935 6936 // Calculate the cost of the whole interleaved group. 6937 unsigned Cost = TTI.getInterleavedMemoryOpCost(I->getOpcode(), WideVecTy, 6938 Group->getFactor(), Indices, 6939 Group->getAlignment(), AS); 6940 6941 if (Group->isReverse()) 6942 Cost += Group->getNumMembers() * 6943 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6944 return Cost; 6945 } 6946 6947 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 6948 unsigned VF) { 6949 // Calculate scalar cost only. Vectorization cost should be ready at this 6950 // moment. 
6951 if (VF == 1) { 6952 Type *ValTy = getMemInstValueType(I); 6953 unsigned Alignment = getMemInstAlignment(I); 6954 unsigned AS = getMemInstAddressSpace(I); 6955 6956 return TTI.getAddressComputationCost(ValTy) + 6957 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I); 6958 } 6959 return getWideningCost(I, VF); 6960 } 6961 6962 LoopVectorizationCostModel::VectorizationCostTy 6963 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 6964 // If we know that this instruction will remain uniform, check the cost of 6965 // the scalar version. 6966 if (isUniformAfterVectorization(I, VF)) 6967 VF = 1; 6968 6969 if (VF > 1 && isProfitableToScalarize(I, VF)) 6970 return VectorizationCostTy(InstsToScalarize[VF][I], false); 6971 6972 // Forced scalars do not have any scalarization overhead. 6973 if (VF > 1 && ForcedScalars.count(VF) && 6974 ForcedScalars.find(VF)->second.count(I)) 6975 return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false); 6976 6977 Type *VectorTy; 6978 unsigned C = getInstructionCost(I, VF, VectorTy); 6979 6980 bool TypeNotScalarized = 6981 VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF; 6982 return VectorizationCostTy(C, TypeNotScalarized); 6983 } 6984 6985 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) { 6986 if (VF == 1) 6987 return; 6988 NumPredStores = 0; 6989 for (BasicBlock *BB : TheLoop->blocks()) { 6990 // For each instruction in the old loop. 6991 for (Instruction &I : *BB) { 6992 Value *Ptr = getLoadStorePointerOperand(&I); 6993 if (!Ptr) 6994 continue; 6995 6996 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 6997 NumPredStores++; 6998 if (isa<LoadInst>(&I) && Legal->isUniform(Ptr)) { 6999 // Scalar load + broadcast 7000 unsigned Cost = getUniformMemOpCost(&I, VF); 7001 setWideningDecision(&I, VF, CM_Scalarize, Cost); 7002 continue; 7003 } 7004 7005 // We assume that widening is the best solution when possible. 7006 if (memoryInstructionCanBeWidened(&I, VF)) { 7007 unsigned Cost = getConsecutiveMemOpCost(&I, VF); 7008 int ConsecutiveStride = 7009 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 7010 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7011 "Expected consecutive stride."); 7012 InstWidening Decision = 7013 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 7014 setWideningDecision(&I, VF, Decision, Cost); 7015 continue; 7016 } 7017 7018 // Choose between Interleaving, Gather/Scatter or Scalarization. 7019 unsigned InterleaveCost = std::numeric_limits<unsigned>::max(); 7020 unsigned NumAccesses = 1; 7021 if (Legal->isAccessInterleaved(&I)) { 7022 auto Group = Legal->getInterleavedAccessGroup(&I); 7023 assert(Group && "Fail to get an interleaved access group."); 7024 7025 // Make one decision for the whole group. 7026 if (getWideningDecision(&I, VF) != CM_Unknown) 7027 continue; 7028 7029 NumAccesses = Group->getNumMembers(); 7030 InterleaveCost = getInterleaveGroupCost(&I, VF); 7031 } 7032 7033 unsigned GatherScatterCost = 7034 isLegalGatherOrScatter(&I) 7035 ? getGatherScatterCost(&I, VF) * NumAccesses 7036 : std::numeric_limits<unsigned>::max(); 7037 7038 unsigned ScalarizationCost = 7039 getMemInstScalarizationCost(&I, VF) * NumAccesses; 7040 7041 // Choose better solution for the current VF, 7042 // write down this decision and use it during vectorization. 
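      // For illustration only (hypothetical TTI numbers): if for this access
      // InterleaveCost == 8, GatherScatterCost == UINT_MAX (no gather/scatter
      // support) and ScalarizationCost == 12, the code below picks
      // CM_Interleave with Cost == 8. Ties between interleaving and
      // gather/scatter are broken in favor of interleaving.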
7043       unsigned Cost;
7044       InstWidening Decision;
7045       if (InterleaveCost <= GatherScatterCost &&
7046           InterleaveCost < ScalarizationCost) {
7047         Decision = CM_Interleave;
7048         Cost = InterleaveCost;
7049       } else if (GatherScatterCost < ScalarizationCost) {
7050         Decision = CM_GatherScatter;
7051         Cost = GatherScatterCost;
7052       } else {
7053         Decision = CM_Scalarize;
7054         Cost = ScalarizationCost;
7055       }
7056       // If the instruction belongs to an interleave group, the whole group
7057       // receives the same decision. The whole group receives the cost, but
7058       // the cost will actually be assigned to one instruction.
7059       if (auto Group = Legal->getInterleavedAccessGroup(&I))
7060         setWideningDecision(Group, VF, Decision, Cost);
7061       else
7062         setWideningDecision(&I, VF, Decision, Cost);
7063     }
7064   }
7065 
7066   // Make sure that any load of address and any other address computation
7067   // remains scalar unless there is gather/scatter support. This avoids
7068   // inevitable extracts into address registers, and also has the benefit of
7069   // activating LSR more, since that pass can't optimize vectorized
7070   // addresses.
7071   if (TTI.prefersVectorizedAddressing())
7072     return;
7073 
7074   // Start with all scalar pointer uses.
7075   SmallPtrSet<Instruction *, 8> AddrDefs;
7076   for (BasicBlock *BB : TheLoop->blocks())
7077     for (Instruction &I : *BB) {
7078       Instruction *PtrDef =
7079           dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7080       if (PtrDef && TheLoop->contains(PtrDef) &&
7081           getWideningDecision(&I, VF) != CM_GatherScatter)
7082         AddrDefs.insert(PtrDef);
7083     }
7084 
7085   // Add all instructions used to generate the addresses.
7086   SmallVector<Instruction *, 4> Worklist;
7087   for (auto *I : AddrDefs)
7088     Worklist.push_back(I);
7089   while (!Worklist.empty()) {
7090     Instruction *I = Worklist.pop_back_val();
7091     for (auto &Op : I->operands())
7092       if (auto *InstOp = dyn_cast<Instruction>(Op))
7093         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7094             AddrDefs.insert(InstOp).second)
7095           Worklist.push_back(InstOp);
7096   }
7097 
7098   for (auto *I : AddrDefs) {
7099     if (isa<LoadInst>(I)) {
7100       // Setting the desired widening decision should ideally be handled by
7101       // the cost functions, but since this involves finding out whether the
7102       // loaded register is involved in an address computation, it is instead
7103       // changed here when we know this is the case.
7104       InstWidening Decision = getWideningDecision(I, VF);
7105       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7106         // Scalarize a widened load of address.
7107         setWideningDecision(I, VF, CM_Scalarize,
7108                             (VF * getMemoryInstructionCost(I, 1)));
7109       else if (auto Group = Legal->getInterleavedAccessGroup(I)) {
7110         // Scalarize an interleave group of address loads.
7111         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7112           if (Instruction *Member = Group->getMember(I))
7113             setWideningDecision(Member, VF, CM_Scalarize,
7114                                 (VF * getMemoryInstructionCost(Member, 1)));
7115         }
7116       }
7117     } else
7118       // Make sure I gets scalarized and a cost estimate without
7119       // scalarization overhead.
7120       ForcedScalars[VF].insert(I);
7121   }
7122 }
7123 
7124 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7125                                                         unsigned VF,
7126                                                         Type *&VectorTy) {
7127   Type *RetTy = I->getType();
7128   if (canTruncateToMinimalBitwidth(I, VF))
7129     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7130   VectorTy = isScalarAfterVectorization(I, VF) ?
RetTy : ToVectorTy(RetTy, VF); 7131 auto SE = PSE.getSE(); 7132 7133 // TODO: We need to estimate the cost of intrinsic calls. 7134 switch (I->getOpcode()) { 7135 case Instruction::GetElementPtr: 7136 // We mark this instruction as zero-cost because the cost of GEPs in 7137 // vectorized code depends on whether the corresponding memory instruction 7138 // is scalarized or not. Therefore, we handle GEPs with the memory 7139 // instruction cost. 7140 return 0; 7141 case Instruction::Br: { 7142 // In cases of scalarized and predicated instructions, there will be VF 7143 // predicated blocks in the vectorized loop. Each branch around these 7144 // blocks requires also an extract of its vector compare i1 element. 7145 bool ScalarPredicatedBB = false; 7146 BranchInst *BI = cast<BranchInst>(I); 7147 if (VF > 1 && BI->isConditional() && 7148 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7149 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7150 ScalarPredicatedBB = true; 7151 7152 if (ScalarPredicatedBB) { 7153 // Return cost for branches around scalarized and predicated blocks. 7154 Type *Vec_i1Ty = 7155 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7156 return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) + 7157 (TTI.getCFInstrCost(Instruction::Br) * VF)); 7158 } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1) 7159 // The back-edge branch will remain, as will all scalar branches. 7160 return TTI.getCFInstrCost(Instruction::Br); 7161 else 7162 // This branch will be eliminated by if-conversion. 7163 return 0; 7164 // Note: We currently assume zero cost for an unconditional branch inside 7165 // a predicated block since it will become a fall-through, although we 7166 // may decide in the future to call TTI for all branches. 7167 } 7168 case Instruction::PHI: { 7169 auto *Phi = cast<PHINode>(I); 7170 7171 // First-order recurrences are replaced by vector shuffles inside the loop. 7172 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 7173 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 7174 VectorTy, VF - 1, VectorTy); 7175 7176 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7177 // converted into select instructions. We require N - 1 selects per phi 7178 // node, where N is the number of incoming values. 7179 if (VF > 1 && Phi->getParent() != TheLoop->getHeader()) 7180 return (Phi->getNumIncomingValues() - 1) * 7181 TTI.getCmpSelInstrCost( 7182 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7183 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF)); 7184 7185 return TTI.getCFInstrCost(Instruction::PHI); 7186 } 7187 case Instruction::UDiv: 7188 case Instruction::SDiv: 7189 case Instruction::URem: 7190 case Instruction::SRem: 7191 // If we have a predicated instruction, it may not be executed for each 7192 // vector lane. Get the scalarization cost and scale this amount by the 7193 // probability of executing the predicated block. If the instruction is not 7194 // predicated, we fall through to the next case. 7195 if (VF > 1 && isScalarWithPredication(I)) { 7196 unsigned Cost = 0; 7197 7198 // These instructions have a non-void type, so account for the phi nodes 7199 // that we will create. This cost is likely to be zero. The phi node 7200 // cost, if any, should be scaled by the block probability because it 7201 // models a copy at the end of each predicated block. 7202 Cost += VF * TTI.getCFInstrCost(Instruction::PHI); 7203 7204 // The cost of the non-predicated instruction. 
7205 Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy); 7206 7207 // The cost of insertelement and extractelement instructions needed for 7208 // scalarization. 7209 Cost += getScalarizationOverhead(I, VF, TTI); 7210 7211 // Scale the cost by the probability of executing the predicated blocks. 7212 // This assumes the predicated block for each vector lane is equally 7213 // likely. 7214 return Cost / getReciprocalPredBlockProb(); 7215 } 7216 LLVM_FALLTHROUGH; 7217 case Instruction::Add: 7218 case Instruction::FAdd: 7219 case Instruction::Sub: 7220 case Instruction::FSub: 7221 case Instruction::Mul: 7222 case Instruction::FMul: 7223 case Instruction::FDiv: 7224 case Instruction::FRem: 7225 case Instruction::Shl: 7226 case Instruction::LShr: 7227 case Instruction::AShr: 7228 case Instruction::And: 7229 case Instruction::Or: 7230 case Instruction::Xor: { 7231 // Since we will replace the stride by 1 the multiplication should go away. 7232 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7233 return 0; 7234 // Certain instructions can be cheaper to vectorize if they have a constant 7235 // second vector operand. One example of this are shifts on x86. 7236 TargetTransformInfo::OperandValueKind Op1VK = 7237 TargetTransformInfo::OK_AnyValue; 7238 TargetTransformInfo::OperandValueKind Op2VK = 7239 TargetTransformInfo::OK_AnyValue; 7240 TargetTransformInfo::OperandValueProperties Op1VP = 7241 TargetTransformInfo::OP_None; 7242 TargetTransformInfo::OperandValueProperties Op2VP = 7243 TargetTransformInfo::OP_None; 7244 Value *Op2 = I->getOperand(1); 7245 7246 // Check for a splat or for a non uniform vector of constants. 7247 if (isa<ConstantInt>(Op2)) { 7248 ConstantInt *CInt = cast<ConstantInt>(Op2); 7249 if (CInt && CInt->getValue().isPowerOf2()) 7250 Op2VP = TargetTransformInfo::OP_PowerOf2; 7251 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 7252 } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) { 7253 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 7254 Constant *SplatValue = cast<Constant>(Op2)->getSplatValue(); 7255 if (SplatValue) { 7256 ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue); 7257 if (CInt && CInt->getValue().isPowerOf2()) 7258 Op2VP = TargetTransformInfo::OP_PowerOf2; 7259 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 7260 } 7261 } else if (Legal->isUniform(Op2)) { 7262 Op2VK = TargetTransformInfo::OK_UniformValue; 7263 } 7264 SmallVector<const Value *, 4> Operands(I->operand_values()); 7265 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 7266 return N * TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, 7267 Op2VK, Op1VP, Op2VP, Operands); 7268 } 7269 case Instruction::Select: { 7270 SelectInst *SI = cast<SelectInst>(I); 7271 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7272 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7273 Type *CondTy = SI->getCondition()->getType(); 7274 if (!ScalarCond) 7275 CondTy = VectorType::get(CondTy, VF); 7276 7277 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I); 7278 } 7279 case Instruction::ICmp: 7280 case Instruction::FCmp: { 7281 Type *ValTy = I->getOperand(0)->getType(); 7282 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7283 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7284 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7285 VectorTy = ToVectorTy(ValTy, VF); 7286 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I); 7287 } 7288 case Instruction::Store: 7289 case Instruction::Load: { 7290 unsigned Width = VF; 7291 if (Width > 1) { 7292 InstWidening Decision = getWideningDecision(I, Width); 7293 assert(Decision != CM_Unknown && 7294 "CM decision should be taken at this point"); 7295 if (Decision == CM_Scalarize) 7296 Width = 1; 7297 } 7298 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 7299 return getMemoryInstructionCost(I, VF); 7300 } 7301 case Instruction::ZExt: 7302 case Instruction::SExt: 7303 case Instruction::FPToUI: 7304 case Instruction::FPToSI: 7305 case Instruction::FPExt: 7306 case Instruction::PtrToInt: 7307 case Instruction::IntToPtr: 7308 case Instruction::SIToFP: 7309 case Instruction::UIToFP: 7310 case Instruction::Trunc: 7311 case Instruction::FPTrunc: 7312 case Instruction::BitCast: { 7313 // We optimize the truncation of induction variables having constant 7314 // integer steps. The cost of these truncations is the same as the scalar 7315 // operation. 7316 if (isOptimizableIVTruncate(I, VF)) { 7317 auto *Trunc = cast<TruncInst>(I); 7318 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7319 Trunc->getSrcTy(), Trunc); 7320 } 7321 7322 Type *SrcScalarTy = I->getOperand(0)->getType(); 7323 Type *SrcVecTy = 7324 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7325 if (canTruncateToMinimalBitwidth(I, VF)) { 7326 // This cast is going to be shrunk. This may remove the cast or it might 7327 // turn it into slightly different cast. For example, if MinBW == 16, 7328 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7329 // 7330 // Calculate the modified src and dest types. 7331 Type *MinVecTy = VectorTy; 7332 if (I->getOpcode() == Instruction::Trunc) { 7333 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7334 VectorTy = 7335 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7336 } else if (I->getOpcode() == Instruction::ZExt || 7337 I->getOpcode() == Instruction::SExt) { 7338 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7339 VectorTy = 7340 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7341 } 7342 } 7343 7344 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 7345 return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I); 7346 } 7347 case Instruction::Call: { 7348 bool NeedToScalarize; 7349 CallInst *CI = cast<CallInst>(I); 7350 unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize); 7351 if (getVectorIntrinsicIDForCall(CI, TLI)) 7352 return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI)); 7353 return CallCost; 7354 } 7355 default: 7356 // The cost of executing VF copies of the scalar instruction. This opcode 7357 // is unknown. Assume that it is the same as 'mul'. 7358 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) + 7359 getScalarizationOverhead(I, VF, TTI); 7360 } // end of switch. 7361 } 7362 7363 char LoopVectorize::ID = 0; 7364 7365 static const char lv_name[] = "Loop Vectorization"; 7366 7367 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7368 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7369 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7370 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7371 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7372 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7373 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7374 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7375 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7376 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7377 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7378 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7379 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7380 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7381 7382 namespace llvm { 7383 7384 Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) { 7385 return new LoopVectorize(NoUnrolling, AlwaysVectorize); 7386 } 7387 7388 } // end namespace llvm 7389 7390 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7391 // Check if the pointer operand of a load or store instruction is 7392 // consecutive. 7393 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7394 return Legal->isConsecutivePtr(Ptr); 7395 return false; 7396 } 7397 7398 void LoopVectorizationCostModel::collectValuesToIgnore() { 7399 // Ignore ephemeral values. 7400 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7401 7402 // Ignore type-promoting instructions we identified during reduction 7403 // detection. 7404 for (auto &Reduction : *Legal->getReductionVars()) { 7405 RecurrenceDescriptor &RedDes = Reduction.second; 7406 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7407 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7408 } 7409 // Ignore type-casting instructions we identified during induction 7410 // detection. 7411 for (auto &Induction : *Legal->getInductionVars()) { 7412 InductionDescriptor &IndDes = Induction.second; 7413 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7414 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7415 } 7416 } 7417 7418 VectorizationFactor 7419 LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) { 7420 // Width 1 means no vectorize, cost 0 means uncomputed cost. 7421 const VectorizationFactor NoVectorization = {1U, 0U}; 7422 Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize); 7423 if (!MaybeMaxVF.hasValue()) // Cases considered too costly to vectorize. 
7424 return NoVectorization; 7425 7426 if (UserVF) { 7427 DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 7428 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 7429 // Collect the instructions (and their associated costs) that will be more 7430 // profitable to scalarize. 7431 CM.selectUserVectorizationFactor(UserVF); 7432 buildVPlans(UserVF, UserVF); 7433 DEBUG(printPlans(dbgs())); 7434 return {UserVF, 0}; 7435 } 7436 7437 unsigned MaxVF = MaybeMaxVF.getValue(); 7438 assert(MaxVF != 0 && "MaxVF is zero."); 7439 7440 for (unsigned VF = 1; VF <= MaxVF; VF *= 2) { 7441 // Collect Uniform and Scalar instructions after vectorization with VF. 7442 CM.collectUniformsAndScalars(VF); 7443 7444 // Collect the instructions (and their associated costs) that will be more 7445 // profitable to scalarize. 7446 if (VF > 1) 7447 CM.collectInstsToScalarize(VF); 7448 } 7449 7450 buildVPlans(1, MaxVF); 7451 DEBUG(printPlans(dbgs())); 7452 if (MaxVF == 1) 7453 return NoVectorization; 7454 7455 // Select the optimal vectorization factor. 7456 return CM.selectVectorizationFactor(MaxVF); 7457 } 7458 7459 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) { 7460 DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF << '\n'); 7461 BestVF = VF; 7462 BestUF = UF; 7463 7464 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 7465 return !Plan->hasVF(VF); 7466 }); 7467 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 7468 } 7469 7470 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 7471 DominatorTree *DT) { 7472 // Perform the actual loop transformation. 7473 7474 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 7475 VPCallbackILV CallbackILV(ILV); 7476 7477 VPTransformState State{BestVF, BestUF, LI, 7478 DT, ILV.Builder, ILV.VectorLoopValueMap, 7479 &ILV, CallbackILV}; 7480 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 7481 7482 //===------------------------------------------------===// 7483 // 7484 // Notice: any optimization or new instruction that go 7485 // into the code below should also be implemented in 7486 // the cost-model. 7487 // 7488 //===------------------------------------------------===// 7489 7490 // 2. Copy and widen instructions from the old loop into the new loop. 7491 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 7492 VPlans.front()->execute(&State); 7493 7494 // 3. Fix the vectorized code: take care of header phi's, live-outs, 7495 // predication, updating analyses. 7496 ILV.fixVectorizedLoop(); 7497 } 7498 7499 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 7500 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 7501 BasicBlock *Latch = OrigLoop->getLoopLatch(); 7502 7503 // We create new control-flow for the vectorized loop, so the original 7504 // condition will be dead after vectorization if it's only used by the 7505 // branch. 7506 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 7507 if (Cmp && Cmp->hasOneUse()) 7508 DeadInstructions.insert(Cmp); 7509 7510 // We create new "steps" for induction variable updates to which the original 7511 // induction variables map. An original update instruction will be dead if 7512 // all its users except the induction variable are dead. 
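  // For example (illustrative IR only; the value names are made up):
  //   %i      = phi i64 [ 0, %preheader ], [ %i.next, %latch ]
  //   %i.next = add nuw nsw i64 %i, 1
  //   %cmp    = icmp eq i64 %i.next, %n
  // Once %cmp is recorded as dead above (it only fed the latch branch), the
  // update %i.next has no live users other than the phi %i, so it is recorded
  // as trivially dead as well.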
7513 for (auto &Induction : *Legal->getInductionVars()) { 7514 PHINode *Ind = Induction.first; 7515 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 7516 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 7517 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 7518 })) 7519 DeadInstructions.insert(IndUpdate); 7520 7521 // We record as "Dead" also the type-casting instructions we had identified 7522 // during induction analysis. We don't need any handling for them in the 7523 // vectorized loop because we have proven that, under a proper runtime 7524 // test guarding the vectorized loop, the value of the phi, and the casted 7525 // value of the phi, are the same. The last instruction in this casting chain 7526 // will get its scalar/vector/widened def from the scalar/vector/widened def 7527 // of the respective phi node. Any other casts in the induction def-use chain 7528 // have no other uses outside the phi update chain, and will be ignored. 7529 InductionDescriptor &IndDes = Induction.second; 7530 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7531 DeadInstructions.insert(Casts.begin(), Casts.end()); 7532 } 7533 } 7534 7535 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 7536 7537 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 7538 7539 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 7540 Instruction::BinaryOps BinOp) { 7541 // When unrolling and the VF is 1, we only need to add a simple scalar. 7542 Type *Ty = Val->getType(); 7543 assert(!Ty->isVectorTy() && "Val must be a scalar"); 7544 7545 if (Ty->isFloatingPointTy()) { 7546 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 7547 7548 // Floating point operations had to be 'fast' to enable the unrolling. 7549 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 7550 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 7551 } 7552 Constant *C = ConstantInt::get(Ty, StartIdx); 7553 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 7554 } 7555 7556 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 7557 SmallVector<Metadata *, 4> MDs; 7558 // Reserve first location for self reference to the LoopID metadata node. 7559 MDs.push_back(nullptr); 7560 bool IsUnrollMetadata = false; 7561 MDNode *LoopID = L->getLoopID(); 7562 if (LoopID) { 7563 // First find existing loop unrolling disable metadata. 7564 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 7565 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 7566 if (MD) { 7567 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 7568 IsUnrollMetadata = 7569 S && S->getString().startswith("llvm.loop.unroll.disable"); 7570 } 7571 MDs.push_back(LoopID->getOperand(i)); 7572 } 7573 } 7574 7575 if (!IsUnrollMetadata) { 7576 // Add runtime unroll disable metadata. 7577 LLVMContext &Context = L->getHeader()->getContext(); 7578 SmallVector<Metadata *, 1> DisableOperands; 7579 DisableOperands.push_back( 7580 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 7581 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 7582 MDs.push_back(DisableNode); 7583 MDNode *NewLoopID = MDNode::get(Context, MDs); 7584 // Set operand 0 to refer to the loop id itself. 
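    // The resulting metadata is roughly of the form (illustrative only; the
    // actual node numbering depends on what the module already contains):
    //   !0 = distinct !{!0, !1}   ; operand 0 is the self-reference set below
    //   !1 = !{!"llvm.loop.unroll.runtime.disable"}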
7585 NewLoopID->replaceOperandWith(0, NewLoopID); 7586 L->setLoopID(NewLoopID); 7587 } 7588 } 7589 7590 bool LoopVectorizationPlanner::getDecisionAndClampRange( 7591 const std::function<bool(unsigned)> &Predicate, VFRange &Range) { 7592 assert(Range.End > Range.Start && "Trying to test an empty VF range."); 7593 bool PredicateAtRangeStart = Predicate(Range.Start); 7594 7595 for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2) 7596 if (Predicate(TmpVF) != PredicateAtRangeStart) { 7597 Range.End = TmpVF; 7598 break; 7599 } 7600 7601 return PredicateAtRangeStart; 7602 } 7603 7604 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 7605 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 7606 /// of VF's starting at a given VF and extending it as much as possible. Each 7607 /// vectorization decision can potentially shorten this sub-range during 7608 /// buildVPlan(). 7609 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) { 7610 7611 // Collect conditions feeding internal conditional branches; they need to be 7612 // represented in VPlan for it to model masking. 7613 SmallPtrSet<Value *, 1> NeedDef; 7614 7615 auto *Latch = OrigLoop->getLoopLatch(); 7616 for (BasicBlock *BB : OrigLoop->blocks()) { 7617 if (BB == Latch) 7618 continue; 7619 BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator()); 7620 if (Branch && Branch->isConditional()) 7621 NeedDef.insert(Branch->getCondition()); 7622 } 7623 7624 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 7625 VFRange SubRange = {VF, MaxVF + 1}; 7626 VPlans.push_back(buildVPlan(SubRange, NeedDef)); 7627 VF = SubRange.End; 7628 } 7629 } 7630 7631 VPValue *LoopVectorizationPlanner::createEdgeMask(BasicBlock *Src, 7632 BasicBlock *Dst, 7633 VPlanPtr &Plan) { 7634 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 7635 7636 // Look for cached value. 7637 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 7638 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 7639 if (ECEntryIt != EdgeMaskCache.end()) 7640 return ECEntryIt->second; 7641 7642 VPValue *SrcMask = createBlockInMask(Src, Plan); 7643 7644 // The terminator has to be a branch inst! 7645 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 7646 assert(BI && "Unexpected terminator found"); 7647 7648 if (!BI->isConditional()) 7649 return EdgeMaskCache[Edge] = SrcMask; 7650 7651 VPValue *EdgeMask = Plan->getVPValue(BI->getCondition()); 7652 assert(EdgeMask && "No Edge Mask found for condition"); 7653 7654 if (BI->getSuccessor(0) != Dst) 7655 EdgeMask = Builder.createNot(EdgeMask); 7656 7657 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND. 7658 EdgeMask = Builder.createAnd(EdgeMask, SrcMask); 7659 7660 return EdgeMaskCache[Edge] = EdgeMask; 7661 } 7662 7663 VPValue *LoopVectorizationPlanner::createBlockInMask(BasicBlock *BB, 7664 VPlanPtr &Plan) { 7665 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 7666 7667 // Look for cached value. 7668 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 7669 if (BCEntryIt != BlockMaskCache.end()) 7670 return BCEntryIt->second; 7671 7672 // All-one mask is modelled as no-mask following the convention for masked 7673 // load/store/gather/scatter. Initialize BlockMask to no-mask. 7674 VPValue *BlockMask = nullptr; 7675 7676 // Loop incoming mask is all-one. 7677 if (OrigLoop->getHeader() == BB) 7678 return BlockMaskCache[BB] = BlockMask; 7679 7680 // This is the block mask. 
We OR all incoming edges. 7681 for (auto *Predecessor : predecessors(BB)) { 7682 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 7683 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 7684 return BlockMaskCache[BB] = EdgeMask; 7685 7686 if (!BlockMask) { // BlockMask has its initialized nullptr value. 7687 BlockMask = EdgeMask; 7688 continue; 7689 } 7690 7691 BlockMask = Builder.createOr(BlockMask, EdgeMask); 7692 } 7693 7694 return BlockMaskCache[BB] = BlockMask; 7695 } 7696 7697 VPInterleaveRecipe * 7698 LoopVectorizationPlanner::tryToInterleaveMemory(Instruction *I, 7699 VFRange &Range) { 7700 const InterleaveGroup *IG = Legal->getInterleavedAccessGroup(I); 7701 if (!IG) 7702 return nullptr; 7703 7704 // Now check if IG is relevant for VF's in the given range. 7705 auto isIGMember = [&](Instruction *I) -> std::function<bool(unsigned)> { 7706 return [=](unsigned VF) -> bool { 7707 return (VF >= 2 && // Query is illegal for VF == 1 7708 CM.getWideningDecision(I, VF) == 7709 LoopVectorizationCostModel::CM_Interleave); 7710 }; 7711 }; 7712 if (!getDecisionAndClampRange(isIGMember(I), Range)) 7713 return nullptr; 7714 7715 // I is a member of an InterleaveGroup for VF's in the (possibly trimmed) 7716 // range. If it's the primary member of the IG construct a VPInterleaveRecipe. 7717 // Otherwise, it's an adjunct member of the IG, do not construct any Recipe. 7718 assert(I == IG->getInsertPos() && 7719 "Generating a recipe for an adjunct member of an interleave group"); 7720 7721 return new VPInterleaveRecipe(IG); 7722 } 7723 7724 VPWidenMemoryInstructionRecipe * 7725 LoopVectorizationPlanner::tryToWidenMemory(Instruction *I, VFRange &Range, 7726 VPlanPtr &Plan) { 7727 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 7728 return nullptr; 7729 7730 auto willWiden = [&](unsigned VF) -> bool { 7731 if (VF == 1) 7732 return false; 7733 if (CM.isScalarAfterVectorization(I, VF) || 7734 CM.isProfitableToScalarize(I, VF)) 7735 return false; 7736 LoopVectorizationCostModel::InstWidening Decision = 7737 CM.getWideningDecision(I, VF); 7738 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 7739 "CM decision should be taken at this point."); 7740 assert(Decision != LoopVectorizationCostModel::CM_Interleave && 7741 "Interleave memory opportunity should be caught earlier."); 7742 return Decision != LoopVectorizationCostModel::CM_Scalarize; 7743 }; 7744 7745 if (!getDecisionAndClampRange(willWiden, Range)) 7746 return nullptr; 7747 7748 VPValue *Mask = nullptr; 7749 if (Legal->isMaskRequired(I)) 7750 Mask = createBlockInMask(I->getParent(), Plan); 7751 7752 return new VPWidenMemoryInstructionRecipe(*I, Mask); 7753 } 7754 7755 VPWidenIntOrFpInductionRecipe * 7756 LoopVectorizationPlanner::tryToOptimizeInduction(Instruction *I, 7757 VFRange &Range) { 7758 if (PHINode *Phi = dyn_cast<PHINode>(I)) { 7759 // Check if this is an integer or fp induction. If so, build the recipe that 7760 // produces its scalar and vector values. 7761 InductionDescriptor II = Legal->getInductionVars()->lookup(Phi); 7762 if (II.getKind() == InductionDescriptor::IK_IntInduction || 7763 II.getKind() == InductionDescriptor::IK_FpInduction) 7764 return new VPWidenIntOrFpInductionRecipe(Phi); 7765 7766 return nullptr; 7767 } 7768 7769 // Optimize the special case where the source is a constant integer 7770 // induction variable. 
Notice that we can only optimize the 'trunc' case 7771 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 7772 // (c) other casts depend on pointer size. 7773 7774 // Determine whether \p K is a truncation based on an induction variable that 7775 // can be optimized. 7776 auto isOptimizableIVTruncate = 7777 [&](Instruction *K) -> std::function<bool(unsigned)> { 7778 return 7779 [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); }; 7780 }; 7781 7782 if (isa<TruncInst>(I) && 7783 getDecisionAndClampRange(isOptimizableIVTruncate(I), Range)) 7784 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 7785 cast<TruncInst>(I)); 7786 return nullptr; 7787 } 7788 7789 VPBlendRecipe * 7790 LoopVectorizationPlanner::tryToBlend(Instruction *I, VPlanPtr &Plan) { 7791 PHINode *Phi = dyn_cast<PHINode>(I); 7792 if (!Phi || Phi->getParent() == OrigLoop->getHeader()) 7793 return nullptr; 7794 7795 // We know that all PHIs in non-header blocks are converted into selects, so 7796 // we don't have to worry about the insertion order and we can just use the 7797 // builder. At this point we generate the predication tree. There may be 7798 // duplications since this is a simple recursive scan, but future 7799 // optimizations will clean it up. 7800 7801 SmallVector<VPValue *, 2> Masks; 7802 unsigned NumIncoming = Phi->getNumIncomingValues(); 7803 for (unsigned In = 0; In < NumIncoming; In++) { 7804 VPValue *EdgeMask = 7805 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 7806 assert((EdgeMask || NumIncoming == 1) && 7807 "Multiple predecessors with one having a full mask"); 7808 if (EdgeMask) 7809 Masks.push_back(EdgeMask); 7810 } 7811 return new VPBlendRecipe(Phi, Masks); 7812 } 7813 7814 bool LoopVectorizationPlanner::tryToWiden(Instruction *I, VPBasicBlock *VPBB, 7815 VFRange &Range) { 7816 if (CM.isScalarWithPredication(I)) 7817 return false; 7818 7819 auto IsVectorizableOpcode = [](unsigned Opcode) { 7820 switch (Opcode) { 7821 case Instruction::Add: 7822 case Instruction::And: 7823 case Instruction::AShr: 7824 case Instruction::BitCast: 7825 case Instruction::Br: 7826 case Instruction::Call: 7827 case Instruction::FAdd: 7828 case Instruction::FCmp: 7829 case Instruction::FDiv: 7830 case Instruction::FMul: 7831 case Instruction::FPExt: 7832 case Instruction::FPToSI: 7833 case Instruction::FPToUI: 7834 case Instruction::FPTrunc: 7835 case Instruction::FRem: 7836 case Instruction::FSub: 7837 case Instruction::GetElementPtr: 7838 case Instruction::ICmp: 7839 case Instruction::IntToPtr: 7840 case Instruction::Load: 7841 case Instruction::LShr: 7842 case Instruction::Mul: 7843 case Instruction::Or: 7844 case Instruction::PHI: 7845 case Instruction::PtrToInt: 7846 case Instruction::SDiv: 7847 case Instruction::Select: 7848 case Instruction::SExt: 7849 case Instruction::Shl: 7850 case Instruction::SIToFP: 7851 case Instruction::SRem: 7852 case Instruction::Store: 7853 case Instruction::Sub: 7854 case Instruction::Trunc: 7855 case Instruction::UDiv: 7856 case Instruction::UIToFP: 7857 case Instruction::URem: 7858 case Instruction::Xor: 7859 case Instruction::ZExt: 7860 return true; 7861 } 7862 return false; 7863 }; 7864 7865 if (!IsVectorizableOpcode(I->getOpcode())) 7866 return false; 7867 7868 if (CallInst *CI = dyn_cast<CallInst>(I)) { 7869 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 7870 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 7871 ID == Intrinsic::lifetime_start || ID == 
Intrinsic::sideeffect)) 7872 return false; 7873 } 7874 7875 auto willWiden = [&](unsigned VF) -> bool { 7876 if (!isa<PHINode>(I) && (CM.isScalarAfterVectorization(I, VF) || 7877 CM.isProfitableToScalarize(I, VF))) 7878 return false; 7879 if (CallInst *CI = dyn_cast<CallInst>(I)) { 7880 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 7881 // The following case may be scalarized depending on the VF. 7882 // The flag shows whether we use Intrinsic or a usual Call for vectorized 7883 // version of the instruction. 7884 // Is it beneficial to perform intrinsic call compared to lib call? 7885 bool NeedToScalarize; 7886 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 7887 bool UseVectorIntrinsic = 7888 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 7889 return UseVectorIntrinsic || !NeedToScalarize; 7890 } 7891 if (isa<LoadInst>(I) || isa<StoreInst>(I)) { 7892 assert(CM.getWideningDecision(I, VF) == 7893 LoopVectorizationCostModel::CM_Scalarize && 7894 "Memory widening decisions should have been taken care by now"); 7895 return false; 7896 } 7897 return true; 7898 }; 7899 7900 if (!getDecisionAndClampRange(willWiden, Range)) 7901 return false; 7902 7903 // Success: widen this instruction. We optimize the common case where 7904 // consecutive instructions can be represented by a single recipe. 7905 if (!VPBB->empty()) { 7906 VPWidenRecipe *LastWidenRecipe = dyn_cast<VPWidenRecipe>(&VPBB->back()); 7907 if (LastWidenRecipe && LastWidenRecipe->appendInstruction(I)) 7908 return true; 7909 } 7910 7911 VPBB->appendRecipe(new VPWidenRecipe(I)); 7912 return true; 7913 } 7914 7915 VPBasicBlock *LoopVectorizationPlanner::handleReplication( 7916 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 7917 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 7918 VPlanPtr &Plan) { 7919 bool IsUniform = getDecisionAndClampRange( 7920 [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); }, 7921 Range); 7922 7923 bool IsPredicated = CM.isScalarWithPredication(I); 7924 auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated); 7925 7926 // Find if I uses a predicated instruction. If so, it will use its scalar 7927 // value. Avoid hoisting the insert-element which packs the scalar value into 7928 // a vector value, as that happens iff all users use the vector value. 7929 for (auto &Op : I->operands()) 7930 if (auto *PredInst = dyn_cast<Instruction>(Op)) 7931 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 7932 PredInst2Recipe[PredInst]->setAlsoPack(false); 7933 7934 // Finalize the recipe for Instr, first if it is not predicated. 7935 if (!IsPredicated) { 7936 DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 7937 VPBB->appendRecipe(Recipe); 7938 return VPBB; 7939 } 7940 DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 7941 assert(VPBB->getSuccessors().empty() && 7942 "VPBB has successors when handling predicated replication."); 7943 // Record predicated instructions for above packing optimizations. 7944 PredInst2Recipe[I] = Recipe; 7945 VPBlockBase *Region = 7946 VPBB->setOneSuccessor(createReplicateRegion(I, Recipe, Plan)); 7947 return cast<VPBasicBlock>(Region->setOneSuccessor(new VPBasicBlock())); 7948 } 7949 7950 VPRegionBlock * 7951 LoopVectorizationPlanner::createReplicateRegion(Instruction *Instr, 7952 VPRecipeBase *PredRecipe, 7953 VPlanPtr &Plan) { 7954 // Instructions marked for predication are replicated and placed under an 7955 // if-then construct to prevent side-effects. 
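  // The region built below has, roughly, the following shape (block names are
  // illustrative; the actual Twine-based names are constructed further down):
  //
  //   pred.<opcode>.entry      -- VPBranchOnMaskRecipe on the block-in mask
  //      |          \
  //      |     pred.<opcode>.if        -- the replicated (predicated) recipe
  //      |          /
  //   pred.<opcode>.continue   -- VPPredInstPHIRecipe, if the instruction
  //                               produces a value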
7956 7957 // Generate recipes to compute the block mask for this region. 7958 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 7959 7960 // Build the triangular if-then region. 7961 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 7962 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 7963 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 7964 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 7965 auto *PHIRecipe = 7966 Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr); 7967 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 7968 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 7969 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 7970 7971 // Note: first set Entry as region entry and then connect successors starting 7972 // from it in order, to propagate the "parent" of each VPBasicBlock. 7973 Entry->setTwoSuccessors(Pred, Exit); 7974 Pred->setOneSuccessor(Exit); 7975 7976 return Region; 7977 } 7978 7979 LoopVectorizationPlanner::VPlanPtr 7980 LoopVectorizationPlanner::buildVPlan(VFRange &Range, 7981 const SmallPtrSetImpl<Value *> &NeedDef) { 7982 EdgeMaskCache.clear(); 7983 BlockMaskCache.clear(); 7984 DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 7985 DenseMap<Instruction *, Instruction *> SinkAfterInverse; 7986 7987 // Collect instructions from the original loop that will become trivially dead 7988 // in the vectorized loop. We don't need to vectorize these instructions. For 7989 // example, original induction update instructions can become dead because we 7990 // separately emit induction "steps" when generating code for the new loop. 7991 // Similarly, we create a new latch condition when setting up the structure 7992 // of the new loop, so the old one can become dead. 7993 SmallPtrSet<Instruction *, 4> DeadInstructions; 7994 collectTriviallyDeadInstructions(DeadInstructions); 7995 7996 // Hold a mapping from predicated instructions to their recipes, in order to 7997 // fix their AlsoPack behavior if a user is determined to replicate and use a 7998 // scalar instead of vector value. 7999 DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe; 8000 8001 // Create a dummy pre-entry VPBasicBlock to start building the VPlan. 8002 VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry"); 8003 auto Plan = llvm::make_unique<VPlan>(VPBB); 8004 8005 // Represent values that will have defs inside VPlan. 8006 for (Value *V : NeedDef) 8007 Plan->addVPValue(V); 8008 8009 // Scan the body of the loop in a topological order to visit each basic block 8010 // after having visited its predecessor basic blocks. 8011 LoopBlocksDFS DFS(OrigLoop); 8012 DFS.perform(LI); 8013 8014 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 8015 // Relevant instructions from basic block BB will be grouped into VPRecipe 8016 // ingredients and fill a new VPBasicBlock. 8017 unsigned VPBBsForBB = 0; 8018 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName()); 8019 VPBB->setOneSuccessor(FirstVPBBForBB); 8020 VPBB = FirstVPBBForBB; 8021 Builder.setInsertPoint(VPBB); 8022 8023 std::vector<Instruction *> Ingredients; 8024 8025 // Organize the ingredients to vectorize from current basic block in the 8026 // right order. 8027 for (Instruction &I : *BB) { 8028 Instruction *Instr = &I; 8029 8030 // First filter out irrelevant instructions, to ensure no recipes are 8031 // built for them. 
8032 if (isa<BranchInst>(Instr) || isa<DbgInfoIntrinsic>(Instr) || 8033 DeadInstructions.count(Instr)) 8034 continue; 8035 8036 // I is a member of an InterleaveGroup for Range.Start. If it's an adjunct 8037 // member of the IG, do not construct any Recipe for it. 8038 const InterleaveGroup *IG = Legal->getInterleavedAccessGroup(Instr); 8039 if (IG && Instr != IG->getInsertPos() && 8040 Range.Start >= 2 && // Query is illegal for VF == 1 8041 CM.getWideningDecision(Instr, Range.Start) == 8042 LoopVectorizationCostModel::CM_Interleave) { 8043 if (SinkAfterInverse.count(Instr)) 8044 Ingredients.push_back(SinkAfterInverse.find(Instr)->second); 8045 continue; 8046 } 8047 8048 // Move instructions to handle first-order recurrences, step 1: avoid 8049 // handling this instruction until after we've handled the instruction it 8050 // should follow. 8051 auto SAIt = SinkAfter.find(Instr); 8052 if (SAIt != SinkAfter.end()) { 8053 DEBUG(dbgs() << "Sinking" << *SAIt->first << " after" << *SAIt->second 8054 << " to vectorize a 1st order recurrence.\n"); 8055 SinkAfterInverse[SAIt->second] = Instr; 8056 continue; 8057 } 8058 8059 Ingredients.push_back(Instr); 8060 8061 // Move instructions to handle first-order recurrences, step 2: push the 8062 // instruction to be sunk at its insertion point. 8063 auto SAInvIt = SinkAfterInverse.find(Instr); 8064 if (SAInvIt != SinkAfterInverse.end()) 8065 Ingredients.push_back(SAInvIt->second); 8066 } 8067 8068 // Introduce each ingredient into VPlan. 8069 for (Instruction *Instr : Ingredients) { 8070 VPRecipeBase *Recipe = nullptr; 8071 8072 // Check if Instr should belong to an interleave memory recipe, or already 8073 // does. In the latter case Instr is irrelevant. 8074 if ((Recipe = tryToInterleaveMemory(Instr, Range))) { 8075 VPBB->appendRecipe(Recipe); 8076 continue; 8077 } 8078 8079 // Check if Instr is a memory operation that should be widened. 8080 if ((Recipe = tryToWidenMemory(Instr, Range, Plan))) { 8081 VPBB->appendRecipe(Recipe); 8082 continue; 8083 } 8084 8085 // Check if Instr should form some PHI recipe. 8086 if ((Recipe = tryToOptimizeInduction(Instr, Range))) { 8087 VPBB->appendRecipe(Recipe); 8088 continue; 8089 } 8090 if ((Recipe = tryToBlend(Instr, Plan))) { 8091 VPBB->appendRecipe(Recipe); 8092 continue; 8093 } 8094 if (PHINode *Phi = dyn_cast<PHINode>(Instr)) { 8095 VPBB->appendRecipe(new VPWidenPHIRecipe(Phi)); 8096 continue; 8097 } 8098 8099 // Check if Instr is to be widened by a general VPWidenRecipe, after 8100 // having first checked for specific widening recipes that deal with 8101 // Interleave Groups, Inductions and Phi nodes. 8102 if (tryToWiden(Instr, VPBB, Range)) 8103 continue; 8104 8105 // Otherwise, if all widening options failed, Instruction is to be 8106 // replicated. This may create a successor for VPBB. 8107 VPBasicBlock *NextVPBB = 8108 handleReplication(Instr, Range, VPBB, PredInst2Recipe, Plan); 8109 if (NextVPBB != VPBB) { 8110 VPBB = NextVPBB; 8111 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 8112 : ""); 8113 } 8114 } 8115 } 8116 8117 // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks 8118 // may also be empty, such as the last one VPBB, reflecting original 8119 // basic-blocks with no recipes. 
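  // For illustration (block names are made up): a plan built as
  //   Pre-Entry -> for.body -> pred.store (replicate region) -> for.body.0
  // becomes, after the code below runs,
  //   for.body -> pred.store (replicate region) -> for.body.0
  // with for.body installed as the plan's entry block.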
8120 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry()); 8121 assert(PreEntry->empty() && "Expecting empty pre-entry block."); 8122 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor()); 8123 PreEntry->disconnectSuccessor(Entry); 8124 delete PreEntry; 8125 8126 std::string PlanName; 8127 raw_string_ostream RSO(PlanName); 8128 unsigned VF = Range.Start; 8129 Plan->addVF(VF); 8130 RSO << "Initial VPlan for VF={" << VF; 8131 for (VF *= 2; VF < Range.End; VF *= 2) { 8132 Plan->addVF(VF); 8133 RSO << "," << VF; 8134 } 8135 RSO << "},UF>=1"; 8136 RSO.flush(); 8137 Plan->setName(PlanName); 8138 8139 return Plan; 8140 } 8141 8142 Value* LoopVectorizationPlanner::VPCallbackILV:: 8143 getOrCreateVectorValues(Value *V, unsigned Part) { 8144 return ILV.getOrCreateVectorValue(V, Part); 8145 } 8146 8147 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const { 8148 O << " +\n" 8149 << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 8150 IG->getInsertPos()->printAsOperand(O, false); 8151 O << "\\l\""; 8152 for (unsigned i = 0; i < IG->getFactor(); ++i) 8153 if (Instruction *I = IG->getMember(i)) 8154 O << " +\n" 8155 << Indent << "\" " << VPlanIngredient(I) << " " << i << "\\l\""; 8156 } 8157 8158 void VPWidenRecipe::execute(VPTransformState &State) { 8159 for (auto &Instr : make_range(Begin, End)) 8160 State.ILV->widenInstruction(Instr); 8161 } 8162 8163 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 8164 assert(!State.Instance && "Int or FP induction being replicated."); 8165 State.ILV->widenIntOrFpInduction(IV, Trunc); 8166 } 8167 8168 void VPWidenPHIRecipe::execute(VPTransformState &State) { 8169 State.ILV->widenPHIInstruction(Phi, State.UF, State.VF); 8170 } 8171 8172 void VPBlendRecipe::execute(VPTransformState &State) { 8173 State.ILV->setDebugLocFromInst(State.Builder, Phi); 8174 // We know that all PHIs in non-header blocks are converted into 8175 // selects, so we don't have to worry about the insertion order and we 8176 // can just use the builder. 8177 // At this point we generate the predication tree. There may be 8178 // duplications since this is a simple recursive scan, but future 8179 // optimizations will clean it up. 8180 8181 unsigned NumIncoming = Phi->getNumIncomingValues(); 8182 8183 assert((User || NumIncoming == 1) && 8184 "Multiple predecessors with predecessors having a full mask"); 8185 // Generate a sequence of selects of the form: 8186 // SELECT(Mask3, In3, 8187 // SELECT(Mask2, In2, 8188 // ( ...))) 8189 InnerLoopVectorizer::VectorParts Entry(State.UF); 8190 for (unsigned In = 0; In < NumIncoming; ++In) { 8191 for (unsigned Part = 0; Part < State.UF; ++Part) { 8192 // We might have single edge PHIs (blocks) - use an identity 8193 // 'select' for the first PHI operand. 8194 Value *In0 = 8195 State.ILV->getOrCreateVectorValue(Phi->getIncomingValue(In), Part); 8196 if (In == 0) 8197 Entry[Part] = In0; // Initialize with the first incoming value. 8198 else { 8199 // Select between the current value and the previous incoming edge 8200 // based on the incoming mask. 
8201         Value *Cond = State.get(User->getOperand(In), Part);
8202         Entry[Part] =
8203             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
8204       }
8205     }
8206   }
8207   for (unsigned Part = 0; Part < State.UF; ++Part)
8208     State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
8209 }
8210 
8211 void VPInterleaveRecipe::execute(VPTransformState &State) {
8212   assert(!State.Instance && "Interleave group being replicated.");
8213   State.ILV->vectorizeInterleaveGroup(IG->getInsertPos());
8214 }
8215 
8216 void VPReplicateRecipe::execute(VPTransformState &State) {
8217   if (State.Instance) { // Generate a single instance.
8218     State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
8219     // Insert the scalar instance, packing it into a vector.
8220     if (AlsoPack && State.VF > 1) {
8221       // If we're constructing lane 0, initialize to start from undef.
8222       if (State.Instance->Lane == 0) {
8223         Value *Undef =
8224             UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
8225         State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
8226       }
8227       State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
8228     }
8229     return;
8230   }
8231 
8232   // Generate scalar instances for all VF lanes of all UF parts, unless the
8233   // instruction is uniform, in which case generate only the first lane for
8234   // each of the UF parts.
8235   unsigned EndLane = IsUniform ? 1 : State.VF;
8236   for (unsigned Part = 0; Part < State.UF; ++Part)
8237     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
8238       State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
8239 }
8240 
8241 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
8242   assert(State.Instance && "Branch on Mask works only on single instance.");
8243 
8244   unsigned Part = State.Instance->Part;
8245   unsigned Lane = State.Instance->Lane;
8246 
8247   Value *ConditionBit = nullptr;
8248   if (!User) // Block in mask is all-one.
8249     ConditionBit = State.Builder.getTrue();
8250   else {
8251     VPValue *BlockInMask = User->getOperand(0);
8252     ConditionBit = State.get(BlockInMask, Part);
8253     if (ConditionBit->getType()->isVectorTy())
8254       ConditionBit = State.Builder.CreateExtractElement(
8255           ConditionBit, State.Builder.getInt32(Lane));
8256   }
8257 
8258   // Replace the temporary unreachable terminator with a new conditional branch,
8259   // whose two destinations will be set later when they are created.
8260   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
8261   assert(isa<UnreachableInst>(CurrentTerminator) &&
8262          "Expected to replace unreachable terminator with conditional branch.");
8263   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
8264   CondBr->setSuccessor(0, nullptr);
8265   ReplaceInstWithInst(CurrentTerminator, CondBr);
8266 }
8267 
8268 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
8269   assert(State.Instance && "Predicated instruction PHI works per instance.");
8270   Instruction *ScalarPredInst = cast<Instruction>(
8271       State.ValueMap.getScalarValue(PredInst, *State.Instance));
8272   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
8273   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
8274   assert(PredicatingBB && "Predicated block has no single predecessor.");
8275 
8276   // By current pack/unpack logic we need to generate only a single phi node: if
8277   // a vector value for the predicated instruction exists at this point it means
8278   // the instruction has vector users only, and a phi for the vector value is
  // needed.
In this case the recipe of the predicated instruction is marked to 8280 // also do that packing, thereby "hoisting" the insert-element sequence. 8281 // Otherwise, a phi node for the scalar value is needed. 8282 unsigned Part = State.Instance->Part; 8283 if (State.ValueMap.hasVectorValue(PredInst, Part)) { 8284 Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part); 8285 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 8286 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 8287 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 8288 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 8289 State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache. 8290 } else { 8291 Type *PredInstType = PredInst->getType(); 8292 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 8293 Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB); 8294 Phi->addIncoming(ScalarPredInst, PredicatedBB); 8295 State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi); 8296 } 8297 } 8298 8299 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 8300 if (!User) 8301 return State.ILV->vectorizeMemoryInstruction(&Instr); 8302 8303 // Last (and currently only) operand is a mask. 8304 InnerLoopVectorizer::VectorParts MaskValues(State.UF); 8305 VPValue *Mask = User->getOperand(User->getNumOperands() - 1); 8306 for (unsigned Part = 0; Part < State.UF; ++Part) 8307 MaskValues[Part] = State.get(Mask, Part); 8308 State.ILV->vectorizeMemoryInstruction(&Instr, &MaskValues); 8309 } 8310 8311 bool LoopVectorizePass::processLoop(Loop *L) { 8312 assert(L->empty() && "Only process inner loops."); 8313 8314 #ifndef NDEBUG 8315 const std::string DebugLocStr = getDebugLocString(L); 8316 #endif /* NDEBUG */ 8317 8318 DEBUG(dbgs() << "\nLV: Checking a loop in \"" 8319 << L->getHeader()->getParent()->getName() << "\" from " 8320 << DebugLocStr << "\n"); 8321 8322 LoopVectorizeHints Hints(L, DisableUnrolling, *ORE); 8323 8324 DEBUG(dbgs() << "LV: Loop hints:" 8325 << " force=" 8326 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 8327 ? "disabled" 8328 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 8329 ? "enabled" 8330 : "?")) 8331 << " width=" << Hints.getWidth() 8332 << " unroll=" << Hints.getInterleave() << "\n"); 8333 8334 // Function containing loop 8335 Function *F = L->getHeader()->getParent(); 8336 8337 // Looking at the diagnostic output is the only way to determine if a loop 8338 // was vectorized (other than looking at the IR or machine code), so it 8339 // is important to generate an optimization remark for each loop. Most of 8340 // these messages are generated as OptimizationRemarkAnalysis. Remarks 8341 // generated as OptimizationRemark and OptimizationRemarkMissed are 8342 // less verbose reporting vectorized loops and unvectorized loops that may 8343 // benefit from vectorization, respectively. 8344 8345 if (!Hints.allowVectorization(F, L, AlwaysVectorize)) { 8346 DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 8347 return false; 8348 } 8349 8350 PredicatedScalarEvolution PSE(*SE, *L); 8351 8352 // Check if it is legal to vectorize the loop. 
  LoopVectorizationRequirements Requirements(*ORE);
  LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE,
                                &Requirements, &Hints, DB, AC);
  if (!LVL.canVectorize()) {
    DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Check the function attributes to find out if this function should be
  // optimized for size.
  bool OptForSize =
      Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();

  // Check the loop for a trip count threshold: vectorize loops with a tiny
  // trip count by optimizing for size, to minimize overheads.
  // Prefer a constant trip count over profile data, and profile data over the
  // upper bound estimate.
  unsigned ExpectedTC = 0;
  bool HasExpectedTC = false;
  if (const SCEVConstant *ConstExits =
          dyn_cast<SCEVConstant>(SE->getBackedgeTakenCount(L))) {
    const APInt &ExitsCount = ConstExits->getAPInt();
    // We are interested in small values for ExpectedTC. Skip over those that
    // can't fit an unsigned.
    if (ExitsCount.ult(std::numeric_limits<unsigned>::max())) {
      ExpectedTC = static_cast<unsigned>(ExitsCount.getZExtValue()) + 1;
      HasExpectedTC = true;
    }
  }
  // ExpectedTC may be large because it's bound by a variable. Check
  // profiling information to validate we should vectorize.
  if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) {
    auto EstimatedTC = getLoopEstimatedTripCount(L);
    if (EstimatedTC) {
      ExpectedTC = *EstimatedTC;
      HasExpectedTC = true;
    }
  }
  if (!HasExpectedTC) {
    ExpectedTC = SE->getSmallConstantMaxTripCount(L);
    HasExpectedTC = (ExpectedTC > 0);
  }

  if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) {
    DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                 << "This loop is worth vectorizing only if no scalar "
                 << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      DEBUG(dbgs() << "\n");
      // Loops with a very small trip count are considered for vectorization
      // under OptForSize, thereby making sure the cost of their loop body is
      // dominant, free of runtime guards and scalar iteration overheads.
      OptForSize = true;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
                    " attribute is used.\n");
    ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                   "NoImplicitFloat", L)
              << "loop not vectorized due to NoImplicitFloat attribute");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
    ORE->emit(
        createMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
        << "loop not vectorized due to unsafe FP support.");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM);

  // Get user vectorization factor.
  unsigned UserVF = Hints.getWidth();

  // Plan how to best vectorize, return the best VF and its cost.
  VectorizationFactor VF = LVP.plan(OptForSize, UserVF);

  // Select the interleave count.
  unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);

  // Get user interleave count.
  unsigned UserIC = Hints.getInterleave();

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                    "requirements.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  if (VF.Width == 1) {
    DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    DEBUG(dbgs()
          << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
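    // Usage note (not part of the algorithm): these missed-optimization
    // remarks are what a user typically sees with, e.g., clang's
    // -Rpass-missed=loop-vectorize / -Rpass-analysis=loop-vectorize or
    // opt's -pass-remarks-missed=loop-vectorize, which is usually the easiest
    // way to find out why a particular loop was left scalar.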
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not profitable to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is *legal* to vectorize the loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LVP.executePlan(LB, DT);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling a scalar loop when there are
    // no runtime checks about strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      AddRuntimeUnrollDisableMetaData(L);

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();

  DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
    return false;

  bool Changed = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    addAcyclicInnerLoop(*L, *LI, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= processLoop(L);
  }

  // Process each loop nest in the function.
  return Changed;
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  bool Changed =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  return PA;
}
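//===----------------------------------------------------------------------===//
// Illustrative usage sketch (not part of the pass). The usual way to run just
// this pass is "opt -passes=loop-vectorize <ir-file>". The function below is a
// hypothetical, minimal embedding of the new-pass-manager path exercised by
// LoopVectorizePass::run() above; it is guarded out of the build and would
// additionally require llvm/Passes/PassBuilder.h.
//===----------------------------------------------------------------------===//
#if 0
#include "llvm/Passes/PassBuilder.h"

static void runLoopVectorizeOnModule(Module &M) {
  PassBuilder PB;

  // Set up the four analysis managers and wire them together so that
  // function-level passes can query loop, CGSCC and module analyses.
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  // A function pipeline containing only the loop vectorizer.
  FunctionPassManager FPM;
  FPM.addPass(LoopVectorizePass());

  for (Function &F : M)
    if (!F.isDeclaration())
      FPM.run(F, FAM);
}
#endif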