//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD.
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "VPlan.h"
#include "VPlanBuilder.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
vectorized"); 146 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 147 148 static cl::opt<bool> 149 EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden, 150 cl::desc("Enable if-conversion during vectorization.")); 151 152 /// Loops with a known constant trip count below this number are vectorized only 153 /// if no scalar iteration overheads are incurred. 154 static cl::opt<unsigned> TinyTripCountVectorThreshold( 155 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 156 cl::desc("Loops with a constant trip count that is smaller than this " 157 "value are vectorized only if no scalar iteration overheads " 158 "are incurred.")); 159 160 static cl::opt<bool> MaximizeBandwidth( 161 "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, 162 cl::desc("Maximize bandwidth when selecting vectorization factor which " 163 "will be determined by the smallest type in loop.")); 164 165 static cl::opt<bool> EnableInterleavedMemAccesses( 166 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, 167 cl::desc("Enable vectorization on interleaved memory accesses in a loop")); 168 169 /// Maximum factor for an interleaved memory access. 170 static cl::opt<unsigned> MaxInterleaveGroupFactor( 171 "max-interleave-group-factor", cl::Hidden, 172 cl::desc("Maximum factor for an interleaved access group (default = 8)"), 173 cl::init(8)); 174 175 /// We don't interleave loops with a known constant trip count below this 176 /// number. 177 static const unsigned TinyTripCountInterleaveThreshold = 128; 178 179 static cl::opt<unsigned> ForceTargetNumScalarRegs( 180 "force-target-num-scalar-regs", cl::init(0), cl::Hidden, 181 cl::desc("A flag that overrides the target's number of scalar registers.")); 182 183 static cl::opt<unsigned> ForceTargetNumVectorRegs( 184 "force-target-num-vector-regs", cl::init(0), cl::Hidden, 185 cl::desc("A flag that overrides the target's number of vector registers.")); 186 187 /// Maximum vectorization interleave count. 188 static const unsigned MaxInterleaveFactor = 16; 189 190 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor( 191 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden, 192 cl::desc("A flag that overrides the target's max interleave factor for " 193 "scalar loops.")); 194 195 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor( 196 "force-target-max-vector-interleave", cl::init(0), cl::Hidden, 197 cl::desc("A flag that overrides the target's max interleave factor for " 198 "vectorized loops.")); 199 200 static cl::opt<unsigned> ForceTargetInstructionCost( 201 "force-target-instruction-cost", cl::init(0), cl::Hidden, 202 cl::desc("A flag that overrides the target's expected cost for " 203 "an instruction to a single constant value. Mostly " 204 "useful for getting consistent testing.")); 205 206 static cl::opt<unsigned> SmallLoopCost( 207 "small-loop-cost", cl::init(20), cl::Hidden, 208 cl::desc( 209 "The cost of a loop that is considered 'small' by the interleaver.")); 210 211 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 212 "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden, 213 cl::desc("Enable the use of the block frequency analysis to access PGO " 214 "heuristics minimizing code growth in cold regions and being more " 215 "aggressive in hot regions.")); 216 217 // Runtime interleave loops for load/store throughput. 
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis
createMissedAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                     Instruction *I = nullptr) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

namespace {

class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;
class VPBlendRecipe;
class VPInterleaveRecipe;
class VPReplicateRecipe;
class VPWidenIntOrFpInductionRecipe;
class VPWidenRecipe;
class VPWidenMemoryInstructionRecipe;

} // end anonymous namespace

/// Returns true if the given loop body has a cycle, excluding the loop
/// itself.
static bool hasCyclesInLoopBody(const Loop &L) {
  if (!L.empty())
    return true;

  for (const auto &SCC :
       make_range(scc_iterator<Loop, LoopBodyTraits>::begin(L),
                  scc_iterator<Loop, LoopBodyTraits>::end(L))) {
    if (SCC.size() > 1) {
      DEBUG(dbgs() << "LVL: Detected a cycle in the loop body:\n");
      DEBUG(L.dump());
      return true;
    }
  }
  return false;
}

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}

// FIXME: The following helper functions have multiple implementations
// in the project. They can be effectively organized in a common Load/Store
// utilities unit.

/// A helper function that returns the pointer operand of a load or store
/// instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// A helper function that returns the type of a loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns the alignment of a load or store instruction.
static unsigned getMemInstAlignment(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlignment();
  return cast<StoreInst>(I)->getAlignment();
}

/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
static unsigned getMemInstAddressSpace(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V)) {
    FastMathFlags Flags;
    Flags.setFast();
    cast<Instruction>(V)->setFastMathFlags(Flags);
  }
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}
  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop. Unlink the old loop and connect the new one.
  /// Return the pre-header block of the new loop.
  BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive.
  void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
                            bool IfPredicateInstr);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
  /// vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Vectorize Load and Store instructions, optionally masking the vector
  /// operations if \p BlockInMask is non-null.
  void vectorizeMemoryInstruction(Instruction *Instr,
                                  VectorParts *BlockInMask = nullptr);

  /// \brief Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
Each value from the 522 /// original loop, when scalarized, is represented by UF x VF scalar values 523 /// in the new unrolled loop, where UF is the unroll factor and VF is the 524 /// vectorization factor. 525 using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>; 526 527 /// Set up the values of the IVs correctly when exiting the vector loop. 528 void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, 529 Value *CountRoundDown, Value *EndValue, 530 BasicBlock *MiddleBlock); 531 532 /// Create a new induction variable inside L. 533 PHINode *createInductionVariable(Loop *L, Value *Start, Value *End, 534 Value *Step, Instruction *DL); 535 536 /// Handle all cross-iteration phis in the header. 537 void fixCrossIterationPHIs(); 538 539 /// Fix a first-order recurrence. This is the second phase of vectorizing 540 /// this phi node. 541 void fixFirstOrderRecurrence(PHINode *Phi); 542 543 /// Fix a reduction cross-iteration phi. This is the second phase of 544 /// vectorizing this phi node. 545 void fixReduction(PHINode *Phi); 546 547 /// \brief The Loop exit block may have single value PHI nodes with some 548 /// incoming value. While vectorizing we only handled real values 549 /// that were defined inside the loop and we should have one value for 550 /// each predecessor of its parent basic block. See PR14725. 551 void fixLCSSAPHIs(); 552 553 /// Iteratively sink the scalarized operands of a predicated instruction into 554 /// the block that was created for it. 555 void sinkScalarOperands(Instruction *PredInst); 556 557 /// Shrinks vector element sizes to the smallest bitwidth they can be legally 558 /// represented as. 559 void truncateToMinimalBitwidths(); 560 561 /// Insert the new loop to the loop hierarchy and pass manager 562 /// and update the analysis passes. 563 void updateAnalysis(); 564 565 /// Create a broadcast instruction. This method generates a broadcast 566 /// instruction (shuffle) for loop invariant values and for the induction 567 /// value. If this is the induction variable then we extend it to N, N+1, ... 568 /// this is needed because each iteration in the loop corresponds to a SIMD 569 /// element. 570 virtual Value *getBroadcastInstrs(Value *V); 571 572 /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...) 573 /// to each vector element of Val. The sequence starts at StartIndex. 574 /// \p Opcode is relevant for FP induction variable. 575 virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step, 576 Instruction::BinaryOps Opcode = 577 Instruction::BinaryOpsEnd); 578 579 /// Compute scalar induction steps. \p ScalarIV is the scalar induction 580 /// variable on which to base the steps, \p Step is the size of the step, and 581 /// \p EntryVal is the value from the original loop that maps to the steps. 582 /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it 583 /// can be a truncate instruction). 584 void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal, 585 const InductionDescriptor &ID); 586 587 /// Create a vector induction phi node based on an existing scalar one. \p 588 /// EntryVal is the value from the original loop that maps to the vector phi 589 /// node, and \p Step is the loop-invariant step. If \p EntryVal is a 590 /// truncate instruction, instead of widening the original IV, we widen a 591 /// version of the IV truncated to \p EntryVal's type. 
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Instruction *EntryVal);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AliasAnalysis *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

} // end namespace llvm

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst))
      B.SetCurrentDebugLocation(DIL->cloneWithDuplicationFactor(UF * VF));
    else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace {

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///   for (unsigned i = 0; i < 1024; i+=4) {
///     a = A[i];     // Member of index 0
///     b = A[i+1];   // Member of index 1
///     d = A[i+3];   // Member of index 3
///     ...
///   }
///
/// An interleaved store group of factor 4:
///   for (unsigned i = 0; i < 1024; i+=4) {
///     ...
///     A[i]   = a;   // Member of index 0
///     A[i+1] = b;   // Member of index 1
///     A[i+2] = c;   // Member of index 2
///     A[i+3] = d;   // Member of index 3
///   }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is related to the leader and it could be
  /// negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The largest index is always less than the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if it contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

  /// Add metadata (e.g. alias info) from the instructions in this group to \p
  /// NewInst.
  ///
  /// FIXME: this function currently does not add noalias metadata a la
  /// addNewMetadata. To do that we need to compute the intersection of the
  /// noalias info from all members.
  void addMetadata(Instruction *NewInst) const {
    SmallVector<Value *, 4> VL;
    std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
                   [](std::pair<int, Instruction *> p) { return p.second; });
    propagateMetadata(NewInst, VL);
  }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey = 0;
  int LargestKey = 0;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32          // Insert Position
  //      %add = add i32 %even     // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32            // Def of %odd
  //      store i32 %odd            // Insert Position
  Instruction *InsertPos;
};

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise the analysis is meaningless, since vectorizing
/// interleaved accesses would be unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if it doesn't belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

  /// \brief Initialize the LoopAccessInfo used for dependence checking.
  void setLAI(const LoopAccessInfo *Info) { LAI = Info; }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;

  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI = nullptr;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue = false;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// Holds dependences among the memory accesses in the loop. It maps a source
  /// access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor() = default;
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;

    // The scalar expression of this access.
    const SCEV *Scev = nullptr;

    // The size of the memory object.
    uint64_t Size = 0;

    // The alignment of this access.
    unsigned Align = 0;
  };

  /// \brief A type for holding instructions and their stride descriptors.
  using StrideEntry = std::pair<Instruction *, StrideDescriptor>;

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);

  /// \brief Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride) {
    unsigned Factor = std::abs(Stride);
    return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
  }

  /// \brief Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }

  /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return false if reordering is
  /// not necessary or is prevented because \p A and \p B may be dependent.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {
    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B. Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
  }

  /// \brief Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for them.
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE, HK_ISVECTORIZED };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      case HK_ISVECTORIZED:
        return (Val == 0 || Val == 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;

  /// Vectorization interleave factor.
  Hint Interleave;

  /// Vectorization forced.
  Hint Force;

  /// Already Vectorized.
  Hint IsVectorized;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe = false;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
                     OptimizationRemarkEmitter &ORE)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        IsVectorized("isvectorized", 0, HK_ISVECTORIZED), TheLoop(L), ORE(ORE) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    if (IsVectorized.Value != 1)
      // If the vectorization width and interleaving count are both 1 then
      // consider the loop to have been already vectorized because there's
      // nothing more that we can do.
      IsVectorized.Value = Width.Value == 1 && Interleave.Value == 1;
    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by saving the 'isvectorized' hint.
  void setAlreadyVectorized() {
    IsVectorized.Value = 1;
    Hint Hints[] = {IsVectorized};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (getIsVectorized() == 1) {
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      ORE.emit([&]() {
        return OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
                                          "AllDisabled", L->getStartLoc(),
                                          L->getHeader())
               << "loop not vectorized: vectorization and interleaving are "
                  "explicitly disabled, or the loop has already been "
                  "vectorized";
      });
      return false;
    }

    return true;
  }

  /// Dumps all the hint information.
  void emitRemarkWithHints() const {
    using namespace ore;

    ORE.emit([&]() {
      if (Force.Value == LoopVectorizeHints::FK_Disabled)
        return OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
               << "loop not vectorized: vectorization is explicitly disabled";
      else {
        OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
                                   TheLoop->getStartLoc(),
                                   TheLoop->getHeader());
        R << "loop not vectorized";
        if (Force.Value == LoopVectorizeHints::FK_Enabled) {
          R << " (Force=" << NV("Force", true);
          if (Width.Value != 0)
            R << ", Vector Width=" << NV("VectorWidth", Width.Value);
          if (Interleave.Value != 0)
            R << ", Interleave Count="
              << NV("InterleaveCount", Interleave.Value);
          R << ")";
        }
        return R;
      }
    });
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  unsigned getIsVectorized() const { return IsVectorized.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }

  /// \brief If hints are provided that force vectorization, use the AlwaysPrint
  /// pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const {
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return OptimizationRemarkAnalysis::AlwaysPrint;
  }

  bool allowReordering() const {
    // When loop hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations that is given by the
    // scalar loop. This is not enabled by default because it can be unsafe or
    // inefficient. For example, reordering floating-point operations will
    // change the way round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either an MDString or an MDNode with the first
      // operand an MDString.
      if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        if (!MD || MD->getNumOperands() == 0)
          continue;
        S = dyn_cast<MDString>(MD->getOperand(0));
        for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
          Args.push_back(MD->getOperand(i));
      } else {
        S = dyn_cast<MDString>(LoopID->getOperand(i));
        assert(Args.size() == 0 && "too many arguments for MDString");
      }

      if (!S)
        continue;

      // Check if the hint starts with the loop metadata prefix.
      StringRef Name = S->getString();
      if (Args.size() == 1)
        setHint(Name, Args[0]);
    }
  }

  /// Checks a string hint with one operand and sets the value if valid.
  void setHint(StringRef Name, Metadata *Arg) {
    if (!Name.startswith(Prefix()))
      return;
    Name = Name.substr(Prefix().size(), StringRef::npos);

    const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
    if (!C)
      return;
    unsigned Val = C->getZExtValue();

    Hint *Hints[] = {&Width, &Interleave, &Force, &IsVectorized};
    for (auto H : Hints) {
      if (Name == H->Name) {
        if (H->validate(Val))
          H->Value = Val;
        else
          DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
        break;
      }
    }
  }

  /// Create a new hint from name / value pair.
  MDNode *createHintMetadata(StringRef Name, unsigned V) const {
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    Metadata *MDs[] = {MDString::get(Context, Name),
                       ConstantAsMetadata::get(
                           ConstantInt::get(Type::getInt32Ty(Context), V))};
    return MDNode::get(Context, MDs);
  }

  /// Matches metadata with hint name.
  bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
    MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
    if (!Name)
      return false;

    for (auto H : HintTypes)
      if (Name->getString().endswith(H.Name))
        return true;
    return false;
  }

  /// Sets current hints into loop metadata, keeping other values intact.
  void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
    if (HintTypes.empty())
      return;

    // Reserve the first element for the LoopID (see below).
    SmallVector<Metadata *, 4> MDs(1);
    // If the loop already has metadata, then ignore the existing operands.
    MDNode *LoopID = TheLoop->getLoopID();
    if (LoopID) {
      for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
        MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
        // If the node is in the update list, ignore its old value.
        if (!matchesHintMetadataName(Node, HintTypes))
          MDs.push_back(Node);
      }
    }

    // Now, add the missing hints.
    for (auto H : HintTypes)
      MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));

    // Replace current metadata node with new one.
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);

    TheLoop->setLoopID(NewLoopID);
  }

  /// The loop these hints belong to.
  const Loop *TheLoop;

  /// Interface to emit optimization remarks.
1485 OptimizationRemarkEmitter &ORE; 1486 }; 1487 1488 } // end anonymous namespace 1489 1490 static void emitMissedWarning(Function *F, Loop *L, 1491 const LoopVectorizeHints &LH, 1492 OptimizationRemarkEmitter *ORE) { 1493 LH.emitRemarkWithHints(); 1494 1495 if (LH.getForce() == LoopVectorizeHints::FK_Enabled) { 1496 if (LH.getWidth() != 1) 1497 ORE->emit(DiagnosticInfoOptimizationFailure( 1498 DEBUG_TYPE, "FailedRequestedVectorization", 1499 L->getStartLoc(), L->getHeader()) 1500 << "loop not vectorized: " 1501 << "failed explicitly specified loop vectorization"); 1502 else if (LH.getInterleave() != 1) 1503 ORE->emit(DiagnosticInfoOptimizationFailure( 1504 DEBUG_TYPE, "FailedRequestedInterleaving", L->getStartLoc(), 1505 L->getHeader()) 1506 << "loop not interleaved: " 1507 << "failed explicitly specified loop interleaving"); 1508 } 1509 } 1510 1511 namespace { 1512 1513 /// LoopVectorizationLegality checks if it is legal to vectorize a loop, and 1514 /// to what vectorization factor. 1515 /// This class does not look at the profitability of vectorization, only the 1516 /// legality. This class has two main kinds of checks: 1517 /// * Memory checks - The code in canVectorizeMemory checks if vectorization 1518 /// will change the order of memory accesses in a way that will change the 1519 /// correctness of the program. 1520 /// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory 1521 /// checks for a number of different conditions, such as the availability of a 1522 /// single induction variable, that all types are supported and vectorize-able, 1523 /// etc. This code reflects the capabilities of InnerLoopVectorizer. 1524 /// This class is also used by InnerLoopVectorizer for identifying 1525 /// induction variable and the different reduction variables. 1526 class LoopVectorizationLegality { 1527 public: 1528 LoopVectorizationLegality( 1529 Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT, 1530 TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F, 1531 const TargetTransformInfo *TTI, 1532 std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI, 1533 OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R, 1534 LoopVectorizeHints *H) 1535 : TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT), GetLAA(GetLAA), 1536 ORE(ORE), InterleaveInfo(PSE, L, DT, LI), Requirements(R), Hints(H) {} 1537 1538 /// ReductionList contains the reduction descriptors for all 1539 /// of the reductions that were found in the loop. 1540 using ReductionList = DenseMap<PHINode *, RecurrenceDescriptor>; 1541 1542 /// InductionList saves induction variables and maps them to the 1543 /// induction descriptor. 1544 using InductionList = MapVector<PHINode *, InductionDescriptor>; 1545 1546 /// RecurrenceSet contains the phi nodes that are recurrences other than 1547 /// inductions and reductions. 1548 using RecurrenceSet = SmallPtrSet<const PHINode *, 8>; 1549 1550 /// Returns true if it is legal to vectorize this loop. 1551 /// This does not mean that it is profitable to vectorize this 1552 /// loop, only that it is legal to do so. 1553 bool canVectorize(); 1554 1555 /// Returns the primary induction variable. 1556 PHINode *getPrimaryInduction() { return PrimaryInduction; } 1557 1558 /// Returns the reduction variables found in the loop. 1559 ReductionList *getReductionVars() { return &Reductions; } 1560 1561 /// Returns the induction variables found in the loop. 
1562 InductionList *getInductionVars() { return &Inductions; } 1563 1564 /// Return the first-order recurrences found in the loop. 1565 RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; } 1566 1567 /// Return the set of instructions to sink to handle first-order recurrences. 1568 DenseMap<Instruction *, Instruction *> &getSinkAfter() { return SinkAfter; } 1569 1570 /// Returns the widest induction type. 1571 Type *getWidestInductionType() { return WidestIndTy; } 1572 1573 /// Returns True if V is an induction variable in this loop. 1574 bool isInductionVariable(const Value *V); 1575 1576 /// Returns True if PN is a reduction variable in this loop. 1577 bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); } 1578 1579 /// Returns True if Phi is a first-order recurrence in this loop. 1580 bool isFirstOrderRecurrence(const PHINode *Phi); 1581 1582 /// Return true if the block BB needs to be predicated in order for the loop 1583 /// to be vectorized. 1584 bool blockNeedsPredication(BasicBlock *BB); 1585 1586 /// Check if this pointer is consecutive when vectorizing. This happens 1587 /// when the last index of the GEP is the induction variable, or that the 1588 /// pointer itself is an induction variable. 1589 /// This check allows us to vectorize A[idx] into a wide load/store. 1590 /// Returns: 1591 /// 0 - Stride is unknown or non-consecutive. 1592 /// 1 - Address is consecutive. 1593 /// -1 - Address is consecutive, and decreasing. 1594 int isConsecutivePtr(Value *Ptr); 1595 1596 /// Returns true if the value V is uniform within the loop. 1597 bool isUniform(Value *V); 1598 1599 /// Returns the information that we collected about runtime memory check. 1600 const RuntimePointerChecking *getRuntimePointerChecking() const { 1601 return LAI->getRuntimePointerChecking(); 1602 } 1603 1604 const LoopAccessInfo *getLAI() const { return LAI; } 1605 1606 /// \brief Check if \p Instr belongs to any interleaved access group. 1607 bool isAccessInterleaved(Instruction *Instr) { 1608 return InterleaveInfo.isInterleaved(Instr); 1609 } 1610 1611 /// \brief Get the interleaved access group that \p Instr belongs to. 1612 const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) { 1613 return InterleaveInfo.getInterleaveGroup(Instr); 1614 } 1615 1616 /// \brief Returns true if an interleaved group requires a scalar iteration 1617 /// to handle accesses with gaps. 1618 bool requiresScalarEpilogue() const { 1619 return InterleaveInfo.requiresScalarEpilogue(); 1620 } 1621 1622 unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); } 1623 1624 uint64_t getMaxSafeRegisterWidth() const { 1625 return LAI->getDepChecker().getMaxSafeRegisterWidth(); 1626 } 1627 1628 bool hasStride(Value *V) { return LAI->hasStride(V); } 1629 1630 /// Returns true if the target machine supports masked store operation 1631 /// for the given \p DataType and kind of access to \p Ptr. 1632 bool isLegalMaskedStore(Type *DataType, Value *Ptr) { 1633 return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType); 1634 } 1635 1636 /// Returns true if the target machine supports masked load operation 1637 /// for the given \p DataType and kind of access to \p Ptr. 1638 bool isLegalMaskedLoad(Type *DataType, Value *Ptr) { 1639 return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType); 1640 } 1641 1642 /// Returns true if the target machine supports masked scatter operation 1643 /// for the given \p DataType. 
1644 bool isLegalMaskedScatter(Type *DataType) { 1645 return TTI->isLegalMaskedScatter(DataType); 1646 } 1647 1648 /// Returns true if the target machine supports masked gather operation 1649 /// for the given \p DataType. 1650 bool isLegalMaskedGather(Type *DataType) { 1651 return TTI->isLegalMaskedGather(DataType); 1652 } 1653 1654 /// Returns true if the target machine can represent \p V as a masked gather 1655 /// or scatter operation. 1656 bool isLegalGatherOrScatter(Value *V) { 1657 auto *LI = dyn_cast<LoadInst>(V); 1658 auto *SI = dyn_cast<StoreInst>(V); 1659 if (!LI && !SI) 1660 return false; 1661 auto *Ptr = getPointerOperand(V); 1662 auto *Ty = cast<PointerType>(Ptr->getType())->getElementType(); 1663 return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty)); 1664 } 1665 1666 /// Returns true if vector representation of the instruction \p I 1667 /// requires mask. 1668 bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); } 1669 1670 unsigned getNumStores() const { return LAI->getNumStores(); } 1671 unsigned getNumLoads() const { return LAI->getNumLoads(); } 1672 unsigned getNumPredStores() const { return NumPredStores; } 1673 1674 /// Returns true if \p I is an instruction that will be scalarized with 1675 /// predication. Such instructions include conditional stores and 1676 /// instructions that may divide by zero. 1677 bool isScalarWithPredication(Instruction *I); 1678 1679 /// Returns true if \p I is a memory instruction with consecutive memory 1680 /// access that can be widened. 1681 bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1); 1682 1683 // Returns true if the NoNaN attribute is set on the function. 1684 bool hasFunNoNaNAttr() const { return HasFunNoNaNAttr; } 1685 1686 private: 1687 /// Check if a single basic block loop is vectorizable. 1688 /// At this point we know that this is a loop with a constant trip count 1689 /// and we only need to check individual instructions. 1690 bool canVectorizeInstrs(); 1691 1692 /// When we vectorize loops we may change the order in which 1693 /// we read and write from memory. This method checks if it is 1694 /// legal to vectorize the code, considering only memory constrains. 1695 /// Returns true if the loop is vectorizable 1696 bool canVectorizeMemory(); 1697 1698 /// Return true if we can vectorize this loop using the IF-conversion 1699 /// transformation. 1700 bool canVectorizeWithIfConvert(); 1701 1702 /// Return true if all of the instructions in the block can be speculatively 1703 /// executed. \p SafePtrs is a list of addresses that are known to be legal 1704 /// and we know that we can read from them without segfault. 1705 bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs); 1706 1707 /// Updates the vectorization state by adding \p Phi to the inductions list. 1708 /// This can set \p Phi as the main induction of the loop if \p Phi is a 1709 /// better choice for the main induction than the existing one. 1710 void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID, 1711 SmallPtrSetImpl<Value *> &AllowedExit); 1712 1713 /// Create an analysis remark that explains why vectorization failed 1714 /// 1715 /// \p RemarkName is the identifier for the remark. If \p I is passed it is 1716 /// an instruction that prevents vectorization. Otherwise the loop is used 1717 /// for the location of the remark. \return the remark object that can be 1718 /// streamed to. 
1719 OptimizationRemarkAnalysis 1720 createMissedAnalysis(StringRef RemarkName, Instruction *I = nullptr) const { 1721 return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(), 1722 RemarkName, TheLoop, I); 1723 } 1724 1725 /// \brief If an access has a symbolic strides, this maps the pointer value to 1726 /// the stride symbol. 1727 const ValueToValueMap *getSymbolicStrides() { 1728 // FIXME: Currently, the set of symbolic strides is sometimes queried before 1729 // it's collected. This happens from canVectorizeWithIfConvert, when the 1730 // pointer is checked to reference consecutive elements suitable for a 1731 // masked access. 1732 return LAI ? &LAI->getSymbolicStrides() : nullptr; 1733 } 1734 1735 unsigned NumPredStores = 0; 1736 1737 /// The loop that we evaluate. 1738 Loop *TheLoop; 1739 1740 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. 1741 /// Applies dynamic knowledge to simplify SCEV expressions in the context 1742 /// of existing SCEV assumptions. The analysis will also add a minimal set 1743 /// of new predicates if this is required to enable vectorization and 1744 /// unrolling. 1745 PredicatedScalarEvolution &PSE; 1746 1747 /// Target Library Info. 1748 TargetLibraryInfo *TLI; 1749 1750 /// Target Transform Info 1751 const TargetTransformInfo *TTI; 1752 1753 /// Dominator Tree. 1754 DominatorTree *DT; 1755 1756 // LoopAccess analysis. 1757 std::function<const LoopAccessInfo &(Loop &)> *GetLAA; 1758 1759 // And the loop-accesses info corresponding to this loop. This pointer is 1760 // null until canVectorizeMemory sets it up. 1761 const LoopAccessInfo *LAI = nullptr; 1762 1763 /// Interface to emit optimization remarks. 1764 OptimizationRemarkEmitter *ORE; 1765 1766 /// The interleave access information contains groups of interleaved accesses 1767 /// with the same stride and close to each other. 1768 InterleavedAccessInfo InterleaveInfo; 1769 1770 // --- vectorization state --- // 1771 1772 /// Holds the primary induction variable. This is the counter of the 1773 /// loop. 1774 PHINode *PrimaryInduction = nullptr; 1775 1776 /// Holds the reduction variables. 1777 ReductionList Reductions; 1778 1779 /// Holds all of the induction variables that we found in the loop. 1780 /// Notice that inductions don't need to start at zero and that induction 1781 /// variables can be pointers. 1782 InductionList Inductions; 1783 1784 /// Holds the phi nodes that are first-order recurrences. 1785 RecurrenceSet FirstOrderRecurrences; 1786 1787 /// Holds instructions that need to sink past other instructions to handle 1788 /// first-order recurrences. 1789 DenseMap<Instruction *, Instruction *> SinkAfter; 1790 1791 /// Holds the widest induction type encountered. 1792 Type *WidestIndTy = nullptr; 1793 1794 /// Allowed outside users. This holds the induction and reduction 1795 /// vars which can be accessed from outside the loop. 1796 SmallPtrSet<Value *, 4> AllowedExit; 1797 1798 /// Can we assume the absence of NaNs. 1799 bool HasFunNoNaNAttr = false; 1800 1801 /// Vectorization requirements that will go through late-evaluation. 1802 LoopVectorizationRequirements *Requirements; 1803 1804 /// Used to emit an analysis of any legality issues. 1805 LoopVectorizeHints *Hints; 1806 1807 /// While vectorizing these instructions we have to generate a 1808 /// call to the appropriate masked intrinsic 1809 SmallPtrSet<const Instruction *, 8> MaskedOp; 1810 }; 1811 1812 /// LoopVectorizationCostModel - estimates the expected speedups due to 1813 /// vectorization. 
1814 /// In many cases vectorization is not profitable. This can happen because of 1815 /// a number of reasons. In this class we mainly attempt to predict the 1816 /// expected speedup/slowdowns due to the supported instruction set. We use the 1817 /// TargetTransformInfo to query the different backends for the cost of 1818 /// different operations. 1819 class LoopVectorizationCostModel { 1820 public: 1821 LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE, 1822 LoopInfo *LI, LoopVectorizationLegality *Legal, 1823 const TargetTransformInfo &TTI, 1824 const TargetLibraryInfo *TLI, DemandedBits *DB, 1825 AssumptionCache *AC, 1826 OptimizationRemarkEmitter *ORE, const Function *F, 1827 const LoopVectorizeHints *Hints) 1828 : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB), 1829 AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {} 1830 1831 /// \return An upper bound for the vectorization factor, or None if 1832 /// vectorization should be avoided up front. 1833 Optional<unsigned> computeMaxVF(bool OptForSize); 1834 1835 /// Information about vectorization costs 1836 struct VectorizationFactor { 1837 // Vector width with best cost 1838 unsigned Width; 1839 1840 // Cost of the loop with that width 1841 unsigned Cost; 1842 }; 1843 1844 /// \return The most profitable vectorization factor and the cost of that VF. 1845 /// This method checks every power of two up to MaxVF. If UserVF is not ZERO 1846 /// then this vectorization factor will be selected if vectorization is 1847 /// possible. 1848 VectorizationFactor selectVectorizationFactor(unsigned MaxVF); 1849 1850 /// Setup cost-based decisions for user vectorization factor. 1851 void selectUserVectorizationFactor(unsigned UserVF) { 1852 collectUniformsAndScalars(UserVF); 1853 collectInstsToScalarize(UserVF); 1854 } 1855 1856 /// \return The size (in bits) of the smallest and widest types in the code 1857 /// that needs to be vectorized. We ignore values that remain scalar such as 1858 /// 64 bit loop indices. 1859 std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); 1860 1861 /// \return The desired interleave count. 1862 /// If interleave count has been specified by metadata it will be returned. 1863 /// Otherwise, the interleave count is computed and returned. VF and LoopCost 1864 /// are the selected vectorization factor and the cost of the selected VF. 1865 unsigned selectInterleaveCount(bool OptForSize, unsigned VF, 1866 unsigned LoopCost); 1867 1868 /// Memory access instruction may be vectorized in more than one way. 1869 /// Form of instruction after vectorization depends on cost. 1870 /// This function takes cost-based decisions for Load/Store instructions 1871 /// and collects them in a map. This decisions map is used for building 1872 /// the lists of loop-uniform and loop-scalar instructions. 1873 /// The calculated cost is saved with widening decision in order to 1874 /// avoid redundant calculations. 1875 void setCostBasedWideningDecision(unsigned VF); 1876 1877 /// \brief A struct that represents some properties of the register usage 1878 /// of a loop. 1879 struct RegisterUsage { 1880 /// Holds the number of loop invariant values that are used in the loop. 1881 unsigned LoopInvariantRegs; 1882 1883 /// Holds the maximum number of concurrent live intervals in the loop. 1884 unsigned MaxLocalUsers; 1885 1886 /// Holds the number of instructions in the loop. 
    unsigned NumInstructions;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
    assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");
    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.count(I);
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;
    assert(Uniforms.count(VF) && "VF not yet analyzed for uniformity");
    auto UniformsPerVF = Uniforms.find(VF);
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;
    assert(Scalars.count(VF) && "Scalar values are not calculated for VF");
    auto ScalarsPerVF = Scalars.find(VF);
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
    return VF > 1 && MinBWs.count(I) && !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
                           unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup *Grp, unsigned VF,
                           InstWidening W, unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
    /// But the cost will be assigned to one instruction only.
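    /// For example (illustrative only), for an interleave group {%a, %b, %c}
    /// whose insert position is %b, the decision \p W is recorded for all
    /// three members, but only the entry for %b carries \p Cost; %a and %c
    /// are recorded with a cost of 0.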
1963 for (unsigned i = 0; i < Grp->getFactor(); ++i) { 1964 if (auto *I = Grp->getMember(i)) { 1965 if (Grp->getInsertPos() == I) 1966 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1967 else 1968 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); 1969 } 1970 } 1971 } 1972 1973 /// Return the cost model decision for the given instruction \p I and vector 1974 /// width \p VF. Return CM_Unknown if this instruction did not pass 1975 /// through the cost modeling. 1976 InstWidening getWideningDecision(Instruction *I, unsigned VF) { 1977 assert(VF >= 2 && "Expected VF >=2"); 1978 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF); 1979 auto Itr = WideningDecisions.find(InstOnVF); 1980 if (Itr == WideningDecisions.end()) 1981 return CM_Unknown; 1982 return Itr->second.first; 1983 } 1984 1985 /// Return the vectorization cost for the given instruction \p I and vector 1986 /// width \p VF. 1987 unsigned getWideningCost(Instruction *I, unsigned VF) { 1988 assert(VF >= 2 && "Expected VF >=2"); 1989 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF); 1990 assert(WideningDecisions.count(InstOnVF) && "The cost is not calculated"); 1991 return WideningDecisions[InstOnVF].second; 1992 } 1993 1994 /// Return True if instruction \p I is an optimizable truncate whose operand 1995 /// is an induction variable. Such a truncate will be removed by adding a new 1996 /// induction variable with the destination type. 1997 bool isOptimizableIVTruncate(Instruction *I, unsigned VF) { 1998 // If the instruction is not a truncate, return false. 1999 auto *Trunc = dyn_cast<TruncInst>(I); 2000 if (!Trunc) 2001 return false; 2002 2003 // Get the source and destination types of the truncate. 2004 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 2005 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 2006 2007 // If the truncate is free for the given types, return false. Replacing a 2008 // free truncate with an induction variable would add an induction variable 2009 // update instruction to each iteration of the loop. We exclude from this 2010 // check the primary induction variable since it will need an update 2011 // instruction regardless. 2012 Value *Op = Trunc->getOperand(0); 2013 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 2014 return false; 2015 2016 // If the truncated value is not an induction variable, return false. 2017 return Legal->isInductionVariable(Op); 2018 } 2019 2020 /// Collects the instructions to scalarize for each predicated instruction in 2021 /// the loop. 2022 void collectInstsToScalarize(unsigned VF); 2023 2024 /// Collect Uniform and Scalar values for the given \p VF. 2025 /// The sets depend on CM decision for Load/Store instructions 2026 /// that may be vectorized as interleave, gather-scatter or scalarized. 2027 void collectUniformsAndScalars(unsigned VF) { 2028 // Do the analysis once. 2029 if (VF == 1 || Uniforms.count(VF)) 2030 return; 2031 setCostBasedWideningDecision(VF); 2032 collectLoopUniforms(VF); 2033 collectLoopScalars(VF); 2034 } 2035 2036 private: 2037 /// \return An upper bound for the vectorization factor, larger than zero. 2038 /// One is returned if vectorization should best be avoided due to cost. 
  unsigned computeFeasibleMaxVF(bool OptForSize, unsigned ConstTripCount);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<unsigned, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);

  /// Calculate vectorization cost of memory instruction \p I.
  unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);

  /// The cost computation for scalarized memory instruction.
  unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);

  /// The cost computation for interleaving group of memory instructions.
  unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);

  /// The cost computation for Gather/Scatter instruction.
  unsigned getGatherScatterCost(Instruction *I, unsigned VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);

  /// The cost calculation for Load instruction \p I with uniform pointer -
  /// scalar load + broadcast.
  unsigned getUniformMemOpCost(Instruction *I, unsigned VF);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Create an analysis remark that explains why vectorization failed
  ///
  /// \p RemarkName is the identifier for the remark. \return the remark object
  /// that can be streamed to.
  OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
    return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
                                  RemarkName, TheLoop);
  }

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, unsigned>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// A map holding scalar costs for different vectorization factors.
The 2112 /// presence of a cost for an instruction in the mapping indicates that the 2113 /// instruction will be scalarized when vectorizing with the associated 2114 /// vectorization factor. The entries are VF-ScalarCostTy pairs. 2115 DenseMap<unsigned, ScalarCostsTy> InstsToScalarize; 2116 2117 /// Holds the instructions known to be uniform after vectorization. 2118 /// The data is collected per VF. 2119 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms; 2120 2121 /// Holds the instructions known to be scalar after vectorization. 2122 /// The data is collected per VF. 2123 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars; 2124 2125 /// Holds the instructions (address computations) that are forced to be 2126 /// scalarized. 2127 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars; 2128 2129 /// Returns the expected difference in cost from scalarizing the expression 2130 /// feeding a predicated instruction \p PredInst. The instructions to 2131 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 2132 /// non-negative return value implies the expression will be scalarized. 2133 /// Currently, only single-use chains are considered for scalarization. 2134 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 2135 unsigned VF); 2136 2137 /// Collect the instructions that are uniform after vectorization. An 2138 /// instruction is uniform if we represent it with a single scalar value in 2139 /// the vectorized loop corresponding to each vector iteration. Examples of 2140 /// uniform instructions include pointer operands of consecutive or 2141 /// interleaved memory accesses. Note that although uniformity implies an 2142 /// instruction will be scalar, the reverse is not true. In general, a 2143 /// scalarized instruction will be represented by VF scalar values in the 2144 /// vectorized loop, each corresponding to an iteration of the original 2145 /// scalar loop. 2146 void collectLoopUniforms(unsigned VF); 2147 2148 /// Collect the instructions that are scalar after vectorization. An 2149 /// instruction is scalar if it is known to be uniform or will be scalarized 2150 /// during vectorization. Non-uniform scalarized instructions will be 2151 /// represented by VF values in the vectorized loop, each corresponding to an 2152 /// iteration of the original scalar loop. 2153 void collectLoopScalars(unsigned VF); 2154 2155 /// Keeps cost model vectorization decision and cost for instructions. 2156 /// Right now it is used for memory instructions only. 2157 using DecisionList = DenseMap<std::pair<Instruction *, unsigned>, 2158 std::pair<InstWidening, unsigned>>; 2159 2160 DecisionList WideningDecisions; 2161 2162 public: 2163 /// The loop that we evaluate. 2164 Loop *TheLoop; 2165 2166 /// Predicated scalar evolution analysis. 2167 PredicatedScalarEvolution &PSE; 2168 2169 /// Loop Info analysis. 2170 LoopInfo *LI; 2171 2172 /// Vectorization legality. 2173 LoopVectorizationLegality *Legal; 2174 2175 /// Vector target information. 2176 const TargetTransformInfo &TTI; 2177 2178 /// Target Library Info. 2179 const TargetLibraryInfo *TLI; 2180 2181 /// Demanded bits analysis. 2182 DemandedBits *DB; 2183 2184 /// Assumption cache. 2185 AssumptionCache *AC; 2186 2187 /// Interface to emit optimization remarks. 2188 OptimizationRemarkEmitter *ORE; 2189 2190 const Function *TheFunction; 2191 2192 /// Loop Vectorize Hint. 2193 const LoopVectorizeHints *Hints; 2194 2195 /// Values to ignore in the cost model. 
  SmallPtrSet<const Value *, 16> ValuesToIgnore;

  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;
};

} // end anonymous namespace

namespace llvm {

/// LoopVectorizationPlanner - drives the vectorization process after having
/// passed Legality checks.
/// The planner builds and optimizes the Vectorization Plans, which record the
/// decisions on how to vectorize the given loop. In particular, they represent
/// the control-flow of the vectorized version, the replication of instructions
/// that are to be scalarized, and the interleaved access groups.
class LoopVectorizationPlanner {
  /// The loop that we evaluate.
  Loop *OrigLoop;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel &CM;

  using VPlanPtr = std::unique_ptr<VPlan>;

  SmallVector<VPlanPtr, 4> VPlans;

  /// This class is used to enable the VPlan to invoke a method of ILV. This is
  /// needed until the method is refactored out of ILV and becomes reusable.
  struct VPCallbackILV : public VPCallback {
    InnerLoopVectorizer &ILV;

    VPCallbackILV(InnerLoopVectorizer &ILV) : ILV(ILV) {}

    Value *getOrCreateVectorValues(Value *V, unsigned Part) override {
      return ILV.getOrCreateVectorValue(V, Part);
    }
  };

  /// A builder used to construct the current plan.
  VPBuilder Builder;

  /// When we if-convert we need to create edge masks. We have to cache values
  /// so that we don't end up with exponential recursion/IR. Note that
  /// if-conversion currently takes place during VPlan-construction, so these
  /// caches are only used at that stage.
  using EdgeMaskCacheTy =
      DenseMap<std::pair<BasicBlock *, BasicBlock *>, VPValue *>;
  using BlockMaskCacheTy = DenseMap<BasicBlock *, VPValue *>;
  EdgeMaskCacheTy EdgeMaskCache;
  BlockMaskCacheTy BlockMaskCache;

  unsigned BestVF = 0;
  unsigned BestUF = 0;

public:
  LoopVectorizationPlanner(Loop *L, LoopInfo *LI, const TargetLibraryInfo *TLI,
                           const TargetTransformInfo *TTI,
                           LoopVectorizationLegality *Legal,
                           LoopVectorizationCostModel &CM)
      : OrigLoop(L), LI(LI), TLI(TLI), TTI(TTI), Legal(Legal), CM(CM) {}

  /// Plan how to best vectorize, return the best VF and its cost.
  LoopVectorizationCostModel::VectorizationFactor plan(bool OptForSize,
                                                       unsigned UserVF);

  /// Finalize the best decision and dispose of all other VPlans.
  void setBestPlan(unsigned VF, unsigned UF);

  /// Generate the IR code for the body of the vectorized loop according to the
  /// best selected VPlan.
  void executePlan(InnerLoopVectorizer &LB, DominatorTree *DT);

  void printPlans(raw_ostream &O) {
    for (const auto &Plan : VPlans)
      O << *Plan;
  }

protected:
  /// Collect the instructions from the original loop that would be trivially
  /// dead in the vectorized loop if generated.
2290 void collectTriviallyDeadInstructions( 2291 SmallPtrSetImpl<Instruction *> &DeadInstructions); 2292 2293 /// A range of powers-of-2 vectorization factors with fixed start and 2294 /// adjustable end. The range includes start and excludes end, e.g.,: 2295 /// [1, 9) = {1, 2, 4, 8} 2296 struct VFRange { 2297 // A power of 2. 2298 const unsigned Start; 2299 2300 // Need not be a power of 2. If End <= Start range is empty. 2301 unsigned End; 2302 }; 2303 2304 /// Test a \p Predicate on a \p Range of VF's. Return the value of applying 2305 /// \p Predicate on Range.Start, possibly decreasing Range.End such that the 2306 /// returned value holds for the entire \p Range. 2307 bool getDecisionAndClampRange(const std::function<bool(unsigned)> &Predicate, 2308 VFRange &Range); 2309 2310 /// Build VPlans for power-of-2 VF's between \p MinVF and \p MaxVF inclusive, 2311 /// according to the information gathered by Legal when it checked if it is 2312 /// legal to vectorize the loop. 2313 void buildVPlans(unsigned MinVF, unsigned MaxVF); 2314 2315 private: 2316 /// A helper function that computes the predicate of the block BB, assuming 2317 /// that the header block of the loop is set to True. It returns the *entry* 2318 /// mask for the block BB. 2319 VPValue *createBlockInMask(BasicBlock *BB, VPlanPtr &Plan); 2320 2321 /// A helper function that computes the predicate of the edge between SRC 2322 /// and DST. 2323 VPValue *createEdgeMask(BasicBlock *Src, BasicBlock *Dst, VPlanPtr &Plan); 2324 2325 /// Check if \I belongs to an Interleave Group within the given VF \p Range, 2326 /// \return true in the first returned value if so and false otherwise. 2327 /// Build a new VPInterleaveGroup Recipe if \I is the primary member of an IG 2328 /// for \p Range.Start, and provide it as the second returned value. 2329 /// Note that if \I is an adjunct member of an IG for \p Range.Start, the 2330 /// \return value is <true, nullptr>, as it is handled by another recipe. 2331 /// \p Range.End may be decreased to ensure same decision from \p Range.Start 2332 /// to \p Range.End. 2333 VPInterleaveRecipe *tryToInterleaveMemory(Instruction *I, VFRange &Range); 2334 2335 // Check if \I is a memory instruction to be widened for \p Range.Start and 2336 // potentially masked. Such instructions are handled by a recipe that takes an 2337 // additional VPInstruction for the mask. 2338 VPWidenMemoryInstructionRecipe *tryToWidenMemory(Instruction *I, 2339 VFRange &Range, 2340 VPlanPtr &Plan); 2341 2342 /// Check if an induction recipe should be constructed for \I within the given 2343 /// VF \p Range. If so build and return it. If not, return null. \p Range.End 2344 /// may be decreased to ensure same decision from \p Range.Start to 2345 /// \p Range.End. 2346 VPWidenIntOrFpInductionRecipe *tryToOptimizeInduction(Instruction *I, 2347 VFRange &Range); 2348 2349 /// Handle non-loop phi nodes. Currently all such phi nodes are turned into 2350 /// a sequence of select instructions as the vectorizer currently performs 2351 /// full if-conversion. 2352 VPBlendRecipe *tryToBlend(Instruction *I, VPlanPtr &Plan); 2353 2354 /// Check if \p I can be widened within the given VF \p Range. If \p I can be 2355 /// widened for \p Range.Start, check if the last recipe of \p VPBB can be 2356 /// extended to include \p I or else build a new VPWidenRecipe for it and 2357 /// append it to \p VPBB. Return true if \p I can be widened for Range.Start, 2358 /// false otherwise. 
Range.End may be decreased to ensure same decision from
  /// \p Range.Start to \p Range.End.
  bool tryToWiden(Instruction *I, VPBasicBlock *VPBB, VFRange &Range);

  /// Build a VPReplicationRecipe for \p I and enclose it within a Region if it
  /// is predicated. \return \p VPBB augmented with this new recipe if \p I is
  /// not predicated, otherwise \return a new VPBasicBlock that succeeds the new
  /// Region. Update the packing decision of predicated instructions if they
  /// feed \p I. Range.End may be decreased to ensure same recipe behavior from
  /// \p Range.Start to \p Range.End.
  VPBasicBlock *handleReplication(
      Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
      DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
      VPlanPtr &Plan);

  /// Create a replicating region for instruction \p I that requires
  /// predication. \p PredRecipe is a VPReplicateRecipe holding \p I.
  VPRegionBlock *createReplicateRegion(Instruction *I, VPRecipeBase *PredRecipe,
                                       VPlanPtr &Plan);

  /// Build a VPlan according to the information gathered by Legal. \return a
  /// VPlan for vectorization factors \p Range.Start and up to \p Range.End
  /// exclusive, possibly decreasing \p Range.End.
  VPlanPtr buildVPlan(VFRange &Range,
                      const SmallPtrSetImpl<Value *> &NeedDef);
};

} // end namespace llvm

namespace {

/// \brief This holds vectorization requirements that must be verified late in
/// the process. The requirements are set by legality and the cost model. Once
/// vectorization has been determined to be possible and profitable the
/// requirements can be verified by looking for metadata or compiler options.
/// For example, some loops require FP commutativity which is only allowed if
/// vectorization is explicitly specified or if the fast-math compiler option
/// has been provided.
/// Late evaluation of these requirements allows helpful diagnostics to be
/// composed that tell the user what needs to be done to vectorize the loop,
/// for example, by specifying #pragma clang loop vectorize or -ffast-math.
/// Late evaluation should be used only when diagnostics can be generated that
/// can be followed by a non-expert user.
class LoopVectorizationRequirements {
public:
  LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE) : ORE(ORE) {}

  void addUnsafeAlgebraInst(Instruction *I) {
    // First unsafe algebra instruction.
    if (!UnsafeAlgebraInst)
      UnsafeAlgebraInst = I;
  }

  void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }

  bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
    const char *PassName = Hints.vectorizeAnalysisPassName();
    bool Failed = false;
    if (UnsafeAlgebraInst && !Hints.allowReordering()) {
      ORE.emit([&]() {
        return OptimizationRemarkAnalysisFPCommute(
                   PassName, "CantReorderFPOps",
                   UnsafeAlgebraInst->getDebugLoc(),
                   UnsafeAlgebraInst->getParent())
               << "loop not vectorized: cannot prove it is safe to reorder "
                  "floating-point operations";
      });
      Failed = true;
    }

    // Test if runtime memcheck thresholds are exceeded.
2429 bool PragmaThresholdReached = 2430 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 2431 bool ThresholdReached = 2432 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 2433 if ((ThresholdReached && !Hints.allowReordering()) || 2434 PragmaThresholdReached) { 2435 ORE.emit([&]() { 2436 return OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps", 2437 L->getStartLoc(), 2438 L->getHeader()) 2439 << "loop not vectorized: cannot prove it is safe to reorder " 2440 "memory operations"; 2441 }); 2442 DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 2443 Failed = true; 2444 } 2445 2446 return Failed; 2447 } 2448 2449 private: 2450 unsigned NumRuntimePointerChecks = 0; 2451 Instruction *UnsafeAlgebraInst = nullptr; 2452 2453 /// Interface to emit optimization remarks. 2454 OptimizationRemarkEmitter &ORE; 2455 }; 2456 2457 } // end anonymous namespace 2458 2459 static void addAcyclicInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) { 2460 if (L.empty()) { 2461 if (!hasCyclesInLoopBody(L)) 2462 V.push_back(&L); 2463 return; 2464 } 2465 for (Loop *InnerL : L) 2466 addAcyclicInnerLoop(*InnerL, V); 2467 } 2468 2469 namespace { 2470 2471 /// The LoopVectorize Pass. 2472 struct LoopVectorize : public FunctionPass { 2473 /// Pass identification, replacement for typeid 2474 static char ID; 2475 2476 LoopVectorizePass Impl; 2477 2478 explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true) 2479 : FunctionPass(ID) { 2480 Impl.DisableUnrolling = NoUnrolling; 2481 Impl.AlwaysVectorize = AlwaysVectorize; 2482 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2483 } 2484 2485 bool runOnFunction(Function &F) override { 2486 if (skipFunction(F)) 2487 return false; 2488 2489 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2490 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2491 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2492 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2493 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2494 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2495 auto *TLI = TLIP ? 
&TLIP->getTLI() : nullptr; 2496 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2497 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2498 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2499 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2500 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2501 2502 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2503 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2504 2505 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2506 GetLAA, *ORE); 2507 } 2508 2509 void getAnalysisUsage(AnalysisUsage &AU) const override { 2510 AU.addRequired<AssumptionCacheTracker>(); 2511 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2512 AU.addRequired<DominatorTreeWrapperPass>(); 2513 AU.addRequired<LoopInfoWrapperPass>(); 2514 AU.addRequired<ScalarEvolutionWrapperPass>(); 2515 AU.addRequired<TargetTransformInfoWrapperPass>(); 2516 AU.addRequired<AAResultsWrapperPass>(); 2517 AU.addRequired<LoopAccessLegacyAnalysis>(); 2518 AU.addRequired<DemandedBitsWrapperPass>(); 2519 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2520 AU.addPreserved<LoopInfoWrapperPass>(); 2521 AU.addPreserved<DominatorTreeWrapperPass>(); 2522 AU.addPreserved<BasicAAWrapperPass>(); 2523 AU.addPreserved<GlobalsAAWrapperPass>(); 2524 } 2525 }; 2526 2527 } // end anonymous namespace 2528 2529 //===----------------------------------------------------------------------===// 2530 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2531 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2532 //===----------------------------------------------------------------------===// 2533 2534 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2535 // We need to place the broadcast of invariant variables outside the loop. 2536 Instruction *Instr = dyn_cast<Instruction>(V); 2537 bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody); 2538 bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr; 2539 2540 // Place the code for broadcasting invariant variables in the new preheader. 2541 IRBuilder<>::InsertPointGuard Guard(Builder); 2542 if (Invariant) 2543 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2544 2545 // Broadcast the scalar into all locations in the vector. 2546 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2547 2548 return Shuf; 2549 } 2550 2551 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 2552 const InductionDescriptor &II, Value *Step, Instruction *EntryVal) { 2553 Value *Start = II.getStartValue(); 2554 2555 // Construct the initial value of the vector IV in the vector loop preheader 2556 auto CurrIP = Builder.saveIP(); 2557 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2558 if (isa<TruncInst>(EntryVal)) { 2559 assert(Start->getType()->isIntegerTy() && 2560 "Truncation requires an integer type"); 2561 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2562 Step = Builder.CreateTrunc(Step, TruncType); 2563 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2564 } 2565 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2566 Value *SteppedStart = 2567 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 2568 2569 // We create vector phi nodes for both integer and floating-point induction 2570 // variables. Here, we determine the kind of arithmetic we will perform. 
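  // As an illustrative sketch only (value names and constants are made up,
  // not taken from any particular input), for an i32 induction with step 1,
  // VF = 4 and UF = 1, the vector IV built below resembles:
  //
  //   vector.body:
  //     %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ],
  //                              [ %vec.ind.next, %vector.body ]
  //     ...
  //     %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>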
2571 Instruction::BinaryOps AddOp; 2572 Instruction::BinaryOps MulOp; 2573 if (Step->getType()->isIntegerTy()) { 2574 AddOp = Instruction::Add; 2575 MulOp = Instruction::Mul; 2576 } else { 2577 AddOp = II.getInductionOpcode(); 2578 MulOp = Instruction::FMul; 2579 } 2580 2581 // Multiply the vectorization factor by the step using integer or 2582 // floating-point arithmetic as appropriate. 2583 Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF); 2584 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 2585 2586 // Create a vector splat to use in the induction update. 2587 // 2588 // FIXME: If the step is non-constant, we create the vector splat with 2589 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2590 // handle a constant vector splat. 2591 Value *SplatVF = isa<Constant>(Mul) 2592 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 2593 : Builder.CreateVectorSplat(VF, Mul); 2594 Builder.restoreIP(CurrIP); 2595 2596 // We may need to add the step a number of times, depending on the unroll 2597 // factor. The last of those goes into the PHI. 2598 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2599 &*LoopVectorBody->getFirstInsertionPt()); 2600 Instruction *LastInduction = VecInd; 2601 for (unsigned Part = 0; Part < UF; ++Part) { 2602 VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction); 2603 if (isa<TruncInst>(EntryVal)) 2604 addMetadata(LastInduction, EntryVal); 2605 LastInduction = cast<Instruction>(addFastMathFlag( 2606 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"))); 2607 } 2608 2609 // Move the last step to the end of the latch block. This ensures consistent 2610 // placement of all induction updates. 2611 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2612 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2613 auto *ICmp = cast<Instruction>(Br->getCondition()); 2614 LastInduction->moveBefore(ICmp); 2615 LastInduction->setName("vec.ind.next"); 2616 2617 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2618 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2619 } 2620 2621 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 2622 return Cost->isScalarAfterVectorization(I, VF) || 2623 Cost->isProfitableToScalarize(I, VF); 2624 } 2625 2626 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2627 if (shouldScalarizeInstruction(IV)) 2628 return true; 2629 auto isScalarInst = [&](User *U) -> bool { 2630 auto *I = cast<Instruction>(U); 2631 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 2632 }; 2633 return llvm::any_of(IV->users(), isScalarInst); 2634 } 2635 2636 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) { 2637 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 2638 "Primary induction variable must have an integer type"); 2639 2640 auto II = Legal->getInductionVars()->find(IV); 2641 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 2642 2643 auto ID = II->second; 2644 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2645 2646 // The scalar value to broadcast. This will be derived from the canonical 2647 // induction variable. 2648 Value *ScalarIV = nullptr; 2649 2650 // The value from the original loop to which we are mapping the new induction 2651 // variable. 2652 Instruction *EntryVal = Trunc ? 
cast<Instruction>(Trunc) : IV; 2653 2654 // True if we have vectorized the induction variable. 2655 auto VectorizedIV = false; 2656 2657 // Determine if we want a scalar version of the induction variable. This is 2658 // true if the induction variable itself is not widened, or if it has at 2659 // least one user in the loop that is not widened. 2660 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal); 2661 2662 // Generate code for the induction step. Note that induction steps are 2663 // required to be loop-invariant 2664 assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) && 2665 "Induction step should be loop invariant"); 2666 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2667 Value *Step = nullptr; 2668 if (PSE.getSE()->isSCEVable(IV->getType())) { 2669 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2670 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 2671 LoopVectorPreHeader->getTerminator()); 2672 } else { 2673 Step = cast<SCEVUnknown>(ID.getStep())->getValue(); 2674 } 2675 2676 // Try to create a new independent vector induction variable. If we can't 2677 // create the phi node, we will splat the scalar induction variable in each 2678 // loop iteration. 2679 if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) { 2680 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 2681 VectorizedIV = true; 2682 } 2683 2684 // If we haven't yet vectorized the induction variable, or if we will create 2685 // a scalar one, we need to define the scalar induction variable and step 2686 // values. If we were given a truncation type, truncate the canonical 2687 // induction variable and step. Otherwise, derive these values from the 2688 // induction descriptor. 2689 if (!VectorizedIV || NeedsScalarIV) { 2690 ScalarIV = Induction; 2691 if (IV != OldInduction) { 2692 ScalarIV = IV->getType()->isIntegerTy() 2693 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 2694 : Builder.CreateCast(Instruction::SIToFP, Induction, 2695 IV->getType()); 2696 ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL); 2697 ScalarIV->setName("offset.idx"); 2698 } 2699 if (Trunc) { 2700 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2701 assert(Step->getType()->isIntegerTy() && 2702 "Truncation requires an integer step"); 2703 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 2704 Step = Builder.CreateTrunc(Step, TruncType); 2705 } 2706 } 2707 2708 // If we haven't yet vectorized the induction variable, splat the scalar 2709 // induction variable, and build the necessary step vectors. 2710 if (!VectorizedIV) { 2711 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2712 for (unsigned Part = 0; Part < UF; ++Part) { 2713 Value *EntryPart = 2714 getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode()); 2715 VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart); 2716 if (Trunc) 2717 addMetadata(EntryPart, Trunc); 2718 } 2719 } 2720 2721 // If an induction variable is only used for counting loop iterations or 2722 // calculating addresses, it doesn't need to be widened. Create scalar steps 2723 // that can be used by instructions we will later scalarize. Note that the 2724 // addition of the scalar steps will not increase the number of instructions 2725 // in the loop in the common case prior to InstCombine. We will be trading 2726 // one vector extract for each scalar step. 
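  // As a sketch (illustrative values), for an integer IV %iv with step 1,
  // VF = 4 and UF = 2, buildScalarSteps records the eight per-lane values
  //   %iv + 0, %iv + 1, ..., %iv + 7
  // keyed by {Part, Lane}, instead of materializing a vector and extracting
  // each lane from it.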
2727 if (NeedsScalarIV) 2728 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 2729 } 2730 2731 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2732 Instruction::BinaryOps BinOp) { 2733 // Create and check the types. 2734 assert(Val->getType()->isVectorTy() && "Must be a vector"); 2735 int VLen = Val->getType()->getVectorNumElements(); 2736 2737 Type *STy = Val->getType()->getScalarType(); 2738 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2739 "Induction Step must be an integer or FP"); 2740 assert(Step->getType() == STy && "Step has wrong type"); 2741 2742 SmallVector<Constant *, 8> Indices; 2743 2744 if (STy->isIntegerTy()) { 2745 // Create a vector of consecutive numbers from zero to VF. 2746 for (int i = 0; i < VLen; ++i) 2747 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 2748 2749 // Add the consecutive indices to the vector value. 2750 Constant *Cv = ConstantVector::get(Indices); 2751 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 2752 Step = Builder.CreateVectorSplat(VLen, Step); 2753 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2754 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2755 // which can be found from the original scalar operations. 2756 Step = Builder.CreateMul(Cv, Step); 2757 return Builder.CreateAdd(Val, Step, "induction"); 2758 } 2759 2760 // Floating point induction. 2761 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2762 "Binary Opcode should be specified for FP induction"); 2763 // Create a vector of consecutive numbers from zero to VF. 2764 for (int i = 0; i < VLen; ++i) 2765 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 2766 2767 // Add the consecutive indices to the vector value. 2768 Constant *Cv = ConstantVector::get(Indices); 2769 2770 Step = Builder.CreateVectorSplat(VLen, Step); 2771 2772 // Floating point operations had to be 'fast' to enable the induction. 2773 FastMathFlags Flags; 2774 Flags.setFast(); 2775 2776 Value *MulOp = Builder.CreateFMul(Cv, Step); 2777 if (isa<Instruction>(MulOp)) 2778 // Have to check, MulOp may be a constant 2779 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 2780 2781 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2782 if (isa<Instruction>(BOp)) 2783 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2784 return BOp; 2785 } 2786 2787 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2788 Value *EntryVal, 2789 const InductionDescriptor &ID) { 2790 // We shouldn't have to build scalar steps if we aren't vectorizing. 2791 assert(VF > 1 && "VF should be greater than one"); 2792 2793 // Get the value type and ensure it and the step have the same integer type. 2794 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2795 assert(ScalarIVTy == Step->getType() && 2796 "Val and Step should have the same type"); 2797 2798 // We build scalar steps for both integer and floating-point induction 2799 // variables. Here, we determine the kind of arithmetic we will perform. 2800 Instruction::BinaryOps AddOp; 2801 Instruction::BinaryOps MulOp; 2802 if (ScalarIVTy->isIntegerTy()) { 2803 AddOp = Instruction::Add; 2804 MulOp = Instruction::Mul; 2805 } else { 2806 AddOp = ID.getInductionOpcode(); 2807 MulOp = Instruction::FMul; 2808 } 2809 2810 // Determine the number of scalars we need to generate for each unroll 2811 // iteration. If EntryVal is uniform, we only need to generate the first 2812 // lane. 
Otherwise, we generate all VF values. 2813 unsigned Lanes = 2814 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1 2815 : VF; 2816 // Compute the scalar steps and save the results in VectorLoopValueMap. 2817 for (unsigned Part = 0; Part < UF; ++Part) { 2818 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2819 auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane); 2820 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 2821 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 2822 VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add); 2823 } 2824 } 2825 } 2826 2827 int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) { 2828 const ValueToValueMap &Strides = getSymbolicStrides() ? *getSymbolicStrides() : 2829 ValueToValueMap(); 2830 2831 int Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, true, false); 2832 if (Stride == 1 || Stride == -1) 2833 return Stride; 2834 return 0; 2835 } 2836 2837 bool LoopVectorizationLegality::isUniform(Value *V) { 2838 return LAI->isUniform(V); 2839 } 2840 2841 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) { 2842 assert(V != Induction && "The new induction variable should not be used."); 2843 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2844 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2845 2846 // If we have a stride that is replaced by one, do it here. 2847 if (Legal->hasStride(V)) 2848 V = ConstantInt::get(V->getType(), 1); 2849 2850 // If we have a vector mapped to this value, return it. 2851 if (VectorLoopValueMap.hasVectorValue(V, Part)) 2852 return VectorLoopValueMap.getVectorValue(V, Part); 2853 2854 // If the value has not been vectorized, check if it has been scalarized 2855 // instead. If it has been scalarized, and we actually need the value in 2856 // vector form, we will construct the vector values on demand. 2857 if (VectorLoopValueMap.hasAnyScalarValue(V)) { 2858 Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0}); 2859 2860 // If we've scalarized a value, that value should be an instruction. 2861 auto *I = cast<Instruction>(V); 2862 2863 // If we aren't vectorizing, we can just copy the scalar map values over to 2864 // the vector map. 2865 if (VF == 1) { 2866 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue); 2867 return ScalarValue; 2868 } 2869 2870 // Get the last scalar instruction we generated for V and Part. If the value 2871 // is known to be uniform after vectorization, this corresponds to lane zero 2872 // of the Part unroll iteration. Otherwise, the last instruction is the one 2873 // we created for the last vector lane of the Part unroll iteration. 2874 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1; 2875 auto *LastInst = cast<Instruction>( 2876 VectorLoopValueMap.getScalarValue(V, {Part, LastLane})); 2877 2878 // Set the insert point after the last scalarized instruction. This ensures 2879 // the insertelement sequence will directly follow the scalar definitions. 2880 auto OldIP = Builder.saveIP(); 2881 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 2882 Builder.SetInsertPoint(&*NewIP); 2883 2884 // However, if we are vectorizing, we need to construct the vector values. 2885 // If the value is known to be uniform after vectorization, we can just 2886 // broadcast the scalar value corresponding to lane zero for each unroll 2887 // iteration. 
Otherwise, we construct the vector values using insertelement 2888 // instructions. Since the resulting vectors are stored in 2889 // VectorLoopValueMap, we will only generate the insertelements once. 2890 Value *VectorValue = nullptr; 2891 if (Cost->isUniformAfterVectorization(I, VF)) { 2892 VectorValue = getBroadcastInstrs(ScalarValue); 2893 VectorLoopValueMap.setVectorValue(V, Part, VectorValue); 2894 } else { 2895 // Initialize packing with insertelements to start from undef. 2896 Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF)); 2897 VectorLoopValueMap.setVectorValue(V, Part, Undef); 2898 for (unsigned Lane = 0; Lane < VF; ++Lane) 2899 packScalarIntoVectorValue(V, {Part, Lane}); 2900 VectorValue = VectorLoopValueMap.getVectorValue(V, Part); 2901 } 2902 Builder.restoreIP(OldIP); 2903 return VectorValue; 2904 } 2905 2906 // If this scalar is unknown, assume that it is a constant or that it is 2907 // loop invariant. Broadcast V and save the value for future uses. 2908 Value *B = getBroadcastInstrs(V); 2909 VectorLoopValueMap.setVectorValue(V, Part, B); 2910 return B; 2911 } 2912 2913 Value * 2914 InnerLoopVectorizer::getOrCreateScalarValue(Value *V, 2915 const VPIteration &Instance) { 2916 // If the value is not an instruction contained in the loop, it should 2917 // already be scalar. 2918 if (OrigLoop->isLoopInvariant(V)) 2919 return V; 2920 2921 assert(Instance.Lane > 0 2922 ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF) 2923 : true && "Uniform values only have lane zero"); 2924 2925 // If the value from the original loop has not been vectorized, it is 2926 // represented by UF x VF scalar values in the new loop. Return the requested 2927 // scalar value. 2928 if (VectorLoopValueMap.hasScalarValue(V, Instance)) 2929 return VectorLoopValueMap.getScalarValue(V, Instance); 2930 2931 // If the value has not been scalarized, get its entry in VectorLoopValueMap 2932 // for the given unroll part. If this entry is not a vector type (i.e., the 2933 // vectorization factor is one), there is no need to generate an 2934 // extractelement instruction. 2935 auto *U = getOrCreateVectorValue(V, Instance.Part); 2936 if (!U->getType()->isVectorTy()) { 2937 assert(VF == 1 && "Value not scalarized has non-vector type"); 2938 return U; 2939 } 2940 2941 // Otherwise, the value from the original loop has been vectorized and is 2942 // represented by UF vector values. Extract and return the requested scalar 2943 // value from the appropriate vector lane. 
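  // E.g., for VF = 4, requesting {Part = 1, Lane = 2} of a vectorized value
  // produces (roughly) "extractelement <4 x i32> %val.part1, i32 2" (the type
  // and names here are only illustrative).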
2944 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane)); 2945 } 2946 2947 void InnerLoopVectorizer::packScalarIntoVectorValue( 2948 Value *V, const VPIteration &Instance) { 2949 assert(V != Induction && "The new induction variable should not be used."); 2950 assert(!V->getType()->isVectorTy() && "Can't pack a vector"); 2951 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2952 2953 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance); 2954 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part); 2955 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst, 2956 Builder.getInt32(Instance.Lane)); 2957 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue); 2958 } 2959 2960 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2961 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2962 SmallVector<Constant *, 8> ShuffleMask; 2963 for (unsigned i = 0; i < VF; ++i) 2964 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 2965 2966 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 2967 ConstantVector::get(ShuffleMask), 2968 "reverse"); 2969 } 2970 2971 // Try to vectorize the interleave group that \p Instr belongs to. 2972 // 2973 // E.g. Translate following interleaved load group (factor = 3): 2974 // for (i = 0; i < N; i+=3) { 2975 // R = Pic[i]; // Member of index 0 2976 // G = Pic[i+1]; // Member of index 1 2977 // B = Pic[i+2]; // Member of index 2 2978 // ... // do something to R, G, B 2979 // } 2980 // To: 2981 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2982 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 2983 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 2984 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 2985 // 2986 // Or translate following interleaved store group (factor = 3): 2987 // for (i = 0; i < N; i+=3) { 2988 // ... do something to R, G, B 2989 // Pic[i] = R; // Member of index 0 2990 // Pic[i+1] = G; // Member of index 1 2991 // Pic[i+2] = B; // Member of index 2 2992 // } 2993 // To: 2994 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2995 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 2996 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2997 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2998 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2999 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) { 3000 const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr); 3001 assert(Group && "Fail to get an interleaved access group."); 3002 3003 // Skip if current instruction is not the insert position. 3004 if (Instr != Group->getInsertPos()) 3005 return; 3006 3007 const DataLayout &DL = Instr->getModule()->getDataLayout(); 3008 Value *Ptr = getPointerOperand(Instr); 3009 3010 // Prepare for the vector type of the interleaved load/store. 3011 Type *ScalarTy = getMemInstValueType(Instr); 3012 unsigned InterleaveFactor = Group->getFactor(); 3013 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF); 3014 Type *PtrTy = VecTy->getPointerTo(getMemInstAddressSpace(Instr)); 3015 3016 // Prepare for the new pointers. 3017 setDebugLocFromInst(Builder, Ptr); 3018 SmallVector<Value *, 2> NewPtrs; 3019 unsigned Index = Group->getIndex(Instr); 3020 3021 // If the group is reverse, adjust the index to refer to the last vector lane 3022 // instead of the first. 
We adjust the index from the first vector lane, 3023 // rather than directly getting the pointer for lane VF - 1, because the 3024 // pointer operand of the interleaved access is supposed to be uniform. For 3025 // uniform instructions, we're only required to generate a value for the 3026 // first vector lane in each unroll iteration. 3027 if (Group->isReverse()) 3028 Index += (VF - 1) * Group->getFactor(); 3029 3030 for (unsigned Part = 0; Part < UF; Part++) { 3031 Value *NewPtr = getOrCreateScalarValue(Ptr, {Part, 0}); 3032 3033 // Notice current instruction could be any index. Need to adjust the address 3034 // to the member of index 0. 3035 // 3036 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 3037 // b = A[i]; // Member of index 0 3038 // Current pointer is pointed to A[i+1], adjust it to A[i]. 3039 // 3040 // E.g. A[i+1] = a; // Member of index 1 3041 // A[i] = b; // Member of index 0 3042 // A[i+2] = c; // Member of index 2 (Current instruction) 3043 // Current pointer is pointed to A[i+2], adjust it to A[i]. 3044 NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index)); 3045 3046 // Cast to the vector pointer type. 3047 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy)); 3048 } 3049 3050 setDebugLocFromInst(Builder, Instr); 3051 Value *UndefVec = UndefValue::get(VecTy); 3052 3053 // Vectorize the interleaved load group. 3054 if (isa<LoadInst>(Instr)) { 3055 // For each unroll part, create a wide load for the group. 3056 SmallVector<Value *, 2> NewLoads; 3057 for (unsigned Part = 0; Part < UF; Part++) { 3058 auto *NewLoad = Builder.CreateAlignedLoad( 3059 NewPtrs[Part], Group->getAlignment(), "wide.vec"); 3060 Group->addMetadata(NewLoad); 3061 NewLoads.push_back(NewLoad); 3062 } 3063 3064 // For each member in the group, shuffle out the appropriate data from the 3065 // wide loads. 3066 for (unsigned I = 0; I < InterleaveFactor; ++I) { 3067 Instruction *Member = Group->getMember(I); 3068 3069 // Skip the gaps in the group. 3070 if (!Member) 3071 continue; 3072 3073 Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF); 3074 for (unsigned Part = 0; Part < UF; Part++) { 3075 Value *StridedVec = Builder.CreateShuffleVector( 3076 NewLoads[Part], UndefVec, StrideMask, "strided.vec"); 3077 3078 // If this member has different type, cast the result type. 3079 if (Member->getType() != ScalarTy) { 3080 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 3081 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 3082 } 3083 3084 if (Group->isReverse()) 3085 StridedVec = reverseVector(StridedVec); 3086 3087 VectorLoopValueMap.setVectorValue(Member, Part, StridedVec); 3088 } 3089 } 3090 return; 3091 } 3092 3093 // The sub vector type for current instruction. 3094 VectorType *SubVT = VectorType::get(ScalarTy, VF); 3095 3096 // Vectorize the interleaved store group. 3097 for (unsigned Part = 0; Part < UF; Part++) { 3098 // Collect the stored vector from each member. 3099 SmallVector<Value *, 4> StoredVecs; 3100 for (unsigned i = 0; i < InterleaveFactor; i++) { 3101 // Interleaved store group doesn't allow a gap, so each index has a member 3102 Instruction *Member = Group->getMember(i); 3103 assert(Member && "Fail to get a member from an interleaved store group"); 3104 3105 Value *StoredVec = getOrCreateVectorValue( 3106 cast<StoreInst>(Member)->getValueOperand(), Part); 3107 if (Group->isReverse()) 3108 StoredVec = reverseVector(StoredVec); 3109 3110 // If this member has different type, cast it to a unified type. 
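  // This can happen when the group mixes members of different but equally
  // sized element types; createBitOrPointerCast below handles the pointer and
  // floating-point cases with an intermediate integer vector if needed.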
3111 3112 if (StoredVec->getType() != SubVT) 3113 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 3114 3115 StoredVecs.push_back(StoredVec); 3116 } 3117 3118 // Concatenate all vectors into a wide vector. 3119 Value *WideVec = concatenateVectors(Builder, StoredVecs); 3120 3121 // Interleave the elements in the wide vector. 3122 Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor); 3123 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask, 3124 "interleaved.vec"); 3125 3126 Instruction *NewStoreInstr = 3127 Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment()); 3128 3129 Group->addMetadata(NewStoreInstr); 3130 } 3131 } 3132 3133 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr, 3134 VectorParts *BlockInMask) { 3135 // Attempt to issue a wide load. 3136 LoadInst *LI = dyn_cast<LoadInst>(Instr); 3137 StoreInst *SI = dyn_cast<StoreInst>(Instr); 3138 3139 assert((LI || SI) && "Invalid Load/Store instruction"); 3140 3141 LoopVectorizationCostModel::InstWidening Decision = 3142 Cost->getWideningDecision(Instr, VF); 3143 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 3144 "CM decision should be taken at this point"); 3145 if (Decision == LoopVectorizationCostModel::CM_Interleave) 3146 return vectorizeInterleaveGroup(Instr); 3147 3148 Type *ScalarDataTy = getMemInstValueType(Instr); 3149 Type *DataTy = VectorType::get(ScalarDataTy, VF); 3150 Value *Ptr = getPointerOperand(Instr); 3151 unsigned Alignment = getMemInstAlignment(Instr); 3152 // An alignment of 0 means target abi alignment. We need to use the scalar's 3153 // target abi alignment in such a case. 3154 const DataLayout &DL = Instr->getModule()->getDataLayout(); 3155 if (!Alignment) 3156 Alignment = DL.getABITypeAlignment(ScalarDataTy); 3157 unsigned AddressSpace = getMemInstAddressSpace(Instr); 3158 3159 // Determine if the pointer operand of the access is either consecutive or 3160 // reverse consecutive. 3161 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 3162 bool Reverse = ConsecutiveStride < 0; 3163 bool CreateGatherScatter = 3164 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 3165 3166 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 3167 // gather/scatter. Otherwise Decision should have been to Scalarize. 3168 assert((ConsecutiveStride || CreateGatherScatter) && 3169 "The instruction should be scalarized"); 3170 3171 // Handle consecutive loads/stores. 3172 if (ConsecutiveStride) 3173 Ptr = getOrCreateScalarValue(Ptr, {0, 0}); 3174 3175 VectorParts Mask; 3176 bool isMaskRequired = BlockInMask; 3177 if (isMaskRequired) 3178 Mask = *BlockInMask; 3179 3180 // Handle Stores: 3181 if (SI) { 3182 assert(!Legal->isUniform(SI->getPointerOperand()) && 3183 "We do not allow storing to uniform addresses"); 3184 setDebugLocFromInst(Builder, SI); 3185 3186 for (unsigned Part = 0; Part < UF; ++Part) { 3187 Instruction *NewSI = nullptr; 3188 Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part); 3189 if (CreateGatherScatter) { 3190 Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr; 3191 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 3192 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 3193 MaskPart); 3194 } else { 3195 // Calculate the pointer for the specific unroll-part. 
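      // For unroll part P this is (roughly) "getelementptr %ptr, i32 P * VF":
      // each unrolled copy of the wide store is offset by one full vector of
      // elements from the previous one.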
3196 Value *PartPtr = 3197 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 3198 3199 if (Reverse) { 3200 // If we store to reverse consecutive memory locations, then we need 3201 // to reverse the order of elements in the stored value. 3202 StoredVal = reverseVector(StoredVal); 3203 // We don't want to update the value in the map as it might be used in 3204 // another expression. So don't call resetVectorValue(StoredVal). 3205 3206 // If the address is consecutive but reversed, then the 3207 // wide store needs to start at the last vector element. 3208 PartPtr = 3209 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 3210 PartPtr = 3211 Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 3212 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 3213 Mask[Part] = reverseVector(Mask[Part]); 3214 } 3215 3216 Value *VecPtr = 3217 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 3218 3219 if (isMaskRequired) 3220 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 3221 Mask[Part]); 3222 else 3223 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 3224 } 3225 addMetadata(NewSI, SI); 3226 } 3227 return; 3228 } 3229 3230 // Handle loads. 3231 assert(LI && "Must have a load instruction"); 3232 setDebugLocFromInst(Builder, LI); 3233 for (unsigned Part = 0; Part < UF; ++Part) { 3234 Value *NewLI; 3235 if (CreateGatherScatter) { 3236 Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr; 3237 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 3238 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 3239 nullptr, "wide.masked.gather"); 3240 addMetadata(NewLI, LI); 3241 } else { 3242 // Calculate the pointer for the specific unroll-part. 3243 Value *PartPtr = 3244 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 3245 3246 if (Reverse) { 3247 // If the address is consecutive but reversed, then the 3248 // wide load needs to start at the last vector element. 3249 PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 3250 PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 3251 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 3252 Mask[Part] = reverseVector(Mask[Part]); 3253 } 3254 3255 Value *VecPtr = 3256 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 3257 if (isMaskRequired) 3258 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 3259 UndefValue::get(DataTy), 3260 "wide.masked.load"); 3261 else 3262 NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load"); 3263 3264 // Add metadata to the load, but setVectorValue to the reverse shuffle. 3265 addMetadata(NewLI, LI); 3266 if (Reverse) 3267 NewLI = reverseVector(NewLI); 3268 } 3269 VectorLoopValueMap.setVectorValue(Instr, Part, NewLI); 3270 } 3271 } 3272 3273 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 3274 const VPIteration &Instance, 3275 bool IfPredicateInstr) { 3276 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 3277 3278 setDebugLocFromInst(Builder, Instr); 3279 3280 // Does this instruction return a value ? 3281 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 3282 3283 Instruction *Cloned = Instr->clone(); 3284 if (!IsVoidRetTy) 3285 Cloned->setName(Instr->getName() + ".cloned"); 3286 3287 // Replace the operands of the cloned instructions with their scalar 3288 // equivalents in the new loop. 
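  // Each operand is resolved for the specific {Part, Lane} being generated, so
  // a use of a value that was widened becomes an extractelement of the
  // requested lane (see getOrCreateScalarValue).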
3289 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 3290 auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance); 3291 Cloned->setOperand(op, NewOp); 3292 } 3293 addNewMetadata(Cloned, Instr); 3294 3295 // Place the cloned scalar in the new loop. 3296 Builder.Insert(Cloned); 3297 3298 // Add the cloned scalar to the scalar map entry. 3299 VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned); 3300 3301 // If we just cloned a new assumption, add it the assumption cache. 3302 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 3303 if (II->getIntrinsicID() == Intrinsic::assume) 3304 AC->registerAssumption(II); 3305 3306 // End if-block. 3307 if (IfPredicateInstr) 3308 PredicatedInstructions.push_back(Cloned); 3309 } 3310 3311 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 3312 Value *End, Value *Step, 3313 Instruction *DL) { 3314 BasicBlock *Header = L->getHeader(); 3315 BasicBlock *Latch = L->getLoopLatch(); 3316 // As we're just creating this loop, it's possible no latch exists 3317 // yet. If so, use the header as this will be a single block loop. 3318 if (!Latch) 3319 Latch = Header; 3320 3321 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 3322 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 3323 setDebugLocFromInst(Builder, OldInst); 3324 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 3325 3326 Builder.SetInsertPoint(Latch->getTerminator()); 3327 setDebugLocFromInst(Builder, OldInst); 3328 3329 // Create i+1 and fill the PHINode. 3330 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 3331 Induction->addIncoming(Start, L->getLoopPreheader()); 3332 Induction->addIncoming(Next, Latch); 3333 // Create the compare. 3334 Value *ICmp = Builder.CreateICmpEQ(Next, End); 3335 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 3336 3337 // Now we have two terminators. Remove the old one from the block. 3338 Latch->getTerminator()->eraseFromParent(); 3339 3340 return Induction; 3341 } 3342 3343 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 3344 if (TripCount) 3345 return TripCount; 3346 3347 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3348 // Find the loop boundaries. 3349 ScalarEvolution *SE = PSE.getSE(); 3350 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 3351 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 3352 "Invalid loop count"); 3353 3354 Type *IdxTy = Legal->getWidestInductionType(); 3355 3356 // The exit count might have the type of i64 while the phi is i32. This can 3357 // happen if we have an induction variable that is sign extended before the 3358 // compare. The only way that we get a backedge taken count is that the 3359 // induction variable was signed and as such will not overflow. In such a case 3360 // truncation is legal. 3361 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() > 3362 IdxTy->getPrimitiveSizeInBits()) 3363 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3364 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3365 3366 // Get the total trip count from the count by adding 1. 3367 const SCEV *ExitCount = SE->getAddExpr( 3368 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3369 3370 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3371 3372 // Expand the trip count and place the new instructions in the preheader. 3373 // Notice that the pre-header does not change, only the loop body. 
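  // E.g., if the backedge-taken count is simply "%n", the expander emits
  // (roughly) "%trip.count = add i64 %n, 1" before the preheader's terminator;
  // richer counts may expand to a short arithmetic sequence (names
  // illustrative).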
3374 SCEVExpander Exp(*SE, DL, "induction"); 3375 3376 // Count holds the overall loop count (N). 3377 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3378 L->getLoopPreheader()->getTerminator()); 3379 3380 if (TripCount->getType()->isPointerTy()) 3381 TripCount = 3382 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3383 L->getLoopPreheader()->getTerminator()); 3384 3385 return TripCount; 3386 } 3387 3388 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3389 if (VectorTripCount) 3390 return VectorTripCount; 3391 3392 Value *TC = getOrCreateTripCount(L); 3393 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3394 3395 // Now we need to generate the expression for the part of the loop that the 3396 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3397 // iterations are not required for correctness, or N - Step, otherwise. Step 3398 // is equal to the vectorization factor (number of SIMD elements) times the 3399 // unroll factor (number of SIMD instructions). 3400 Constant *Step = ConstantInt::get(TC->getType(), VF * UF); 3401 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3402 3403 // If there is a non-reversed interleaved group that may speculatively access 3404 // memory out-of-bounds, we need to ensure that there will be at least one 3405 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 3406 // the trip count, we set the remainder to be equal to the step. If the step 3407 // does not evenly divide the trip count, no adjustment is necessary since 3408 // there will already be scalar iterations. Note that the minimum iterations 3409 // check ensures that N >= Step. 3410 if (VF > 1 && Legal->requiresScalarEpilogue()) { 3411 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3412 R = Builder.CreateSelect(IsZero, Step, R); 3413 } 3414 3415 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3416 3417 return VectorTripCount; 3418 } 3419 3420 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 3421 const DataLayout &DL) { 3422 // Verify that V is a vector type with same number of elements as DstVTy. 3423 unsigned VF = DstVTy->getNumElements(); 3424 VectorType *SrcVecTy = cast<VectorType>(V->getType()); 3425 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 3426 Type *SrcElemTy = SrcVecTy->getElementType(); 3427 Type *DstElemTy = DstVTy->getElementType(); 3428 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 3429 "Vector elements must have same size"); 3430 3431 // Do a direct cast if element types are castable. 3432 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 3433 return Builder.CreateBitOrPointerCast(V, DstVTy); 3434 } 3435 // V cannot be directly casted to desired vector type. 3436 // May happen when V is a floating point vector but DstVTy is a vector of 3437 // pointers or vice-versa. Handle this using a two-step bitcast using an 3438 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 
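  // E.g., casting <4 x float> to <4 x i8*> on a target with 32-bit pointers
  // goes <4 x float> -> <4 x i32> -> <4 x i8*>; the element-size assertion
  // above guarantees the intermediate integer width matches (illustrative
  // types).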
3439 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 3440 "Only one type should be a pointer type"); 3441 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 3442 "Only one type should be a floating point type"); 3443 Type *IntTy = 3444 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 3445 VectorType *VecIntTy = VectorType::get(IntTy, VF); 3446 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 3447 return Builder.CreateBitOrPointerCast(CastVal, DstVTy); 3448 } 3449 3450 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3451 BasicBlock *Bypass) { 3452 Value *Count = getOrCreateTripCount(L); 3453 BasicBlock *BB = L->getLoopPreheader(); 3454 IRBuilder<> Builder(BB->getTerminator()); 3455 3456 // Generate code to check if the loop's trip count is less than VF * UF, or 3457 // equal to it in case a scalar epilogue is required; this implies that the 3458 // vector trip count is zero. This check also covers the case where adding one 3459 // to the backedge-taken count overflowed leading to an incorrect trip count 3460 // of zero. In this case we will also jump to the scalar loop. 3461 auto P = Legal->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE 3462 : ICmpInst::ICMP_ULT; 3463 Value *CheckMinIters = Builder.CreateICmp( 3464 P, Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check"); 3465 3466 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 3467 // Update dominator tree immediately if the generated block is a 3468 // LoopBypassBlock because SCEV expansions to generate loop bypass 3469 // checks may query it before the current function is finished. 3470 DT->addNewBlock(NewBB, BB); 3471 if (L->getParentLoop()) 3472 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 3473 ReplaceInstWithInst(BB->getTerminator(), 3474 BranchInst::Create(Bypass, NewBB, CheckMinIters)); 3475 LoopBypassBlocks.push_back(BB); 3476 } 3477 3478 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 3479 BasicBlock *BB = L->getLoopPreheader(); 3480 3481 // Generate the code to check that the SCEV assumptions that we made. 3482 // We want the new basic block to start at the first instruction in a 3483 // sequence of instructions that form a check. 3484 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(), 3485 "scev.check"); 3486 Value *SCEVCheck = 3487 Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator()); 3488 3489 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck)) 3490 if (C->isZero()) 3491 return; 3492 3493 // Create a new block containing the stride check. 3494 BB->setName("vector.scevcheck"); 3495 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 3496 // Update dominator tree immediately if the generated block is a 3497 // LoopBypassBlock because SCEV expansions to generate loop bypass 3498 // checks may query it before the current function is finished. 3499 DT->addNewBlock(NewBB, BB); 3500 if (L->getParentLoop()) 3501 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 3502 ReplaceInstWithInst(BB->getTerminator(), 3503 BranchInst::Create(Bypass, NewBB, SCEVCheck)); 3504 LoopBypassBlocks.push_back(BB); 3505 AddedSafetyChecks = true; 3506 } 3507 3508 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) { 3509 BasicBlock *BB = L->getLoopPreheader(); 3510 3511 // Generate the code that checks in runtime if arrays overlap. 
We put the 3512 // checks into a separate block to make the more common case of few elements 3513 // faster. 3514 Instruction *FirstCheckInst; 3515 Instruction *MemRuntimeCheck; 3516 std::tie(FirstCheckInst, MemRuntimeCheck) = 3517 Legal->getLAI()->addRuntimeChecks(BB->getTerminator()); 3518 if (!MemRuntimeCheck) 3519 return; 3520 3521 // Create a new block containing the memory check. 3522 BB->setName("vector.memcheck"); 3523 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 3524 // Update dominator tree immediately if the generated block is a 3525 // LoopBypassBlock because SCEV expansions to generate loop bypass 3526 // checks may query it before the current function is finished. 3527 DT->addNewBlock(NewBB, BB); 3528 if (L->getParentLoop()) 3529 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 3530 ReplaceInstWithInst(BB->getTerminator(), 3531 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck)); 3532 LoopBypassBlocks.push_back(BB); 3533 AddedSafetyChecks = true; 3534 3535 // We currently don't use LoopVersioning for the actual loop cloning but we 3536 // still use it to add the noalias metadata. 3537 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 3538 PSE.getSE()); 3539 LVer->prepareNoAliasMetadata(); 3540 } 3541 3542 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3543 /* 3544 In this function we generate a new loop. The new loop will contain 3545 the vectorized instructions while the old loop will continue to run the 3546 scalar remainder. 3547 3548 [ ] <-- loop iteration number check. 3549 / | 3550 / v 3551 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3552 | / | 3553 | / v 3554 || [ ] <-- vector pre header. 3555 |/ | 3556 | v 3557 | [ ] \ 3558 | [ ]_| <-- vector loop. 3559 | | 3560 | v 3561 | -[ ] <--- middle-block. 3562 | / | 3563 | / v 3564 -|- >[ ] <--- new preheader. 3565 | | 3566 | v 3567 | [ ] \ 3568 | [ ]_| <-- old scalar loop to handle remainder. 3569 \ | 3570 \ v 3571 >[ ] <-- exit block. 3572 ... 3573 */ 3574 3575 BasicBlock *OldBasicBlock = OrigLoop->getHeader(); 3576 BasicBlock *VectorPH = OrigLoop->getLoopPreheader(); 3577 BasicBlock *ExitBlock = OrigLoop->getExitBlock(); 3578 assert(VectorPH && "Invalid loop structure"); 3579 assert(ExitBlock && "Must have an exit block"); 3580 3581 // Some loops have a single integer induction variable, while other loops 3582 // don't. One example is c++ iterators that often have multiple pointer 3583 // induction variables. In the code below we also support a case where we 3584 // don't have a single induction variable. 3585 // 3586 // We try to obtain an induction variable from the original loop as hard 3587 // as possible. However if we don't find one that: 3588 // - is an integer 3589 // - counts from zero, stepping by one 3590 // - is the size of the widest induction variable type 3591 // then we create a new one. 3592 OldInduction = Legal->getPrimaryInduction(); 3593 Type *IdxTy = Legal->getWidestInductionType(); 3594 3595 // Split the single block loop into the two loop structure described above. 3596 BasicBlock *VecBody = 3597 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body"); 3598 BasicBlock *MiddleBlock = 3599 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block"); 3600 BasicBlock *ScalarPH = 3601 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph"); 3602 3603 // Create and register the new vector loop. 
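  // At this point vector.body, middle.block and scalar.ph each contain only an
  // unconditional branch; the code below wires the new loop into LoopInfo,
  // emits the bypass checks shown in the diagram, and creates the canonical
  // induction variable of the vector loop.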
3604 Loop *Lp = LI->AllocateLoop(); 3605 Loop *ParentLoop = OrigLoop->getParentLoop(); 3606 3607 // Insert the new loop into the loop nest and register the new basic blocks 3608 // before calling any utilities such as SCEV that require valid LoopInfo. 3609 if (ParentLoop) { 3610 ParentLoop->addChildLoop(Lp); 3611 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI); 3612 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI); 3613 } else { 3614 LI->addTopLevelLoop(Lp); 3615 } 3616 Lp->addBasicBlockToLoop(VecBody, *LI); 3617 3618 // Find the loop boundaries. 3619 Value *Count = getOrCreateTripCount(Lp); 3620 3621 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3622 3623 // Now, compare the new count to zero. If it is zero skip the vector loop and 3624 // jump to the scalar loop. This check also covers the case where the 3625 // backedge-taken count is uint##_max: adding one to it will overflow leading 3626 // to an incorrect trip count of zero. In this (rare) case we will also jump 3627 // to the scalar loop. 3628 emitMinimumIterationCountCheck(Lp, ScalarPH); 3629 3630 // Generate the code to check any assumptions that we've made for SCEV 3631 // expressions. 3632 emitSCEVChecks(Lp, ScalarPH); 3633 3634 // Generate the code that checks in runtime if arrays overlap. We put the 3635 // checks into a separate block to make the more common case of few elements 3636 // faster. 3637 emitMemRuntimeChecks(Lp, ScalarPH); 3638 3639 // Generate the induction variable. 3640 // The loop step is equal to the vectorization factor (num of SIMD elements) 3641 // times the unroll factor (num of SIMD instructions). 3642 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3643 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 3644 Induction = 3645 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3646 getDebugLocFromInstOrOperands(OldInduction)); 3647 3648 // We are going to resume the execution of the scalar loop. 3649 // Go over all of the induction variables that we found and fix the 3650 // PHIs that are left in the scalar version of the loop. 3651 // The starting values of PHI nodes depend on the counter of the last 3652 // iteration in the vectorized loop. 3653 // If we come from a bypass edge then we need to start from the original 3654 // start value. 3655 3656 // This variable saves the new starting index for the scalar loop. It is used 3657 // to test if there are any tail iterations left once the vector loop has 3658 // completed. 3659 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars(); 3660 for (auto &InductionEntry : *List) { 3661 PHINode *OrigPhi = InductionEntry.first; 3662 InductionDescriptor II = InductionEntry.second; 3663 3664 // Create phi nodes to merge from the backedge-taken check block. 3665 PHINode *BCResumeVal = PHINode::Create( 3666 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator()); 3667 Value *&EndValue = IVEndValues[OrigPhi]; 3668 if (OrigPhi == OldInduction) { 3669 // We know what the end value is. 
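      // The primary induction counts from zero in steps of one, so its value
      // on exit from the vector loop is exactly the vector trip count.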
3670 EndValue = CountRoundDown; 3671 } else { 3672 IRBuilder<> B(Lp->getLoopPreheader()->getTerminator()); 3673 Type *StepType = II.getStep()->getType(); 3674 Instruction::CastOps CastOp = 3675 CastInst::getCastOpcode(CountRoundDown, true, StepType, true); 3676 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd"); 3677 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 3678 EndValue = II.transform(B, CRD, PSE.getSE(), DL); 3679 EndValue->setName("ind.end"); 3680 } 3681 3682 // The new PHI merges the original incoming value, in case of a bypass, 3683 // or the value at the end of the vectorized loop. 3684 BCResumeVal->addIncoming(EndValue, MiddleBlock); 3685 3686 // Fix the scalar body counter (PHI node). 3687 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH); 3688 3689 // The old induction's phi node in the scalar body needs the truncated 3690 // value. 3691 for (BasicBlock *BB : LoopBypassBlocks) 3692 BCResumeVal->addIncoming(II.getStartValue(), BB); 3693 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal); 3694 } 3695 3696 // Add a check in the middle block to see if we have completed 3697 // all of the iterations in the first vector loop. 3698 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3699 Value *CmpN = 3700 CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 3701 CountRoundDown, "cmp.n", MiddleBlock->getTerminator()); 3702 ReplaceInstWithInst(MiddleBlock->getTerminator(), 3703 BranchInst::Create(ExitBlock, ScalarPH, CmpN)); 3704 3705 // Get ready to start creating new instructions into the vectorized body. 3706 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt()); 3707 3708 // Save the state. 3709 LoopVectorPreHeader = Lp->getLoopPreheader(); 3710 LoopScalarPreHeader = ScalarPH; 3711 LoopMiddleBlock = MiddleBlock; 3712 LoopExitBlock = ExitBlock; 3713 LoopVectorBody = VecBody; 3714 LoopScalarBody = OldBasicBlock; 3715 3716 // Keep all loop hints from the original loop on the vector loop (we'll 3717 // replace the vectorizer-specific hints below). 3718 if (MDNode *LID = OrigLoop->getLoopID()) 3719 Lp->setLoopID(LID); 3720 3721 LoopVectorizeHints Hints(Lp, true, *ORE); 3722 Hints.setAlreadyVectorized(); 3723 3724 return LoopVectorPreHeader; 3725 } 3726 3727 // Fix up external users of the induction variable. At this point, we are 3728 // in LCSSA form, with all external PHIs that use the IV having one input value, 3729 // coming from the remainder loop. We need those PHIs to also have a correct 3730 // value for the IV when arriving directly from the middle block. 3731 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3732 const InductionDescriptor &II, 3733 Value *CountRoundDown, Value *EndValue, 3734 BasicBlock *MiddleBlock) { 3735 // There are two kinds of external IV usages - those that use the value 3736 // computed in the last iteration (the PHI) and those that use the penultimate 3737 // value (the value that feeds into the phi from the loop latch). 3738 // We allow both, but they, obviously, have different values. 3739 3740 assert(OrigLoop->getExitBlock() && "Expected a single exit block"); 3741 3742 DenseMap<Value *, Value *> MissingVals; 3743 3744 // An external user of the last iteration's value should see the value that 3745 // the remainder loop uses to initialize its own IV. 
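  // That is the EndValue computed in createVectorizedLoopSkeleton from the
  // vector trip count.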
3746 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3747 for (User *U : PostInc->users()) { 3748 Instruction *UI = cast<Instruction>(U); 3749 if (!OrigLoop->contains(UI)) { 3750 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3751 MissingVals[UI] = EndValue; 3752 } 3753 } 3754 3755 // An external user of the penultimate value need to see EndValue - Step. 3756 // The simplest way to get this is to recompute it from the constituent SCEVs, 3757 // that is Start + (Step * (CRD - 1)). 3758 for (User *U : OrigPhi->users()) { 3759 auto *UI = cast<Instruction>(U); 3760 if (!OrigLoop->contains(UI)) { 3761 const DataLayout &DL = 3762 OrigLoop->getHeader()->getModule()->getDataLayout(); 3763 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3764 3765 IRBuilder<> B(MiddleBlock->getTerminator()); 3766 Value *CountMinusOne = B.CreateSub( 3767 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3768 Value *CMO = 3769 !II.getStep()->getType()->isIntegerTy() 3770 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3771 II.getStep()->getType()) 3772 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3773 CMO->setName("cast.cmo"); 3774 Value *Escape = II.transform(B, CMO, PSE.getSE(), DL); 3775 Escape->setName("ind.escape"); 3776 MissingVals[UI] = Escape; 3777 } 3778 } 3779 3780 for (auto &I : MissingVals) { 3781 PHINode *PHI = cast<PHINode>(I.first); 3782 // One corner case we have to handle is two IVs "chasing" each-other, 3783 // that is %IV2 = phi [...], [ %IV1, %latch ] 3784 // In this case, if IV1 has an external use, we need to avoid adding both 3785 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3786 // don't already have an incoming value for the middle block. 3787 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3788 PHI->addIncoming(I.second, MiddleBlock); 3789 } 3790 } 3791 3792 namespace { 3793 3794 struct CSEDenseMapInfo { 3795 static bool canHandle(const Instruction *I) { 3796 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3797 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3798 } 3799 3800 static inline Instruction *getEmptyKey() { 3801 return DenseMapInfo<Instruction *>::getEmptyKey(); 3802 } 3803 3804 static inline Instruction *getTombstoneKey() { 3805 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3806 } 3807 3808 static unsigned getHashValue(const Instruction *I) { 3809 assert(canHandle(I) && "Unknown instruction!"); 3810 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3811 I->value_op_end())); 3812 } 3813 3814 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3815 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3816 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3817 return LHS == RHS; 3818 return LHS->isIdenticalTo(RHS); 3819 } 3820 }; 3821 3822 } // end anonymous namespace 3823 3824 ///\brief Perform cse of induction variable instructions. 3825 static void cse(BasicBlock *BB) { 3826 // Perform simple cse. 3827 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3828 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3829 Instruction *In = &*I++; 3830 3831 if (!CSEDenseMapInfo::canHandle(In)) 3832 continue; 3833 3834 // Check if we can replace this instruction with any of the 3835 // visited instructions. 
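    // Instructions that are identical (same opcode and operands, see
    // CSEDenseMapInfo above) hash to the same slot, so e.g. duplicate
    // broadcast shuffles of the same value collapse into a single one.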
3836 if (Instruction *V = CSEMap.lookup(In)) { 3837 In->replaceAllUsesWith(V); 3838 In->eraseFromParent(); 3839 continue; 3840 } 3841 3842 CSEMap[In] = In; 3843 } 3844 } 3845 3846 /// \brief Estimate the overhead of scalarizing an instruction. This is a 3847 /// convenience wrapper for the type-based getScalarizationOverhead API. 3848 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF, 3849 const TargetTransformInfo &TTI) { 3850 if (VF == 1) 3851 return 0; 3852 3853 unsigned Cost = 0; 3854 Type *RetTy = ToVectorTy(I->getType(), VF); 3855 if (!RetTy->isVoidTy() && 3856 (!isa<LoadInst>(I) || 3857 !TTI.supportsEfficientVectorElementLoadStore())) 3858 Cost += TTI.getScalarizationOverhead(RetTy, true, false); 3859 3860 if (CallInst *CI = dyn_cast<CallInst>(I)) { 3861 SmallVector<const Value *, 4> Operands(CI->arg_operands()); 3862 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF); 3863 } 3864 else if (!isa<StoreInst>(I) || 3865 !TTI.supportsEfficientVectorElementLoadStore()) { 3866 SmallVector<const Value *, 4> Operands(I->operand_values()); 3867 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF); 3868 } 3869 3870 return Cost; 3871 } 3872 3873 // Estimate cost of a call instruction CI if it were vectorized with factor VF. 3874 // Return the cost of the instruction, including scalarization overhead if it's 3875 // needed. The flag NeedToScalarize shows if the call needs to be scalarized - 3876 // i.e. either vector version isn't available, or is too expensive. 3877 static unsigned getVectorCallCost(CallInst *CI, unsigned VF, 3878 const TargetTransformInfo &TTI, 3879 const TargetLibraryInfo *TLI, 3880 bool &NeedToScalarize) { 3881 Function *F = CI->getCalledFunction(); 3882 StringRef FnName = CI->getCalledFunction()->getName(); 3883 Type *ScalarRetTy = CI->getType(); 3884 SmallVector<Type *, 4> Tys, ScalarTys; 3885 for (auto &ArgOp : CI->arg_operands()) 3886 ScalarTys.push_back(ArgOp->getType()); 3887 3888 // Estimate cost of scalarized vector call. The source operands are assumed 3889 // to be vectors, so we need to extract individual elements from there, 3890 // execute VF scalar calls, and then gather the result into the vector return 3891 // value. 3892 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3893 if (VF == 1) 3894 return ScalarCallCost; 3895 3896 // Compute corresponding vector type for return value and arguments. 3897 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3898 for (Type *ScalarTy : ScalarTys) 3899 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3900 3901 // Compute costs of unpacking argument values for the scalar calls and 3902 // packing the return values to a vector. 3903 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI); 3904 3905 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3906 3907 // If we can't emit a vector call for this function, then the currently found 3908 // cost is the cost we need to return. 3909 NeedToScalarize = true; 3910 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3911 return Cost; 3912 3913 // If the corresponding vector cost is cheaper, return its cost. 3914 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3915 if (VectorCallCost < Cost) { 3916 NeedToScalarize = false; 3917 return VectorCallCost; 3918 } 3919 return Cost; 3920 } 3921 3922 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3923 // factor VF. 
Return the cost of the instruction, including scalarization 3924 // overhead if it's needed. 3925 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3926 const TargetTransformInfo &TTI, 3927 const TargetLibraryInfo *TLI) { 3928 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3929 assert(ID && "Expected intrinsic call!"); 3930 3931 FastMathFlags FMF; 3932 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3933 FMF = FPMO->getFastMathFlags(); 3934 3935 SmallVector<Value *, 4> Operands(CI->arg_operands()); 3936 return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF); 3937 } 3938 3939 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3940 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3941 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3942 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3943 } 3944 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3945 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3946 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3947 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3948 } 3949 3950 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3951 // For every instruction `I` in MinBWs, truncate the operands, create a 3952 // truncated version of `I` and reextend its result. InstCombine runs 3953 // later and will remove any ext/trunc pairs. 3954 SmallPtrSet<Value *, 4> Erased; 3955 for (const auto &KV : Cost->getMinimalBitwidths()) { 3956 // If the value wasn't vectorized, we must maintain the original scalar 3957 // type. The absence of the value from VectorLoopValueMap indicates that it 3958 // wasn't vectorized. 3959 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3960 continue; 3961 for (unsigned Part = 0; Part < UF; ++Part) { 3962 Value *I = getOrCreateVectorValue(KV.first, Part); 3963 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3964 continue; 3965 Type *OriginalTy = I->getType(); 3966 Type *ScalarTruncatedTy = 3967 IntegerType::get(OriginalTy->getContext(), KV.second); 3968 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3969 OriginalTy->getVectorNumElements()); 3970 if (TruncatedTy == OriginalTy) 3971 continue; 3972 3973 IRBuilder<> B(cast<Instruction>(I)); 3974 auto ShrinkOperand = [&](Value *V) -> Value * { 3975 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3976 if (ZI->getSrcTy() == TruncatedTy) 3977 return ZI->getOperand(0); 3978 return B.CreateZExtOrTrunc(V, TruncatedTy); 3979 }; 3980 3981 // The actual instruction modification depends on the instruction type, 3982 // unfortunately. 3983 Value *NewI = nullptr; 3984 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3985 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3986 ShrinkOperand(BO->getOperand(1))); 3987 3988 // Any wrapping introduced by shrinking this operation shouldn't be 3989 // considered undefined behavior. So, we can't unconditionally copy 3990 // arithmetic wrapping flags to NewI. 
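        // For example, an "add nsw i32" that only needs 8 bits of precision
        // may legitimately wrap once evaluated in i8, so the nsw/nuw flags
        // must not be propagated (widths illustrative).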
3991 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3992 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3993 NewI = 3994 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3995 ShrinkOperand(CI->getOperand(1))); 3996 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3997 NewI = B.CreateSelect(SI->getCondition(), 3998 ShrinkOperand(SI->getTrueValue()), 3999 ShrinkOperand(SI->getFalseValue())); 4000 } else if (auto *CI = dyn_cast<CastInst>(I)) { 4001 switch (CI->getOpcode()) { 4002 default: 4003 llvm_unreachable("Unhandled cast!"); 4004 case Instruction::Trunc: 4005 NewI = ShrinkOperand(CI->getOperand(0)); 4006 break; 4007 case Instruction::SExt: 4008 NewI = B.CreateSExtOrTrunc( 4009 CI->getOperand(0), 4010 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4011 break; 4012 case Instruction::ZExt: 4013 NewI = B.CreateZExtOrTrunc( 4014 CI->getOperand(0), 4015 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4016 break; 4017 } 4018 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 4019 auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements(); 4020 auto *O0 = B.CreateZExtOrTrunc( 4021 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 4022 auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements(); 4023 auto *O1 = B.CreateZExtOrTrunc( 4024 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 4025 4026 NewI = B.CreateShuffleVector(O0, O1, SI->getMask()); 4027 } else if (isa<LoadInst>(I)) { 4028 // Don't do anything with the operands, just extend the result. 4029 continue; 4030 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 4031 auto Elements = IE->getOperand(0)->getType()->getVectorNumElements(); 4032 auto *O0 = B.CreateZExtOrTrunc( 4033 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4034 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 4035 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 4036 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 4037 auto Elements = EE->getOperand(0)->getType()->getVectorNumElements(); 4038 auto *O0 = B.CreateZExtOrTrunc( 4039 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4040 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 4041 } else { 4042 llvm_unreachable("Unhandled instruction type!"); 4043 } 4044 4045 // Lastly, extend the result. 4046 NewI->takeName(cast<Instruction>(I)); 4047 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 4048 I->replaceAllUsesWith(Res); 4049 cast<Instruction>(I)->eraseFromParent(); 4050 Erased.insert(I); 4051 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res); 4052 } 4053 } 4054 4055 // We'll have created a bunch of ZExts that are now parentless. Clean up. 4056 for (const auto &KV : Cost->getMinimalBitwidths()) { 4057 // If the value wasn't vectorized, we must maintain the original scalar 4058 // type. The absence of the value from VectorLoopValueMap indicates that it 4059 // wasn't vectorized. 
4060 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 4061 continue; 4062 for (unsigned Part = 0; Part < UF; ++Part) { 4063 Value *I = getOrCreateVectorValue(KV.first, Part); 4064 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 4065 if (Inst && Inst->use_empty()) { 4066 Value *NewI = Inst->getOperand(0); 4067 Inst->eraseFromParent(); 4068 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI); 4069 } 4070 } 4071 } 4072 } 4073 4074 void InnerLoopVectorizer::fixVectorizedLoop() { 4075 // Insert truncates and extends for any truncated instructions as hints to 4076 // InstCombine. 4077 if (VF > 1) 4078 truncateToMinimalBitwidths(); 4079 4080 // At this point every instruction in the original loop is widened to a 4081 // vector form. Now we need to fix the recurrences in the loop. These PHI 4082 // nodes are currently empty because we did not want to introduce cycles. 4083 // This is the second stage of vectorizing recurrences. 4084 fixCrossIterationPHIs(); 4085 4086 // Update the dominator tree. 4087 // 4088 // FIXME: After creating the structure of the new loop, the dominator tree is 4089 // no longer up-to-date, and it remains that way until we update it 4090 // here. An out-of-date dominator tree is problematic for SCEV, 4091 // because SCEVExpander uses it to guide code generation. The 4092 // vectorizer use SCEVExpanders in several places. Instead, we should 4093 // keep the dominator tree up-to-date as we go. 4094 updateAnalysis(); 4095 4096 // Fix-up external users of the induction variables. 4097 for (auto &Entry : *Legal->getInductionVars()) 4098 fixupIVUsers(Entry.first, Entry.second, 4099 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 4100 IVEndValues[Entry.first], LoopMiddleBlock); 4101 4102 fixLCSSAPHIs(); 4103 for (Instruction *PI : PredicatedInstructions) 4104 sinkScalarOperands(&*PI); 4105 4106 // Remove redundant induction instructions. 4107 cse(LoopVectorBody); 4108 } 4109 4110 void InnerLoopVectorizer::fixCrossIterationPHIs() { 4111 // In order to support recurrences we need to be able to vectorize Phi nodes. 4112 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4113 // stage #2: We now need to fix the recurrences by adding incoming edges to 4114 // the currently empty PHI nodes. At this point every instruction in the 4115 // original loop is widened to a vector form so we can use them to construct 4116 // the incoming edges. 4117 for (Instruction &I : *OrigLoop->getHeader()) { 4118 PHINode *Phi = dyn_cast<PHINode>(&I); 4119 if (!Phi) 4120 break; 4121 // Handle first-order recurrences and reductions that need to be fixed. 4122 if (Legal->isFirstOrderRecurrence(Phi)) 4123 fixFirstOrderRecurrence(Phi); 4124 else if (Legal->isReductionVariable(Phi)) 4125 fixReduction(Phi); 4126 } 4127 } 4128 4129 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) { 4130 // This is the second phase of vectorizing first-order recurrences. An 4131 // overview of the transformation is described below. Suppose we have the 4132 // following loop. 4133 // 4134 // for (int i = 0; i < n; ++i) 4135 // b[i] = a[i] - a[i - 1]; 4136 // 4137 // There is a first-order recurrence on "a". For this loop, the shorthand 4138 // scalar IR looks like: 4139 // 4140 // scalar.ph: 4141 // s_init = a[-1] 4142 // br scalar.body 4143 // 4144 // scalar.body: 4145 // i = phi [0, scalar.ph], [i+1, scalar.body] 4146 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 4147 // s2 = a[i] 4148 // b[i] = s2 - s1 4149 // br cond, scalar.body, ... 
4150 // 4151 // In this example, s1 is a recurrence because it's value depends on the 4152 // previous iteration. In the first phase of vectorization, we created a 4153 // temporary value for s1. We now complete the vectorization and produce the 4154 // shorthand vector IR shown below (for VF = 4, UF = 1). 4155 // 4156 // vector.ph: 4157 // v_init = vector(..., ..., ..., a[-1]) 4158 // br vector.body 4159 // 4160 // vector.body 4161 // i = phi [0, vector.ph], [i+4, vector.body] 4162 // v1 = phi [v_init, vector.ph], [v2, vector.body] 4163 // v2 = a[i, i+1, i+2, i+3]; 4164 // v3 = vector(v1(3), v2(0, 1, 2)) 4165 // b[i, i+1, i+2, i+3] = v2 - v3 4166 // br cond, vector.body, middle.block 4167 // 4168 // middle.block: 4169 // x = v2(3) 4170 // br scalar.ph 4171 // 4172 // scalar.ph: 4173 // s_init = phi [x, middle.block], [a[-1], otherwise] 4174 // br scalar.body 4175 // 4176 // After execution completes the vector loop, we extract the next value of 4177 // the recurrence (x) to use as the initial value in the scalar loop. 4178 4179 // Get the original loop preheader and single loop latch. 4180 auto *Preheader = OrigLoop->getLoopPreheader(); 4181 auto *Latch = OrigLoop->getLoopLatch(); 4182 4183 // Get the initial and previous values of the scalar recurrence. 4184 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader); 4185 auto *Previous = Phi->getIncomingValueForBlock(Latch); 4186 4187 // Create a vector from the initial value. 4188 auto *VectorInit = ScalarInit; 4189 if (VF > 1) { 4190 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4191 VectorInit = Builder.CreateInsertElement( 4192 UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit, 4193 Builder.getInt32(VF - 1), "vector.recur.init"); 4194 } 4195 4196 // We constructed a temporary phi node in the first phase of vectorization. 4197 // This phi node will eventually be deleted. 4198 Builder.SetInsertPoint( 4199 cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0))); 4200 4201 // Create a phi node for the new recurrence. The current value will either be 4202 // the initial value inserted into a vector or loop-varying vector value. 4203 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur"); 4204 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader); 4205 4206 // Get the vectorized previous value of the last part UF - 1. It appears last 4207 // among all unrolled iterations, due to the order of their construction. 4208 Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1); 4209 4210 // Set the insertion point after the previous value if it is an instruction. 4211 // Note that the previous value may have been constant-folded so it is not 4212 // guaranteed to be an instruction in the vector loop. Also, if the previous 4213 // value is a phi node, we should insert after all the phi nodes to avoid 4214 // breaking basic block verification. 4215 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) || 4216 isa<PHINode>(PreviousLastPart)) 4217 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 4218 else 4219 Builder.SetInsertPoint( 4220 &*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart))); 4221 4222 // We will construct a vector for the recurrence by combining the values for 4223 // the current and previous iterations. This is the required shuffle mask. 
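  // For VF = 4 the mask is <3, 4, 5, 6>: the last lane of the vector carrying
  // the previous iteration's values followed by the first three lanes of the
  // current one, matching v3 = vector(v1(3), v2(0, 1, 2)) in the example
  // above.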
  SmallVector<Constant *, 8> ShuffleMask(VF);
  ShuffleMask[0] = Builder.getInt32(VF - 1);
  for (unsigned I = 1; I < VF; ++I)
    ShuffleMask[I] = Builder.getInt32(I + VF - 1);

  // The vector from which to take the initial value for the current iteration
  // (actual or unrolled). Initially, this is the vector phi node.
  Value *Incoming = VecPhi;

  // Shuffle the current and previous vector and update the vector parts.
  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
    Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
    auto *Shuffle =
        VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart,
                                             ConstantVector::get(ShuffleMask))
               : Incoming;
    PhiPart->replaceAllUsesWith(Shuffle);
    cast<Instruction>(PhiPart)->eraseFromParent();
    VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
    Incoming = PreviousPart;
  }

  // Fix the latch value of the new recurrence in the vector loop.
  VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());

  // Extract the last vector element in the middle block. This will be the
  // initial value for the recurrence when jumping to the scalar loop.
  auto *ExtractForScalar = Incoming;
  if (VF > 1) {
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    ExtractForScalar = Builder.CreateExtractElement(
        ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
  }
  // Extract the second-to-last element in the middle block if the
  // Phi is used outside the loop. We need to extract the phi itself
  // and not the last element (the phi update in the current iteration). This
  // will be the value when jumping to the exit block from the LoopMiddleBlock,
  // when the scalar loop is not run at all.
  Value *ExtractForPhiUsedOutsideLoop = nullptr;
  if (VF > 1)
    ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
        Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
  // When the loop is unrolled without vectorizing, initialize
  // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
  // value of `Incoming`. This is analogous to the vectorized case above:
  // extracting the second-to-last element when VF > 1.
  else if (UF > 1)
    ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);

  // Fix the initial value of the original recurrence in the scalar loop.
  Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
  auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
  for (auto *BB : predecessors(LoopScalarPreHeader)) {
    auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
    Start->addIncoming(Incoming, BB);
  }

  Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
  Phi->setName("scalar.recur");

  // Finally, fix users of the recurrence outside the loop. The users will need
  // either the last value of the scalar recurrence or the last value of the
  // vector recurrence we extracted in the middle block. Since the loop is in
  // LCSSA form, we just need to find the phi node for the original scalar
  // recurrence in the exit block, and then add an edge for the middle block.
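  // For example (a sketch continuing the example above): an exit-block phi
  // t = phi [s1, scalar.body] gains the incoming value
  // [vector.recur.extract.for.phi, middle.block], which is correct when the
  // scalar remainder loop does not run at all.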
  for (auto &I : *LoopExitBlock) {
    auto *LCSSAPhi = dyn_cast<PHINode>(&I);
    if (!LCSSAPhi)
      break;
    if (LCSSAPhi->getIncomingValue(0) == Phi) {
      LCSSAPhi->addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
      break;
    }
  }
}

void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
  Constant *Zero = Builder.getInt32(0);

  // Get its reduction variable descriptor.
  assert(Legal->isReductionVariable(Phi) &&
         "Unable to find the reduction variable");
  RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];

  RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
  TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
  Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
  RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
      RdxDesc.getMinMaxRecurrenceKind();
  setDebugLocFromInst(Builder, ReductionStartValue);

  // We need to generate a reduction vector from the incoming scalar.
  // To do so, we need to generate the 'identity' vector and override
  // one of the elements with the incoming scalar reduction. We need
  // to do it in the vector-loop preheader.
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // This is the vector-clone of the value that leaves the loop.
  Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();

  // Find the reduction identity variable. Zero for addition, or and xor;
  // one for multiplication; -1 for and.
  Value *Identity;
  Value *VectorStart;
  if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
      RK == RecurrenceDescriptor::RK_FloatMinMax) {
    // MinMax reductions have the start value as their identity.
    if (VF == 1) {
      VectorStart = Identity = ReductionStartValue;
    } else {
      VectorStart = Identity =
          Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
    }
  } else {
    // Handle other reduction kinds:
    Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
        RK, VecTy->getScalarType());
    if (VF == 1) {
      Identity = Iden;
      // This vector is the Identity vector where the first element is the
      // incoming scalar reduction.
      VectorStart = ReductionStartValue;
    } else {
      Identity = ConstantVector::getSplat(VF, Iden);

      // This vector is the Identity vector where the first element is the
      // incoming scalar reduction.
      VectorStart =
          Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
    }
  }

  // Fix the vector-loop phi.

  // Reductions do not have to start at zero. They can start with
  // any loop invariant values.
  BasicBlock *Latch = OrigLoop->getLoopLatch();
  Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
    Value *Val = getOrCreateVectorValue(LoopVal, Part);
    // Make sure to add the reduction start value only to the
    // first unroll part.
    Value *StartVal = (Part == 0) ? VectorStart : Identity;
    cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
    cast<PHINode>(VecRdxPhi)
        ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
  }

  // Before each round, move the insertion point right between
  // the PHIs and the values we are going to write.
4376 // This allows us to write both PHINodes and the extractelement 4377 // instructions. 4378 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4379 4380 setDebugLocFromInst(Builder, LoopExitInst); 4381 4382 // If the vector reduction can be performed in a smaller type, we truncate 4383 // then extend the loop exit value to enable InstCombine to evaluate the 4384 // entire expression in the smaller type. 4385 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) { 4386 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4387 Builder.SetInsertPoint( 4388 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4389 VectorParts RdxParts(UF); 4390 for (unsigned Part = 0; Part < UF; ++Part) { 4391 RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 4392 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4393 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 4394 : Builder.CreateZExt(Trunc, VecTy); 4395 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 4396 UI != RdxParts[Part]->user_end();) 4397 if (*UI != Trunc) { 4398 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 4399 RdxParts[Part] = Extnd; 4400 } else { 4401 ++UI; 4402 } 4403 } 4404 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4405 for (unsigned Part = 0; Part < UF; ++Part) { 4406 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4407 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]); 4408 } 4409 } 4410 4411 // Reduce all of the unrolled parts into a single vector. 4412 Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0); 4413 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK); 4414 setDebugLocFromInst(Builder, ReducedPartRdx); 4415 for (unsigned Part = 1; Part < UF; ++Part) { 4416 Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 4417 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 4418 // Floating point operations had to be 'fast' to enable the reduction. 4419 ReducedPartRdx = addFastMathFlag( 4420 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart, 4421 ReducedPartRdx, "bin.rdx")); 4422 else 4423 ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp( 4424 Builder, MinMaxKind, ReducedPartRdx, RdxPart); 4425 } 4426 4427 if (VF > 1) { 4428 bool NoNaN = Legal->hasFunNoNaNAttr(); 4429 ReducedPartRdx = 4430 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN); 4431 // If the reduction can be performed in a smaller type, we need to extend 4432 // the reduction to the wider type before we branch to the original loop. 4433 if (Phi->getType() != RdxDesc.getRecurrenceType()) 4434 ReducedPartRdx = 4435 RdxDesc.isSigned() 4436 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 4437 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 4438 } 4439 4440 // Create a phi node that merges control-flow from the backedge-taken check 4441 // block and the middle block. 4442 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 4443 LoopScalarPreHeader->getTerminator()); 4444 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 4445 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 4446 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4447 4448 // Now, we need to fix the users of the reduction variable 4449 // inside and outside of the scalar remainder loop. 4450 // We know that the loop is in LCSSA form. 
  // We need to update the PHI nodes in the exit blocks.
  for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
                            LEE = LoopExitBlock->end();
       LEI != LEE; ++LEI) {
    PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
    if (!LCSSAPhi)
      break;

    // All PHINodes need to have a single entry edge, or two if
    // we already fixed them.
    assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI");

    // We found a reduction value exit-PHI. Update it with the
    // incoming bypass edge.
    if (LCSSAPhi->getIncomingValue(0) == LoopExitInst)
      LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
  } // end of the LCSSA phi scan.

  // Fix the scalar loop reduction variable with the incoming reduction sum
  // from the vector body and from the backedge value.
  int IncomingEdgeBlockIdx =
      Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
  assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
  // Pick the other block.
  int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
  Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
  Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
}

void InnerLoopVectorizer::fixLCSSAPHIs() {
  for (Instruction &LEI : *LoopExitBlock) {
    auto *LCSSAPhi = dyn_cast<PHINode>(&LEI);
    if (!LCSSAPhi)
      break;
    if (LCSSAPhi->getNumIncomingValues() == 1) {
      assert(OrigLoop->isLoopInvariant(LCSSAPhi->getIncomingValue(0)) &&
             "Incoming value isn't loop invariant");
      LCSSAPhi->addIncoming(LCSSAPhi->getIncomingValue(0), LoopMiddleBlock);
    }
  }
}

void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
  // The basic block and loop containing the predicated instruction.
  auto *PredBB = PredInst->getParent();
  auto *VectorLoop = LI->getLoopFor(PredBB);

  // Initialize a worklist with the operands of the predicated instruction.
  SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());

  // Holds instructions that we need to analyze again. An instruction may be
  // reanalyzed if we don't yet know if we can sink it or not.
  SmallVector<Instruction *, 8> InstsToReanalyze;

  // Returns true if a given use occurs in the predicated block. Phi nodes use
  // their operands in their corresponding predecessor blocks.
  auto isBlockOfUsePredicated = [&](Use &U) -> bool {
    auto *I = cast<Instruction>(U.getUser());
    BasicBlock *BB = I->getParent();
    if (auto *Phi = dyn_cast<PHINode>(I))
      BB = Phi->getIncomingBlock(
          PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
    return BB == PredBB;
  };

  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when one pass
  // through the worklist doesn't sink a single instruction.
  bool Changed;
  do {
    // Add the instructions that need to be reanalyzed to the worklist, and
    // reset the changed indicator.
    Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
    InstsToReanalyze.clear();
    Changed = false;

    while (!Worklist.empty()) {
      auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());

      // We can't sink an instruction if it is a phi node, is already in the
      // predicated block, is not in the loop, or may have side effects.
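      // For example (illustrative): a scalarized address computation used
      // only by the predicated instruction can be sunk into PredBB, whereas
      // a phi node or a volatile load (which may have side effects) must
      // stay where it is.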
      if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
          !VectorLoop->contains(I) || I->mayHaveSideEffects())
        continue;

      // It's legal to sink the instruction if all its uses occur in the
      // predicated block. Otherwise, there's nothing to do yet, and we may
      // need to reanalyze the instruction.
      if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
        InstsToReanalyze.push_back(I);
        continue;
      }

      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
      I->moveBefore(&*PredBB->getFirstInsertionPt());
      Worklist.insert(I->op_begin(), I->op_end());

      // The sinking may have enabled other instructions to be sunk, so we will
      // need to iterate.
      Changed = true;
    }
  } while (Changed);
}

void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
                                              unsigned VF) {
  assert(PN->getParent() == OrigLoop->getHeader() &&
         "Non-header phis should have been handled elsewhere");

  PHINode *P = cast<PHINode>(PN);
  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #1: We create a new vector PHI node with no incoming edges. We'll use
  // this value when we vectorize all of the instructions that use the PHI.
  if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
    for (unsigned Part = 0; Part < UF; ++Part) {
      // This is phase one of vectorizing PHIs.
      Type *VecTy =
          (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
      Value *EntryPart = PHINode::Create(
          VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
      VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
    }
    return;
  }

  setDebugLocFromInst(Builder, P);

  // This PHINode must be an induction variable.
  // Make sure that we know about it.
  assert(Legal->getInductionVars()->count(P) && "Not an induction variable");

  InductionDescriptor II = Legal->getInductionVars()->lookup(P);
  const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // FIXME: The newly created binary instructions should contain nsw/nuw
  //        flags, which can be found from the original scalar operations.
  switch (II.getKind()) {
  case InductionDescriptor::IK_NoInduction:
    llvm_unreachable("Unknown induction");
  case InductionDescriptor::IK_IntInduction:
  case InductionDescriptor::IK_FpInduction:
    llvm_unreachable("Integer/fp induction is handled elsewhere.");
  case InductionDescriptor::IK_PtrInduction: {
    // Handle the pointer induction variable case.
    assert(P->getType()->isPointerTy() && "Unexpected type.");
    // This is the normalized GEP that starts counting at zero.
    Value *PtrInd = Induction;
    PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
    // Determine the number of scalars we need to generate for each unroll
    // iteration. If the instruction is uniform, we only need to generate the
    // first lane. Otherwise, we generate all VF values.
    unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
    // These are the scalar results. Notice that we don't generate vector GEPs
    // because scalar GEPs result in better code.
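    // For example (an illustrative sketch, assuming VF = 4, UF = 2, and a
    // pointer induction that is uniform after vectorization): only lane 0 of
    // each part is needed, so we emit just the two scalar addresses for
    // indices 0 and 4 instead of two wide vector GEPs.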
    for (unsigned Part = 0; Part < UF; ++Part) {
      for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
        Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
        Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
        Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
        SclrGep->setName("next.gep");
        VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
      }
    }
    return;
  }
  }
}

/// A helper function for checking whether an integer division-related
/// instruction may divide by zero (in which case it must be predicated if
/// executed conditionally in the scalar code).
/// TODO: It may be worthwhile to generalize and check isKnownNonZero().
///       Non-zero divisors that are not compile-time constants will not be
///       converted into multiplication, so we will still end up scalarizing
///       the division, but can do so without predication.
static bool mayDivideByZero(Instruction &I) {
  assert((I.getOpcode() == Instruction::UDiv ||
          I.getOpcode() == Instruction::SDiv ||
          I.getOpcode() == Instruction::URem ||
          I.getOpcode() == Instruction::SRem) &&
         "Unexpected instruction");
  Value *Divisor = I.getOperand(1);
  auto *CInt = dyn_cast<ConstantInt>(Divisor);
  return !CInt || CInt->isZero();
}

void InnerLoopVectorizer::widenInstruction(Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Br:
  case Instruction::PHI:
    llvm_unreachable("This instruction is handled by a different recipe.");
  case Instruction::GetElementPtr: {
    // Construct a vector GEP by widening the operands of the scalar GEP as
    // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
    // results in a vector of pointers when at least one operand of the GEP
    // is vector-typed. Thus, to keep the representation compact, we only use
    // vector-typed operands for loop-varying values.
    auto *GEP = cast<GetElementPtrInst>(&I);

    if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) {
      // If we are vectorizing, but the GEP has only loop-invariant operands,
      // the GEP we build (by only using vector-typed operands for
      // loop-varying values) would be a scalar pointer. Thus, to ensure we
      // produce a vector of pointers, we need to either arbitrarily pick an
      // operand to broadcast, or broadcast a clone of the original GEP.
      // Here, we broadcast a clone of the original.
      //
      // TODO: If at some point we decide to scalarize instructions having
      //       loop-invariant operands, this special case will no longer be
      //       required. We would add the scalarization decision to
      //       collectLoopScalars() and teach getVectorValue() to broadcast
      //       the lane-zero scalar value.
      auto *Clone = Builder.Insert(GEP->clone());
      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
        VectorLoopValueMap.setVectorValue(&I, Part, EntryPart);
        addMetadata(EntryPart, GEP);
      }
    } else {
      // If the GEP has at least one loop-varying operand, we are sure to
      // produce a vector of pointers. But if we are only unrolling, we want
      // to produce a scalar GEP for each unroll part. Thus, the GEP we
      // produce with the code below will be scalar (if VF == 1) or vector
      // (otherwise).
Note that for the unroll-only case, we still maintain 4678 // values in the vector mapping with initVector, as we do for other 4679 // instructions. 4680 for (unsigned Part = 0; Part < UF; ++Part) { 4681 // The pointer operand of the new GEP. If it's loop-invariant, we 4682 // won't broadcast it. 4683 auto *Ptr = 4684 OrigLoop->isLoopInvariant(GEP->getPointerOperand()) 4685 ? GEP->getPointerOperand() 4686 : getOrCreateVectorValue(GEP->getPointerOperand(), Part); 4687 4688 // Collect all the indices for the new GEP. If any index is 4689 // loop-invariant, we won't broadcast it. 4690 SmallVector<Value *, 4> Indices; 4691 for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) { 4692 if (OrigLoop->isLoopInvariant(U.get())) 4693 Indices.push_back(U.get()); 4694 else 4695 Indices.push_back(getOrCreateVectorValue(U.get(), Part)); 4696 } 4697 4698 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 4699 // but it should be a vector, otherwise. 4700 auto *NewGEP = GEP->isInBounds() 4701 ? Builder.CreateInBoundsGEP(Ptr, Indices) 4702 : Builder.CreateGEP(Ptr, Indices); 4703 assert((VF == 1 || NewGEP->getType()->isVectorTy()) && 4704 "NewGEP is not a pointer vector"); 4705 VectorLoopValueMap.setVectorValue(&I, Part, NewGEP); 4706 addMetadata(NewGEP, GEP); 4707 } 4708 } 4709 4710 break; 4711 } 4712 case Instruction::UDiv: 4713 case Instruction::SDiv: 4714 case Instruction::SRem: 4715 case Instruction::URem: 4716 case Instruction::Add: 4717 case Instruction::FAdd: 4718 case Instruction::Sub: 4719 case Instruction::FSub: 4720 case Instruction::Mul: 4721 case Instruction::FMul: 4722 case Instruction::FDiv: 4723 case Instruction::FRem: 4724 case Instruction::Shl: 4725 case Instruction::LShr: 4726 case Instruction::AShr: 4727 case Instruction::And: 4728 case Instruction::Or: 4729 case Instruction::Xor: { 4730 // Just widen binops. 4731 auto *BinOp = cast<BinaryOperator>(&I); 4732 setDebugLocFromInst(Builder, BinOp); 4733 4734 for (unsigned Part = 0; Part < UF; ++Part) { 4735 Value *A = getOrCreateVectorValue(BinOp->getOperand(0), Part); 4736 Value *B = getOrCreateVectorValue(BinOp->getOperand(1), Part); 4737 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B); 4738 4739 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V)) 4740 VecOp->copyIRFlags(BinOp); 4741 4742 // Use this vector value for all users of the original instruction. 4743 VectorLoopValueMap.setVectorValue(&I, Part, V); 4744 addMetadata(V, BinOp); 4745 } 4746 4747 break; 4748 } 4749 case Instruction::Select: { 4750 // Widen selects. 4751 // If the selector is loop invariant we can create a select 4752 // instruction with a scalar condition. Otherwise, use vector-select. 4753 auto *SE = PSE.getSE(); 4754 bool InvariantCond = 4755 SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop); 4756 setDebugLocFromInst(Builder, &I); 4757 4758 // The condition can be loop invariant but still defined inside the 4759 // loop. This means that we can't just use the original 'cond' value. 4760 // We have to take the 'vectorized' value and pick the first lane. 4761 // Instcombine will make this a no-op. 4762 4763 auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0}); 4764 4765 for (unsigned Part = 0; Part < UF; ++Part) { 4766 Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part); 4767 Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part); 4768 Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part); 4769 Value *Sel = 4770 Builder.CreateSelect(InvariantCond ? 
ScalarCond : Cond, Op0, Op1); 4771 VectorLoopValueMap.setVectorValue(&I, Part, Sel); 4772 addMetadata(Sel, &I); 4773 } 4774 4775 break; 4776 } 4777 4778 case Instruction::ICmp: 4779 case Instruction::FCmp: { 4780 // Widen compares. Generate vector compares. 4781 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4782 auto *Cmp = dyn_cast<CmpInst>(&I); 4783 setDebugLocFromInst(Builder, Cmp); 4784 for (unsigned Part = 0; Part < UF; ++Part) { 4785 Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part); 4786 Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part); 4787 Value *C = nullptr; 4788 if (FCmp) { 4789 // Propagate fast math flags. 4790 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4791 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4792 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4793 } else { 4794 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4795 } 4796 VectorLoopValueMap.setVectorValue(&I, Part, C); 4797 addMetadata(C, &I); 4798 } 4799 4800 break; 4801 } 4802 4803 case Instruction::ZExt: 4804 case Instruction::SExt: 4805 case Instruction::FPToUI: 4806 case Instruction::FPToSI: 4807 case Instruction::FPExt: 4808 case Instruction::PtrToInt: 4809 case Instruction::IntToPtr: 4810 case Instruction::SIToFP: 4811 case Instruction::UIToFP: 4812 case Instruction::Trunc: 4813 case Instruction::FPTrunc: 4814 case Instruction::BitCast: { 4815 auto *CI = dyn_cast<CastInst>(&I); 4816 setDebugLocFromInst(Builder, CI); 4817 4818 /// Vectorize casts. 4819 Type *DestTy = 4820 (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF); 4821 4822 for (unsigned Part = 0; Part < UF; ++Part) { 4823 Value *A = getOrCreateVectorValue(CI->getOperand(0), Part); 4824 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4825 VectorLoopValueMap.setVectorValue(&I, Part, Cast); 4826 addMetadata(Cast, &I); 4827 } 4828 break; 4829 } 4830 4831 case Instruction::Call: { 4832 // Ignore dbg intrinsics. 4833 if (isa<DbgInfoIntrinsic>(I)) 4834 break; 4835 setDebugLocFromInst(Builder, &I); 4836 4837 Module *M = I.getParent()->getParent()->getParent(); 4838 auto *CI = cast<CallInst>(&I); 4839 4840 StringRef FnName = CI->getCalledFunction()->getName(); 4841 Function *F = CI->getCalledFunction(); 4842 Type *RetTy = ToVectorTy(CI->getType(), VF); 4843 SmallVector<Type *, 4> Tys; 4844 for (Value *ArgOperand : CI->arg_operands()) 4845 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 4846 4847 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4848 4849 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4850 // version of the instruction. 4851 // Is it beneficial to perform intrinsic call compared to lib call? 4852 bool NeedToScalarize; 4853 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 4854 bool UseVectorIntrinsic = 4855 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 4856 assert((UseVectorIntrinsic || !NeedToScalarize) && 4857 "Instruction should be scalarized elsewhere."); 4858 4859 for (unsigned Part = 0; Part < UF; ++Part) { 4860 SmallVector<Value *, 4> Args; 4861 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) { 4862 Value *Arg = CI->getArgOperand(i); 4863 // Some intrinsics have a scalar argument - don't replace it with a 4864 // vector. 
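        // For example, the second argument of the powi intrinsic (the
        // integer exponent) is such a scalar argument: it remains scalar
        // even when the call itself is widened.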
        if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i))
          Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part);
        Args.push_back(Arg);
      }

      Function *VectorF;
      if (UseVectorIntrinsic) {
        // Use vector version of the intrinsic.
        Type *TysForDecl[] = {CI->getType()};
        if (VF > 1)
          TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
        VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
      } else {
        // Use vector version of the library call.
        StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
        assert(!VFnName.empty() && "Vector function name is empty.");
        VectorF = M->getFunction(VFnName);
        if (!VectorF) {
          // Generate a declaration.
          FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
          VectorF =
              Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
          VectorF->copyAttributesFrom(F);
        }
      }
      assert(VectorF && "Can't create vector function.");

      SmallVector<OperandBundleDef, 1> OpBundles;
      CI->getOperandBundlesAsDefs(OpBundles);
      CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

      if (isa<FPMathOperator>(V))
        V->copyFastMathFlags(CI);

      VectorLoopValueMap.setVectorValue(&I, Part, V);
      addMetadata(V, &I);
    }

    break;
  }

  default:
    // This instruction is not vectorized by simple widening.
    DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
    llvm_unreachable("Unhandled instruction!");
  } // end of switch.
}

void InnerLoopVectorizer::updateAnalysis() {
  // Forget the original basic block.
  PSE.getSE()->forgetLoop(OrigLoop);

  // Update the dominator tree information.
  assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
         "Entry does not dominate exit.");

  DT->addNewBlock(LoopMiddleBlock,
                  LI->getLoopFor(LoopVectorBody)->getLoopLatch());
  DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
  DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
  DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
  DEBUG(DT->verifyDomTree());
}

/// \brief Check whether it is safe to if-convert this phi node.
///
/// Phi nodes with constant expressions that can trap are not safe to if
/// convert.
static bool canIfConvertPHINodes(BasicBlock *BB) {
  for (Instruction &I : *BB) {
    auto *Phi = dyn_cast<PHINode>(&I);
    if (!Phi)
      return true;
    for (Value *V : Phi->incoming_values())
      if (auto *C = dyn_cast<Constant>(V))
        if (C->canTrap())
          return false;
  }
  return true;
}

bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
  if (!EnableIfConversion) {
    ORE->emit(createMissedAnalysis("IfConversionDisabled")
              << "if-conversion is disabled");
    return false;
  }

  assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");

  // A list of pointers that we can safely read and write to.
  SmallPtrSet<Value *, 8> SafePointers;

  // Collect safe addresses.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (blockNeedsPredication(BB))
      continue;

    for (Instruction &I : *BB)
      if (auto *Ptr = getPointerOperand(&I))
        SafePointers.insert(Ptr);
  }

  // Collect the blocks that need predication.
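  // For example (an illustrative sketch): in a loop body containing
  //   if (c[i]) a[i] += 1;
  // the conditional block needs predication, and it can be if-converted only
  // if the access to a[i] can be predicated or its address is known safe to
  // access unconditionally (i.e. it is in SafePointers).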
  BasicBlock *Header = TheLoop->getHeader();
  for (BasicBlock *BB : TheLoop->blocks()) {
    // We don't support switch statements inside loops.
    if (!isa<BranchInst>(BB->getTerminator())) {
      ORE->emit(createMissedAnalysis("LoopContainsSwitch", BB->getTerminator())
                << "loop contains a switch statement");
      return false;
    }

    // We must be able to predicate all blocks that need to be predicated.
    if (blockNeedsPredication(BB)) {
      if (!blockCanBePredicated(BB, SafePointers)) {
        ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
                  << "control flow cannot be substituted for a select");
        return false;
      }
    } else if (BB != Header && !canIfConvertPHINodes(BB)) {
      ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
                << "control flow cannot be substituted for a select");
      return false;
    }
  }

  // We can if-convert this loop.
  return true;
}

bool LoopVectorizationLegality::canVectorize() {
  // Store the result and return it at the end instead of exiting early, in
  // case allowExtraAnalysis is used to report multiple reasons for not
  // vectorizing.
  bool Result = true;

  bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);

  // We must have a loop in canonical form. Loops with indirectbr in them
  // cannot be canonicalized.
  if (!TheLoop->getLoopPreheader()) {
    ORE->emit(createMissedAnalysis("CFGNotUnderstood")
              << "loop control flow is not understood by vectorizer");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // FIXME: The code is currently dead, since the loop passed to
  //        LoopVectorizationLegality is already an innermost loop.
  //
  // We can only vectorize innermost loops.
  if (!TheLoop->empty()) {
    ORE->emit(createMissedAnalysis("NotInnermostLoop")
              << "loop is not the innermost loop");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    ORE->emit(createMissedAnalysis("CFGNotUnderstood")
              << "loop control flow is not understood by vectorizer");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    ORE->emit(createMissedAnalysis("CFGNotUnderstood")
              << "loop control flow is not understood by vectorizer");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    ORE->emit(createMissedAnalysis("CFGNotUnderstood")
              << "loop control flow is not understood by vectorizer");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // We need to have a loop header.
  DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
               << '\n');

  // Check if we can if-convert non-single-bb loops.
5064 unsigned NumBlocks = TheLoop->getNumBlocks(); 5065 if (NumBlocks != 1 && !canVectorizeWithIfConvert()) { 5066 DEBUG(dbgs() << "LV: Can't if-convert the loop.\n"); 5067 if (DoExtraAnalysis) 5068 Result = false; 5069 else 5070 return false; 5071 } 5072 5073 // Check if we can vectorize the instructions and CFG in this loop. 5074 if (!canVectorizeInstrs()) { 5075 DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n"); 5076 if (DoExtraAnalysis) 5077 Result = false; 5078 else 5079 return false; 5080 } 5081 5082 // Go over each instruction and look at memory deps. 5083 if (!canVectorizeMemory()) { 5084 DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n"); 5085 if (DoExtraAnalysis) 5086 Result = false; 5087 else 5088 return false; 5089 } 5090 5091 DEBUG(dbgs() << "LV: We can vectorize this loop" 5092 << (LAI->getRuntimePointerChecking()->Need 5093 ? " (with a runtime bound check)" 5094 : "") 5095 << "!\n"); 5096 5097 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 5098 5099 // If an override option has been passed in for interleaved accesses, use it. 5100 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 5101 UseInterleaved = EnableInterleavedMemAccesses; 5102 5103 // Analyze interleaved memory accesses. 5104 if (UseInterleaved) 5105 InterleaveInfo.analyzeInterleaving(*getSymbolicStrides()); 5106 5107 unsigned SCEVThreshold = VectorizeSCEVCheckThreshold; 5108 if (Hints->getForce() == LoopVectorizeHints::FK_Enabled) 5109 SCEVThreshold = PragmaVectorizeSCEVCheckThreshold; 5110 5111 if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) { 5112 ORE->emit(createMissedAnalysis("TooManySCEVRunTimeChecks") 5113 << "Too many SCEV assumptions need to be made and checked " 5114 << "at runtime"); 5115 DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n"); 5116 if (DoExtraAnalysis) 5117 Result = false; 5118 else 5119 return false; 5120 } 5121 5122 // Okay! We've done all the tests. If any have failed, return false. Otherwise 5123 // we can vectorize, and at this point we don't have any other mem analysis 5124 // which may limit our maximum vectorization factor, so just return true with 5125 // no restrictions. 5126 return Result; 5127 } 5128 5129 static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) { 5130 if (Ty->isPointerTy()) 5131 return DL.getIntPtrType(Ty); 5132 5133 // It is possible that char's or short's overflow when we ask for the loop's 5134 // trip count, work around this by changing the type size. 5135 if (Ty->getScalarSizeInBits() < 32) 5136 return Type::getInt32Ty(Ty->getContext()); 5137 5138 return Ty; 5139 } 5140 5141 static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) { 5142 Ty0 = convertPointerToIntegerType(DL, Ty0); 5143 Ty1 = convertPointerToIntegerType(DL, Ty1); 5144 if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits()) 5145 return Ty0; 5146 return Ty1; 5147 } 5148 5149 /// \brief Check that the instruction has outside loop users and is not an 5150 /// identified reduction variable. 5151 static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst, 5152 SmallPtrSetImpl<Value *> &AllowedExit) { 5153 // Reduction and Induction instructions are allowed to have exit users. All 5154 // other instructions must not have external users. 5155 if (!AllowedExit.count(Inst)) 5156 // Check that all of the users of the loop are inside the BB. 5157 for (User *U : Inst->users()) { 5158 Instruction *UI = cast<Instruction>(U); 5159 // This user may be a reduction exit value. 
5160 if (!TheLoop->contains(UI)) { 5161 DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n'); 5162 return true; 5163 } 5164 } 5165 return false; 5166 } 5167 5168 void LoopVectorizationLegality::addInductionPhi( 5169 PHINode *Phi, const InductionDescriptor &ID, 5170 SmallPtrSetImpl<Value *> &AllowedExit) { 5171 Inductions[Phi] = ID; 5172 Type *PhiTy = Phi->getType(); 5173 const DataLayout &DL = Phi->getModule()->getDataLayout(); 5174 5175 // Get the widest type. 5176 if (!PhiTy->isFloatingPointTy()) { 5177 if (!WidestIndTy) 5178 WidestIndTy = convertPointerToIntegerType(DL, PhiTy); 5179 else 5180 WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy); 5181 } 5182 5183 // Int inductions are special because we only allow one IV. 5184 if (ID.getKind() == InductionDescriptor::IK_IntInduction && 5185 ID.getConstIntStepValue() && 5186 ID.getConstIntStepValue()->isOne() && 5187 isa<Constant>(ID.getStartValue()) && 5188 cast<Constant>(ID.getStartValue())->isNullValue()) { 5189 5190 // Use the phi node with the widest type as induction. Use the last 5191 // one if there are multiple (no good reason for doing this other 5192 // than it is expedient). We've checked that it begins at zero and 5193 // steps by one, so this is a canonical induction variable. 5194 if (!PrimaryInduction || PhiTy == WidestIndTy) 5195 PrimaryInduction = Phi; 5196 } 5197 5198 // Both the PHI node itself, and the "post-increment" value feeding 5199 // back into the PHI node may have external users. 5200 // We can allow those uses, except if the SCEVs we have for them rely 5201 // on predicates that only hold within the loop, since allowing the exit 5202 // currently means re-using this SCEV outside the loop. 5203 if (PSE.getUnionPredicate().isAlwaysTrue()) { 5204 AllowedExit.insert(Phi); 5205 AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch())); 5206 } 5207 5208 DEBUG(dbgs() << "LV: Found an induction variable.\n"); 5209 } 5210 5211 bool LoopVectorizationLegality::canVectorizeInstrs() { 5212 BasicBlock *Header = TheLoop->getHeader(); 5213 5214 // Look for the attribute signaling the absence of NaNs. 5215 Function &F = *Header->getParent(); 5216 HasFunNoNaNAttr = 5217 F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true"; 5218 5219 // For each block in the loop. 5220 for (BasicBlock *BB : TheLoop->blocks()) { 5221 // Scan the instructions in the block and look for hazards. 5222 for (Instruction &I : *BB) { 5223 if (auto *Phi = dyn_cast<PHINode>(&I)) { 5224 Type *PhiTy = Phi->getType(); 5225 // Check that this PHI type is allowed. 5226 if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() && 5227 !PhiTy->isPointerTy()) { 5228 ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi) 5229 << "loop control flow is not understood by vectorizer"); 5230 DEBUG(dbgs() << "LV: Found an non-int non-pointer PHI.\n"); 5231 return false; 5232 } 5233 5234 // If this PHINode is not in the header block, then we know that we 5235 // can convert it to select during if-conversion. No need to check if 5236 // the PHIs in this block are induction or reduction variables. 5237 if (BB != Header) { 5238 // Check that this instruction has no outside users or is an 5239 // identified reduction value with an outside user. 
          if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
            continue;
          ORE->emit(createMissedAnalysis("NeitherInductionNorReduction", Phi)
                    << "value could not be identified as "
                       "an induction or reduction variable");
          return false;
        }

        // We only allow if-converted PHIs with exactly two incoming values.
        if (Phi->getNumIncomingValues() != 2) {
          ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
                    << "control flow not understood by vectorizer");
          DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
          return false;
        }

        RecurrenceDescriptor RedDes;
        if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
          if (RedDes.hasUnsafeAlgebra())
            Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
          AllowedExit.insert(RedDes.getLoopExitInstr());
          Reductions[Phi] = RedDes;
          continue;
        }

        InductionDescriptor ID;
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
          addInductionPhi(Phi, ID, AllowedExit);
          if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr)
            Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst());
          continue;
        }

        if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop,
                                                         SinkAfter, DT)) {
          FirstOrderRecurrences.insert(Phi);
          continue;
        }

        // As a last resort, coerce the PHI to an AddRec expression
        // and re-try classifying it as an induction PHI.
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
          addInductionPhi(Phi, ID, AllowedExit);
          continue;
        }

        ORE->emit(createMissedAnalysis("NonReductionValueUsedOutsideLoop", Phi)
                  << "value that could not be identified as "
                     "reduction is used outside the loop");
        DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
        return false;
      } // end of PHI handling

      // We handle calls that:
      //   * Are debug info intrinsics.
      //   * Have a mapping to an IR intrinsic.
      //   * Have a vector version available.
      auto *CI = dyn_cast<CallInst>(&I);
      if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
          !isa<DbgInfoIntrinsic>(CI) &&
          !(CI->getCalledFunction() && TLI &&
            TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
        ORE->emit(createMissedAnalysis("CantVectorizeCall", CI)
                  << "call instruction cannot be vectorized");
        DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
        return false;
      }

      // Intrinsics such as powi, cttz and ctlz are legal to vectorize if the
      // second argument is the same (i.e. loop invariant).
      if (CI && hasVectorInstrinsicScalarOpd(
                    getVectorIntrinsicIDForCall(CI, TLI), 1)) {
        auto *SE = PSE.getSE();
        if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
          ORE->emit(createMissedAnalysis("CantVectorizeIntrinsic", CI)
                    << "intrinsic instruction cannot be vectorized");
          DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
          return false;
        }
      }

      // Check that the instruction return type is vectorizable.
      // Also, we can't vectorize extractelement instructions.
5323 if ((!VectorType::isValidElementType(I.getType()) && 5324 !I.getType()->isVoidTy()) || 5325 isa<ExtractElementInst>(I)) { 5326 ORE->emit(createMissedAnalysis("CantVectorizeInstructionReturnType", &I) 5327 << "instruction return type cannot be vectorized"); 5328 DEBUG(dbgs() << "LV: Found unvectorizable type.\n"); 5329 return false; 5330 } 5331 5332 // Check that the stored type is vectorizable. 5333 if (auto *ST = dyn_cast<StoreInst>(&I)) { 5334 Type *T = ST->getValueOperand()->getType(); 5335 if (!VectorType::isValidElementType(T)) { 5336 ORE->emit(createMissedAnalysis("CantVectorizeStore", ST) 5337 << "store instruction cannot be vectorized"); 5338 return false; 5339 } 5340 5341 // FP instructions can allow unsafe algebra, thus vectorizable by 5342 // non-IEEE-754 compliant SIMD units. 5343 // This applies to floating-point math operations and calls, not memory 5344 // operations, shuffles, or casts, as they don't change precision or 5345 // semantics. 5346 } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) && 5347 !I.isFast()) { 5348 DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n"); 5349 Hints->setPotentiallyUnsafe(); 5350 } 5351 5352 // Reduction instructions are allowed to have exit users. 5353 // All other instructions must not have external users. 5354 if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) { 5355 ORE->emit(createMissedAnalysis("ValueUsedOutsideLoop", &I) 5356 << "value cannot be used outside the loop"); 5357 return false; 5358 } 5359 } // next instr. 5360 } 5361 5362 if (!PrimaryInduction) { 5363 DEBUG(dbgs() << "LV: Did not find one integer induction var.\n"); 5364 if (Inductions.empty()) { 5365 ORE->emit(createMissedAnalysis("NoInductionVariable") 5366 << "loop induction variable could not be identified"); 5367 return false; 5368 } 5369 } 5370 5371 // Now we know the widest induction type, check if our found induction 5372 // is the same size. If it's not, unset it here and InnerLoopVectorizer 5373 // will create another. 5374 if (PrimaryInduction && WidestIndTy != PrimaryInduction->getType()) 5375 PrimaryInduction = nullptr; 5376 5377 return true; 5378 } 5379 5380 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) { 5381 // We should not collect Scalars more than once per VF. Right now, this 5382 // function is called from collectUniformsAndScalars(), which already does 5383 // this check. Collecting Scalars for VF=1 does not make any sense. 5384 assert(VF >= 2 && !Scalars.count(VF) && 5385 "This function should not be visited twice for the same VF"); 5386 5387 SmallSetVector<Instruction *, 8> Worklist; 5388 5389 // These sets are used to seed the analysis with pointers used by memory 5390 // accesses that will remain scalar. 5391 SmallSetVector<Instruction *, 8> ScalarPtrs; 5392 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 5393 5394 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 5395 // The pointer operands of loads and stores will be scalar as long as the 5396 // memory access is not a gather or scatter operation. The value operand of a 5397 // store will remain scalar if the store is scalarized. 
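  // For example (illustrative): the pointer operand of a consecutive store
  // (decision CM_Widen) is a scalar use, while the pointer operand of a
  // gather or scatter (decision CM_GatherScatter) must become a vector.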
  auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
    InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");
    if (auto *Store = dyn_cast<StoreInst>(MemAccess))
      if (Ptr == Store->getValueOperand())
        return WideningDecision == CM_Scalarize;
    assert(Ptr == getPointerOperand(MemAccess) &&
           "Ptr is neither a value nor a pointer operand");
    return WideningDecision != CM_GatherScatter;
  };

  // A helper that returns true if the given value is a bitcast or
  // getelementptr instruction contained in the loop.
  auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
    return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
            isa<GetElementPtrInst>(V)) &&
           !TheLoop->isLoopInvariant(V);
  };

  // A helper that evaluates a memory access's use of a pointer. If the use
  // will be a scalar use, and the pointer is only used by memory accesses, we
  // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
  // PossibleNonScalarPtrs.
  auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
    // We only care about bitcast and getelementptr instructions contained in
    // the loop.
    if (!isLoopVaryingBitCastOrGEP(Ptr))
      return;

    // If the pointer has already been identified as scalar (e.g., if it was
    // also identified as uniform), there's nothing to do.
    auto *I = cast<Instruction>(Ptr);
    if (Worklist.count(I))
      return;

    // If the use of the pointer will be a scalar use, and all users of the
    // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
    // place the pointer in PossibleNonScalarPtrs.
    if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
          return isa<LoadInst>(U) || isa<StoreInst>(U);
        }))
      ScalarPtrs.insert(I);
    else
      PossibleNonScalarPtrs.insert(I);
  };

  // We seed the scalars analysis with three classes of instructions: (1)
  // instructions marked uniform-after-vectorization, (2) bitcast and
  // getelementptr instructions used by memory accesses requiring a scalar
  // use, and (3) pointer induction variables and their update instructions
  // (we currently only scalarize these).
  //
  // (1) Add to the worklist all instructions that have been identified as
  // uniform-after-vectorization.
  Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());

  // (2) Add to the worklist all bitcast and getelementptr instructions used
  // by memory accesses requiring a scalar use. The pointer operands of loads
  // and stores will be scalar as long as the memory access is not a gather
  // or scatter operation. The value operand of a store will remain scalar if
  // the store is scalarized.
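  // For example (an illustrative sketch): a getelementptr feeding only the
  // address of a widened consecutive load ends up in ScalarPtrs, while one
  // that also has a non-memory user ends up in PossibleNonScalarPtrs.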
5460 for (auto *BB : TheLoop->blocks()) 5461 for (auto &I : *BB) { 5462 if (auto *Load = dyn_cast<LoadInst>(&I)) { 5463 evaluatePtrUse(Load, Load->getPointerOperand()); 5464 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 5465 evaluatePtrUse(Store, Store->getPointerOperand()); 5466 evaluatePtrUse(Store, Store->getValueOperand()); 5467 } 5468 } 5469 for (auto *I : ScalarPtrs) 5470 if (!PossibleNonScalarPtrs.count(I)) { 5471 DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 5472 Worklist.insert(I); 5473 } 5474 5475 // (3) Add to the worklist all pointer induction variables and their update 5476 // instructions. 5477 // 5478 // TODO: Once we are able to vectorize pointer induction variables we should 5479 // no longer insert them into the worklist here. 5480 auto *Latch = TheLoop->getLoopLatch(); 5481 for (auto &Induction : *Legal->getInductionVars()) { 5482 auto *Ind = Induction.first; 5483 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5484 if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction) 5485 continue; 5486 Worklist.insert(Ind); 5487 Worklist.insert(IndUpdate); 5488 DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 5489 DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n"); 5490 } 5491 5492 // Insert the forced scalars. 5493 // FIXME: Currently widenPHIInstruction() often creates a dead vector 5494 // induction variable when the PHI user is scalarized. 5495 if (ForcedScalars.count(VF)) 5496 for (auto *I : ForcedScalars.find(VF)->second) 5497 Worklist.insert(I); 5498 5499 // Expand the worklist by looking through any bitcasts and getelementptr 5500 // instructions we've already identified as scalar. This is similar to the 5501 // expansion step in collectLoopUniforms(); however, here we're only 5502 // expanding to include additional bitcasts and getelementptr instructions. 5503 unsigned Idx = 0; 5504 while (Idx != Worklist.size()) { 5505 Instruction *Dst = Worklist[Idx++]; 5506 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 5507 continue; 5508 auto *Src = cast<Instruction>(Dst->getOperand(0)); 5509 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 5510 auto *J = cast<Instruction>(U); 5511 return !TheLoop->contains(J) || Worklist.count(J) || 5512 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 5513 isScalarUse(J, Src)); 5514 })) { 5515 Worklist.insert(Src); 5516 DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 5517 } 5518 } 5519 5520 // An induction variable will remain scalar if all users of the induction 5521 // variable and induction variable update remain scalar. 5522 for (auto &Induction : *Legal->getInductionVars()) { 5523 auto *Ind = Induction.first; 5524 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5525 5526 // We already considered pointer induction variables, so there's no reason 5527 // to look at their users again. 5528 // 5529 // TODO: Once we are able to vectorize pointer induction variables we 5530 // should no longer skip over them here. 5531 if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction) 5532 continue; 5533 5534 // Determine if all users of the induction variable are scalar after 5535 // vectorization. 
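    // For example (illustrative): if the only users of an induction variable
    // i are its update i++ and the scalarized address computations of memory
    // accesses, then both i and i++ remain scalar.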
    auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
    });
    if (!ScalarInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // scalar after vectorization.
    auto ScalarIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
        });
    if (!ScalarIndUpdate)
      continue;

    // The induction variable and its update instruction will remain scalar.
    Worklist.insert(Ind);
    Worklist.insert(IndUpdate);
    DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
    DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n");
  }

  Scalars[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationLegality::isScalarWithPredication(Instruction *I) {
  if (!blockNeedsPredication(I->getParent()))
    return false;
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Store:
    return !isMaskRequired(I);
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
    return mayDivideByZero(*I);
  }
  return false;
}

bool LoopVectorizationLegality::memoryInstructionCanBeWidened(Instruction *I,
                                                              unsigned VF) {
  // Get and ensure we have a valid memory instruction.
  LoadInst *LI = dyn_cast<LoadInst>(I);
  StoreInst *SI = dyn_cast<StoreInst>(I);
  assert((LI || SI) && "Invalid memory instruction");

  auto *Ptr = getPointerOperand(I);

  // In order to be widened, the pointer should be consecutive, first of all.
  if (!isConsecutivePtr(Ptr))
    return false;

  // If the instruction is a store located in a predicated block, it will be
  // scalarized.
  if (isScalarWithPredication(I))
    return false;

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  if (hasIrregularType(ScalarTy, DL, VF))
    return false;

  return true;
}

void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
  // We should not collect Uniforms more than once per VF. Right now,
  // this function is called from collectUniformsAndScalars(), which
  // already does this check. Collecting Uniforms for VF=1 does not make any
  // sense.

  assert(VF >= 2 && !Uniforms.count(VF) &&
         "This function should not be visited twice for the same VF");

  // Visit the list of Uniforms. If we do not find any uniform value, we will
  // not analyze it again; Uniforms.count(VF) will still return 1.
  Uniforms[VF].clear();

  // We now know that the loop is vectorizable!
  // Collect instructions inside the loop that will remain uniform after
  // vectorization.

  // Global values, params and instructions outside of current loop are out of
  // scope.
5627 auto isOutOfScope = [&](Value *V) -> bool { 5628 Instruction *I = dyn_cast<Instruction>(V); 5629 return (!I || !TheLoop->contains(I)); 5630 }; 5631 5632 SetVector<Instruction *> Worklist; 5633 BasicBlock *Latch = TheLoop->getLoopLatch(); 5634 5635 // Start with the conditional branch. If the branch condition is an 5636 // instruction contained in the loop that is only used by the branch, it is 5637 // uniform. 5638 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 5639 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) { 5640 Worklist.insert(Cmp); 5641 DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n"); 5642 } 5643 5644 // Holds consecutive and consecutive-like pointers. Consecutive-like pointers 5645 // are pointers that are treated like consecutive pointers during 5646 // vectorization. The pointer operands of interleaved accesses are an 5647 // example. 5648 SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs; 5649 5650 // Holds pointer operands of instructions that are possibly non-uniform. 5651 SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs; 5652 5653 auto isUniformDecision = [&](Instruction *I, unsigned VF) { 5654 InstWidening WideningDecision = getWideningDecision(I, VF); 5655 assert(WideningDecision != CM_Unknown && 5656 "Widening decision should be ready at this moment"); 5657 5658 return (WideningDecision == CM_Widen || 5659 WideningDecision == CM_Interleave); 5660 }; 5661 // Iterate over the instructions in the loop, and collect all 5662 // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible 5663 // that a consecutive-like pointer operand will be scalarized, we collect it 5664 // in PossibleNonUniformPtrs instead. We use two sets here because a single 5665 // getelementptr instruction can be used by both vectorized and scalarized 5666 // memory instructions. For example, if a loop loads and stores from the same 5667 // location, but the store is conditional, the store will be scalarized, and 5668 // the getelementptr won't remain uniform. 5669 for (auto *BB : TheLoop->blocks()) 5670 for (auto &I : *BB) { 5671 // If there's no pointer operand, there's nothing to do. 5672 auto *Ptr = dyn_cast_or_null<Instruction>(getPointerOperand(&I)); 5673 if (!Ptr) 5674 continue; 5675 5676 // True if all users of Ptr are memory accesses that have Ptr as their 5677 // pointer operand. 5678 auto UsersAreMemAccesses = 5679 llvm::all_of(Ptr->users(), [&](User *U) -> bool { 5680 return getPointerOperand(U) == Ptr; 5681 }); 5682 5683 // Ensure the memory instruction will not be scalarized or used by 5684 // gather/scatter, making its pointer operand non-uniform. If the pointer 5685 // operand is used by any instruction other than a memory access, we 5686 // conservatively assume the pointer operand may be non-uniform. 5687 if (!UsersAreMemAccesses || !isUniformDecision(&I, VF)) 5688 PossibleNonUniformPtrs.insert(Ptr); 5689 5690 // If the memory instruction will be vectorized and its pointer operand 5691 // is consecutive-like, or interleaving - the pointer operand should 5692 // remain uniform. 5693 else 5694 ConsecutiveLikePtrs.insert(Ptr); 5695 } 5696 5697 // Add to the Worklist all consecutive and consecutive-like pointers that 5698 // aren't also identified as possibly non-uniform. 
5699 for (auto *V : ConsecutiveLikePtrs) 5700 if (!PossibleNonUniformPtrs.count(V)) { 5701 DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n"); 5702 Worklist.insert(V); 5703 } 5704 5705 // Expand Worklist in topological order: whenever a new instruction 5706 // is added , its users should be either already inside Worklist, or 5707 // out of scope. It ensures a uniform instruction will only be used 5708 // by uniform instructions or out of scope instructions. 5709 unsigned idx = 0; 5710 while (idx != Worklist.size()) { 5711 Instruction *I = Worklist[idx++]; 5712 5713 for (auto OV : I->operand_values()) { 5714 if (isOutOfScope(OV)) 5715 continue; 5716 auto *OI = cast<Instruction>(OV); 5717 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 5718 auto *J = cast<Instruction>(U); 5719 return !TheLoop->contains(J) || Worklist.count(J) || 5720 (OI == getPointerOperand(J) && isUniformDecision(J, VF)); 5721 })) { 5722 Worklist.insert(OI); 5723 DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n"); 5724 } 5725 } 5726 } 5727 5728 // Returns true if Ptr is the pointer operand of a memory access instruction 5729 // I, and I is known to not require scalarization. 5730 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 5731 return getPointerOperand(I) == Ptr && isUniformDecision(I, VF); 5732 }; 5733 5734 // For an instruction to be added into Worklist above, all its users inside 5735 // the loop should also be in Worklist. However, this condition cannot be 5736 // true for phi nodes that form a cyclic dependence. We must process phi 5737 // nodes separately. An induction variable will remain uniform if all users 5738 // of the induction variable and induction variable update remain uniform. 5739 // The code below handles both pointer and non-pointer induction variables. 5740 for (auto &Induction : *Legal->getInductionVars()) { 5741 auto *Ind = Induction.first; 5742 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5743 5744 // Determine if all users of the induction variable are uniform after 5745 // vectorization. 5746 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5747 auto *I = cast<Instruction>(U); 5748 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 5749 isVectorizedMemAccessUse(I, Ind); 5750 }); 5751 if (!UniformInd) 5752 continue; 5753 5754 // Determine if all users of the induction variable update instruction are 5755 // uniform after vectorization. 5756 auto UniformIndUpdate = 5757 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5758 auto *I = cast<Instruction>(U); 5759 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 5760 isVectorizedMemAccessUse(I, IndUpdate); 5761 }); 5762 if (!UniformIndUpdate) 5763 continue; 5764 5765 // The induction variable and its update instruction will remain uniform. 
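// Illustrative example (added): in a loop like
//   for (i = 0; i < n; ++i) A[i] = B[i];
// the induction variable is used only by the loop's exit compare and by
// address computations of consecutive accesses that will be widened, so only
// the lane-zero value is ever needed and both the phi and its update stay
// uniform.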
5766 Worklist.insert(Ind); 5767 Worklist.insert(IndUpdate); 5768 DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n"); 5769 DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n"); 5770 } 5771 5772 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 5773 } 5774 5775 bool LoopVectorizationLegality::canVectorizeMemory() { 5776 LAI = &(*GetLAA)(*TheLoop); 5777 InterleaveInfo.setLAI(LAI); 5778 const OptimizationRemarkAnalysis *LAR = LAI->getReport(); 5779 if (LAR) { 5780 ORE->emit([&]() { 5781 return OptimizationRemarkAnalysis(Hints->vectorizeAnalysisPassName(), 5782 "loop not vectorized: ", *LAR); 5783 }); 5784 } 5785 if (!LAI->canVectorizeMemory()) 5786 return false; 5787 5788 if (LAI->hasStoreToLoopInvariantAddress()) { 5789 ORE->emit(createMissedAnalysis("CantVectorizeStoreToLoopInvariantAddress") 5790 << "write to a loop invariant address could not be vectorized"); 5791 DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n"); 5792 return false; 5793 } 5794 5795 Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks()); 5796 PSE.addPredicate(LAI->getPSE().getUnionPredicate()); 5797 5798 return true; 5799 } 5800 5801 bool LoopVectorizationLegality::isInductionVariable(const Value *V) { 5802 Value *In0 = const_cast<Value *>(V); 5803 PHINode *PN = dyn_cast_or_null<PHINode>(In0); 5804 if (!PN) 5805 return false; 5806 5807 return Inductions.count(PN); 5808 } 5809 5810 bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) { 5811 return FirstOrderRecurrences.count(Phi); 5812 } 5813 5814 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) { 5815 return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT); 5816 } 5817 5818 bool LoopVectorizationLegality::blockCanBePredicated( 5819 BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) { 5820 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel(); 5821 5822 for (Instruction &I : *BB) { 5823 // Check that we don't have a constant expression that can trap as operand. 5824 for (Value *Operand : I.operands()) { 5825 if (auto *C = dyn_cast<Constant>(Operand)) 5826 if (C->canTrap()) 5827 return false; 5828 } 5829 // We might be able to hoist the load. 5830 if (I.mayReadFromMemory()) { 5831 auto *LI = dyn_cast<LoadInst>(&I); 5832 if (!LI) 5833 return false; 5834 if (!SafePtrs.count(LI->getPointerOperand())) { 5835 if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) || 5836 isLegalMaskedGather(LI->getType())) { 5837 MaskedOp.insert(LI); 5838 continue; 5839 } 5840 // !llvm.mem.parallel_loop_access implies if-conversion safety. 5841 if (IsAnnotatedParallel) 5842 continue; 5843 return false; 5844 } 5845 } 5846 5847 if (I.mayWriteToMemory()) { 5848 auto *SI = dyn_cast<StoreInst>(&I); 5849 // We only support predication of stores in basic blocks with one 5850 // predecessor. 5851 if (!SI) 5852 return false; 5853 5854 // Build a masked store if it is legal for the target. 
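// Illustrative example (added): a conditional store such as
//   if (c[i]) A[i] = x;
// can become a masked store (e.g. an llvm.masked.store intrinsic) whose mask
// is the block predicate, when the target reports that as legal; otherwise the
// store must satisfy the checks below (known-safe pointer, single predecessor,
// store-count limit) for the block to be predicated at all.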
5855 if (isLegalMaskedStore(SI->getValueOperand()->getType(), 5856 SI->getPointerOperand()) || 5857 isLegalMaskedScatter(SI->getValueOperand()->getType())) { 5858 MaskedOp.insert(SI); 5859 continue; 5860 } 5861 5862 bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0); 5863 bool isSinglePredecessor = SI->getParent()->getSinglePredecessor(); 5864 5865 if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr || 5866 !isSinglePredecessor) 5867 return false; 5868 } 5869 if (I.mayThrow()) 5870 return false; 5871 } 5872 5873 return true; 5874 } 5875 5876 void InterleavedAccessInfo::collectConstStrideAccesses( 5877 MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo, 5878 const ValueToValueMap &Strides) { 5879 auto &DL = TheLoop->getHeader()->getModule()->getDataLayout(); 5880 5881 // Since it's desired that the load/store instructions be maintained in 5882 // "program order" for the interleaved access analysis, we have to visit the 5883 // blocks in the loop in reverse postorder (i.e., in a topological order). 5884 // Such an ordering will ensure that any load/store that may be executed 5885 // before a second load/store will precede the second load/store in 5886 // AccessStrideInfo. 5887 LoopBlocksDFS DFS(TheLoop); 5888 DFS.perform(LI); 5889 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) 5890 for (auto &I : *BB) { 5891 auto *LI = dyn_cast<LoadInst>(&I); 5892 auto *SI = dyn_cast<StoreInst>(&I); 5893 if (!LI && !SI) 5894 continue; 5895 5896 Value *Ptr = getPointerOperand(&I); 5897 // We don't check wrapping here because we don't know yet if Ptr will be 5898 // part of a full group or a group with gaps. Checking wrapping for all 5899 // pointers (even those that end up in groups with no gaps) will be overly 5900 // conservative. For full groups, wrapping should be ok since if we would 5901 // wrap around the address space we would do a memory access at nullptr 5902 // even without the transformation. The wrapping checks are therefore 5903 // deferred until after we've formed the interleaved groups. 5904 int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, 5905 /*Assume=*/true, /*ShouldCheckWrap=*/false); 5906 5907 const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 5908 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); 5909 uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType()); 5910 5911 // An alignment of 0 means target ABI alignment. 5912 unsigned Align = getMemInstAlignment(&I); 5913 if (!Align) 5914 Align = DL.getABITypeAlignment(PtrTy->getElementType()); 5915 5916 AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align); 5917 } 5918 } 5919 5920 // Analyze interleaved accesses and collect them into interleaved load and 5921 // store groups. 5922 // 5923 // When generating code for an interleaved load group, we effectively hoist all 5924 // loads in the group to the location of the first load in program order. When 5925 // generating code for an interleaved store group, we sink all stores to the 5926 // location of the last store. This code motion can change the order of load 5927 // and store instructions and may break dependences. 5928 // 5929 // The code generation strategy mentioned above ensures that we won't violate 5930 // any write-after-read (WAR) dependences. 5931 // 5932 // E.g., for the WAR dependence: a = A[i]; // (1) 5933 // A[i] = b; // (2) 5934 // 5935 // The store group of (2) is always inserted at or below (2), and the load 5936 // group of (1) is always inserted at or above (1). 
Thus, the instructions will 5937 // never be reordered. All other dependences are checked to ensure the 5938 // correctness of the instruction reordering. 5939 // 5940 // The algorithm visits all memory accesses in the loop in bottom-up program 5941 // order. Program order is established by traversing the blocks in the loop in 5942 // reverse postorder when collecting the accesses. 5943 // 5944 // We visit the memory accesses in bottom-up order because it can simplify the 5945 // construction of store groups in the presence of write-after-write (WAW) 5946 // dependences. 5947 // 5948 // E.g., for the WAW dependence: A[i] = a; // (1) 5949 // A[i] = b; // (2) 5950 // A[i + 1] = c; // (3) 5951 // 5952 // We will first create a store group with (3) and (2). (1) can't be added to 5953 // this group because it and (2) are dependent. However, (1) can be grouped 5954 // with other accesses that may precede it in program order. Note that a 5955 // bottom-up order does not imply that WAW dependences should not be checked. 5956 void InterleavedAccessInfo::analyzeInterleaving( 5957 const ValueToValueMap &Strides) { 5958 DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n"); 5959 5960 // Holds all accesses with a constant stride. 5961 MapVector<Instruction *, StrideDescriptor> AccessStrideInfo; 5962 collectConstStrideAccesses(AccessStrideInfo, Strides); 5963 5964 if (AccessStrideInfo.empty()) 5965 return; 5966 5967 // Collect the dependences in the loop. 5968 collectDependences(); 5969 5970 // Holds all interleaved store groups temporarily. 5971 SmallSetVector<InterleaveGroup *, 4> StoreGroups; 5972 // Holds all interleaved load groups temporarily. 5973 SmallSetVector<InterleaveGroup *, 4> LoadGroups; 5974 5975 // Search in bottom-up program order for pairs of accesses (A and B) that can 5976 // form interleaved load or store groups. In the algorithm below, access A 5977 // precedes access B in program order. We initialize a group for B in the 5978 // outer loop of the algorithm, and then in the inner loop, we attempt to 5979 // insert each A into B's group if: 5980 // 5981 // 1. A and B have the same stride, 5982 // 2. A and B have the same memory object size, and 5983 // 3. A belongs in B's group according to its distance from B. 5984 // 5985 // Special care is taken to ensure group formation will not break any 5986 // dependences. 5987 for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend(); 5988 BI != E; ++BI) { 5989 Instruction *B = BI->first; 5990 StrideDescriptor DesB = BI->second; 5991 5992 // Initialize a group for B if it has an allowable stride. Even if we don't 5993 // create a group for B, we continue with the bottom-up algorithm to ensure 5994 // we don't break any of B's dependences. 5995 InterleaveGroup *Group = nullptr; 5996 if (isStrided(DesB.Stride)) { 5997 Group = getInterleaveGroup(B); 5998 if (!Group) { 5999 DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n'); 6000 Group = createInterleaveGroup(B, DesB.Stride, DesB.Align); 6001 } 6002 if (B->mayWriteToMemory()) 6003 StoreGroups.insert(Group); 6004 else 6005 LoadGroups.insert(Group); 6006 } 6007 6008 for (auto AI = std::next(BI); AI != E; ++AI) { 6009 Instruction *A = AI->first; 6010 StrideDescriptor DesA = AI->second; 6011 6012 // Our code motion strategy implies that we can't have dependences 6013 // between accesses in an interleaved group and other accesses located 6014 // between the first and last member of the group. 
Note that this also 6015 // means that a group can't have more than one member at a given offset. 6016 // The accesses in a group can have dependences with other accesses, but 6017 // we must ensure we don't extend the boundaries of the group such that 6018 // we encompass those dependent accesses. 6019 // 6020 // For example, assume we have the sequence of accesses shown below in a 6021 // stride-2 loop: 6022 // 6023 // (1, 2) is a group | A[i] = a; // (1) 6024 // | A[i-1] = b; // (2) | 6025 // A[i-3] = c; // (3) 6026 // A[i] = d; // (4) | (2, 4) is not a group 6027 // 6028 // Because accesses (2) and (3) are dependent, we can group (2) with (1) 6029 // but not with (4). If we did, the dependent access (3) would be within 6030 // the boundaries of the (2, 4) group. 6031 if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) { 6032 // If a dependence exists and A is already in a group, we know that A 6033 // must be a store since A precedes B and WAR dependences are allowed. 6034 // Thus, A would be sunk below B. We release A's group to prevent this 6035 // illegal code motion. A will then be free to form another group with 6036 // instructions that precede it. 6037 if (isInterleaved(A)) { 6038 InterleaveGroup *StoreGroup = getInterleaveGroup(A); 6039 StoreGroups.remove(StoreGroup); 6040 releaseGroup(StoreGroup); 6041 } 6042 6043 // If a dependence exists and A is not already in a group (or it was 6044 // and we just released it), B might be hoisted above A (if B is a 6045 // load) or another store might be sunk below A (if B is a store). In 6046 // either case, we can't add additional instructions to B's group. B 6047 // will only form a group with instructions that it precedes. 6048 break; 6049 } 6050 6051 // At this point, we've checked for illegal code motion. If either A or B 6052 // isn't strided, there's nothing left to do. 6053 if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride)) 6054 continue; 6055 6056 // Ignore A if it's already in a group or isn't the same kind of memory 6057 // operation as B. 6058 if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory()) 6059 continue; 6060 6061 // Check rules 1 and 2. Ignore A if its stride or size is different from 6062 // that of B. 6063 if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size) 6064 continue; 6065 6066 // Ignore A if the memory object of A and B don't belong to the same 6067 // address space 6068 if (getMemInstAddressSpace(A) != getMemInstAddressSpace(B)) 6069 continue; 6070 6071 // Calculate the distance from A to B. 6072 const SCEVConstant *DistToB = dyn_cast<SCEVConstant>( 6073 PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev)); 6074 if (!DistToB) 6075 continue; 6076 int64_t DistanceToB = DistToB->getAPInt().getSExtValue(); 6077 6078 // Check rule 3. Ignore A if its distance to B is not a multiple of the 6079 // size. 6080 if (DistanceToB % static_cast<int64_t>(DesB.Size)) 6081 continue; 6082 6083 // Ignore A if either A or B is in a predicated block. Although we 6084 // currently prevent group formation for predicated accesses, we may be 6085 // able to relax this limitation in the future once we handle more 6086 // complicated blocks. 6087 if (isPredicated(A->getParent()) || isPredicated(B->getParent())) 6088 continue; 6089 6090 // The index of A is the index of B plus A's distance to B in multiples 6091 // of the size. 6092 int IndexA = 6093 Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size); 6094 6095 // Try to insert A into B's group. 
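// Illustrative example (added): in a stride-2 loop that loads both A[2*i] and
// A[2*i + 1], the two accesses have the same stride and size and are one
// element apart, so the earlier access is inserted into the group started by
// the later one, yielding a factor-2 interleave group.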
6096 if (Group->insertMember(A, IndexA, DesA.Align)) { 6097 DEBUG(dbgs() << "LV: Inserted:" << *A << '\n' 6098 << " into the interleave group with" << *B << '\n'); 6099 InterleaveGroupMap[A] = Group; 6100 6101 // Set the first load in program order as the insert position. 6102 if (A->mayReadFromMemory()) 6103 Group->setInsertPos(A); 6104 } 6105 } // Iteration over A accesses. 6106 } // Iteration over B accesses. 6107 6108 // Remove interleaved store groups with gaps. 6109 for (InterleaveGroup *Group : StoreGroups) 6110 if (Group->getNumMembers() != Group->getFactor()) { 6111 DEBUG(dbgs() << "LV: Invalidate candidate interleaved store group due " 6112 "to gaps.\n"); 6113 releaseGroup(Group); 6114 } 6115 // Remove interleaved groups with gaps (currently only loads) whose memory 6116 // accesses may wrap around. We have to revisit the getPtrStride analysis, 6117 // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does 6118 // not check wrapping (see documentation there). 6119 // FORNOW we use Assume=false; 6120 // TODO: Change to Assume=true but making sure we don't exceed the threshold 6121 // of runtime SCEV assumptions checks (thereby potentially failing to 6122 // vectorize altogether). 6123 // Additional optional optimizations: 6124 // TODO: If we are peeling the loop and we know that the first pointer doesn't 6125 // wrap then we can deduce that all pointers in the group don't wrap. 6126 // This means that we can forcefully peel the loop in order to only have to 6127 // check the first pointer for no-wrap. When we change to use Assume=true 6128 // we'll only need at most one runtime check per interleaved group. 6129 for (InterleaveGroup *Group : LoadGroups) { 6130 // Case 1: A full group. We can skip the checks; for full groups, if the wide 6131 // load would wrap around the address space we would do a memory access at 6132 // nullptr even without the transformation. 6133 if (Group->getNumMembers() == Group->getFactor()) 6134 continue; 6135 6136 // Case 2: If the first and last members of the group don't wrap, this implies 6137 // that all the pointers in the group don't wrap. 6138 // So we check only group member 0 (which is always guaranteed to exist), 6139 // and group member Factor - 1; if the latter doesn't exist we rely on 6140 // peeling (if it is a non-reversed access -- see Case 3). 6141 Value *FirstMemberPtr = getPointerOperand(Group->getMember(0)); 6142 if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false, 6143 /*ShouldCheckWrap=*/true)) { 6144 DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to " 6145 "first group member potentially pointer-wrapping.\n"); 6146 releaseGroup(Group); 6147 continue; 6148 } 6149 Instruction *LastMember = Group->getMember(Group->getFactor() - 1); 6150 if (LastMember) { 6151 Value *LastMemberPtr = getPointerOperand(LastMember); 6152 if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false, 6153 /*ShouldCheckWrap=*/true)) { 6154 DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to " 6155 "last group member potentially pointer-wrapping.\n"); 6156 releaseGroup(Group); 6157 } 6158 } else { 6159 // Case 3: A non-reversed interleaved load group with gaps: We need 6160 // to execute at least one scalar epilogue iteration. This will ensure 6161 // we don't speculatively access memory out-of-bounds. We only need 6162 // to look for a member at index factor - 1, since every group must have 6163 // a member at index zero.
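// Illustrative example (added): a factor-2 load group whose only member is
// A[2*i] (the member at index 1 is missing) would, as a wide load, also read
// A[2*i + 1]; on the final iterations that element may lie past the end of
// the underlying object, so we require a scalar epilogue rather than letting
// the wide load run to the very end.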
6164 if (Group->isReverse()) { 6165 DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to " 6166 "a reverse access with gaps.\n"); 6167 releaseGroup(Group); 6168 continue; 6169 } 6170 DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n"); 6171 RequiresScalarEpilogue = true; 6172 } 6173 } 6174 } 6175 6176 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) { 6177 if (!EnableCondStoresVectorization && Legal->getNumPredStores()) { 6178 ORE->emit(createMissedAnalysis("ConditionalStore") 6179 << "store that is conditionally executed prevents vectorization"); 6180 DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n"); 6181 return None; 6182 } 6183 6184 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 6185 // TODO: It may be useful to do this, since the check is still likely to be 6186 // dynamically uniform if the target can skip it. 6187 DEBUG(dbgs() << "LV: Not inserting runtime ptr check for divergent target"); 6188 6189 ORE->emit( 6190 createMissedAnalysis("CantVersionLoopWithDivergentTarget") 6191 << "runtime pointer checks needed. Not enabled for divergent target"); 6192 6193 return None; 6194 } 6195 6196 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 6197 if (!OptForSize) // Remaining checks deal with scalar loop when OptForSize. 6198 return computeFeasibleMaxVF(OptForSize, TC); 6199 6200 if (Legal->getRuntimePointerChecking()->Need) { 6201 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 6202 << "runtime pointer checks needed. Enable vectorization of this " 6203 "loop with '#pragma clang loop vectorize(enable)' when " 6204 "compiling with -Os/-Oz"); 6205 DEBUG(dbgs() 6206 << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); 6207 return None; 6208 } 6209 6210 // If we optimize the program for size, avoid creating the tail loop. 6211 DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 6212 6213 // If we don't know the precise trip count, don't try to vectorize. 6214 if (TC < 2) { 6215 ORE->emit( 6216 createMissedAnalysis("UnknownLoopCountComplexCFG") 6217 << "unable to calculate the loop count due to complex control flow"); 6218 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); 6219 return None; 6220 } 6221 6222 unsigned MaxVF = computeFeasibleMaxVF(OptForSize, TC); 6223 6224 if (TC % MaxVF != 0) { 6225 // If the trip count that we found modulo the vectorization factor is not 6226 // zero then we require a tail. 6227 // FIXME: look for a smaller MaxVF that does divide TC rather than give up. 6228 // FIXME: return None if loop requiresScalarEpilog(<MaxVF>), or look for a 6229 // smaller MaxVF that does not require a scalar epilog. 6230 6231 ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize") 6232 << "cannot optimize for size and vectorize at the " 6233 "same time. Enable vectorization of this loop " 6234 "with '#pragma clang loop vectorize(enable)' " 6235 "when compiling with -Os/-Oz"); 6236 DEBUG(dbgs() << "LV: Aborting.
A tail loop is required with -Os/-Oz.\n"); 6237 return None; 6238 } 6239 6240 return MaxVF; 6241 } 6242 6243 unsigned 6244 LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize, 6245 unsigned ConstTripCount) { 6246 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 6247 unsigned SmallestType, WidestType; 6248 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 6249 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 6250 6251 // Get the maximum safe dependence distance in bits computed by LAA. 6252 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 6253 // the memory accesses that is most restrictive (involved in the smallest 6254 // dependence distance). 6255 unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth(); 6256 6257 WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth); 6258 6259 unsigned MaxVectorSize = WidestRegister / WidestType; 6260 6261 DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / " 6262 << WidestType << " bits.\n"); 6263 DEBUG(dbgs() << "LV: The Widest register safe to use is: " << WidestRegister 6264 << " bits.\n"); 6265 6266 assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements" 6267 " into one vector!"); 6268 if (MaxVectorSize == 0) { 6269 DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 6270 MaxVectorSize = 1; 6271 return MaxVectorSize; 6272 } else if (ConstTripCount && ConstTripCount < MaxVectorSize && 6273 isPowerOf2_32(ConstTripCount)) { 6274 // We need to clamp the VF to be the ConstTripCount. There is no point in 6275 // choosing a higher viable VF as done in the loop below. 6276 DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 6277 << ConstTripCount << "\n"); 6278 MaxVectorSize = ConstTripCount; 6279 return MaxVectorSize; 6280 } 6281 6282 unsigned MaxVF = MaxVectorSize; 6283 if (MaximizeBandwidth && !OptForSize) { 6284 // Collect all viable vectorization factors larger than the default MaxVF 6285 // (i.e. MaxVectorSize). 6286 SmallVector<unsigned, 8> VFs; 6287 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 6288 for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2) 6289 VFs.push_back(VS); 6290 6291 // For each VF calculate its register usage. 6292 auto RUs = calculateRegisterUsage(VFs); 6293 6294 // Select the largest VF which doesn't require more registers than existing 6295 // ones. 6296 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true); 6297 for (int i = RUs.size() - 1; i >= 0; --i) { 6298 if (RUs[i].MaxLocalUsers <= TargetNumRegisters) { 6299 MaxVF = VFs[i]; 6300 break; 6301 } 6302 } 6303 } 6304 return MaxVF; 6305 } 6306 6307 LoopVectorizationCostModel::VectorizationFactor 6308 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) { 6309 float Cost = expectedCost(1).first; 6310 #ifndef NDEBUG 6311 const float ScalarCost = Cost; 6312 #endif /* NDEBUG */ 6313 unsigned Width = 1; 6314 DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 6315 6316 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 6317 // Ignore scalar width, because the user explicitly wants vectorization. 
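// Illustrative example (added): if the scalar loop costs 4 per iteration and
// every vector width costs more than 4 per lane, the loop below would normally
// keep Width == 1; seeding Cost with the VF = 2 estimate here restricts the
// comparison to vector widths only, so a forced loop still selects whichever
// VF > 1 has the lowest per-lane cost.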
6318 if (ForceVectorization && MaxVF > 1) { 6319 Width = 2; 6320 Cost = expectedCost(Width).first / (float)Width; 6321 } 6322 6323 for (unsigned i = 2; i <= MaxVF; i *= 2) { 6324 // Notice that the vector loop needs to be executed less times, so 6325 // we need to divide the cost of the vector loops by the width of 6326 // the vector elements. 6327 VectorizationCostTy C = expectedCost(i); 6328 float VectorCost = C.first / (float)i; 6329 DEBUG(dbgs() << "LV: Vector loop of width " << i 6330 << " costs: " << (int)VectorCost << ".\n"); 6331 if (!C.second && !ForceVectorization) { 6332 DEBUG( 6333 dbgs() << "LV: Not considering vector loop of width " << i 6334 << " because it will not generate any vector instructions.\n"); 6335 continue; 6336 } 6337 if (VectorCost < Cost) { 6338 Cost = VectorCost; 6339 Width = i; 6340 } 6341 } 6342 6343 DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 6344 << "LV: Vectorization seems to be not beneficial, " 6345 << "but was forced by a user.\n"); 6346 DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 6347 VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)}; 6348 return Factor; 6349 } 6350 6351 std::pair<unsigned, unsigned> 6352 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 6353 unsigned MinWidth = -1U; 6354 unsigned MaxWidth = 8; 6355 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 6356 6357 // For each block. 6358 for (BasicBlock *BB : TheLoop->blocks()) { 6359 // For each instruction in the loop. 6360 for (Instruction &I : *BB) { 6361 Type *T = I.getType(); 6362 6363 // Skip ignored values. 6364 if (ValuesToIgnore.count(&I)) 6365 continue; 6366 6367 // Only examine Loads, Stores and PHINodes. 6368 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 6369 continue; 6370 6371 // Examine PHI nodes that are reduction variables. Update the type to 6372 // account for the recurrence type. 6373 if (auto *PN = dyn_cast<PHINode>(&I)) { 6374 if (!Legal->isReductionVariable(PN)) 6375 continue; 6376 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN]; 6377 T = RdxDesc.getRecurrenceType(); 6378 } 6379 6380 // Examine the stored values. 6381 if (auto *ST = dyn_cast<StoreInst>(&I)) 6382 T = ST->getValueOperand()->getType(); 6383 6384 // Ignore loaded pointer types and stored pointer types that are not 6385 // vectorizable. 6386 // 6387 // FIXME: The check here attempts to predict whether a load or store will 6388 // be vectorized. We only know this for certain after a VF has 6389 // been selected. Here, we assume that if an access can be 6390 // vectorized, it will be. We should also look at extending this 6391 // optimization to non-pointer types. 6392 // 6393 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 6394 !Legal->isAccessInterleaved(&I) && !Legal->isLegalGatherOrScatter(&I)) 6395 continue; 6396 6397 MinWidth = std::min(MinWidth, 6398 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 6399 MaxWidth = std::max(MaxWidth, 6400 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 6401 } 6402 } 6403 6404 return {MinWidth, MaxWidth}; 6405 } 6406 6407 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, 6408 unsigned VF, 6409 unsigned LoopCost) { 6410 // -- The interleave heuristics -- 6411 // We interleave the loop in order to expose ILP and reduce the loop overhead. 6412 // There are many micro-architectural considerations that we can't predict 6413 // at this level. 
For example, frontend pressure (on decode or fetch) due to 6414 // code size, or the number and capabilities of the execution ports. 6415 // 6416 // We use the following heuristics to select the interleave count: 6417 // 1. If the code has reductions, then we interleave to break the cross 6418 // iteration dependency. 6419 // 2. If the loop is really small, then we interleave to reduce the loop 6420 // overhead. 6421 // 3. We don't interleave if we think that we will spill registers to memory 6422 // due to the increased register pressure. 6423 6424 // When we optimize for size, we don't interleave. 6425 if (OptForSize) 6426 return 1; 6427 6428 // We used the distance for the interleave count. 6429 if (Legal->getMaxSafeDepDistBytes() != -1U) 6430 return 1; 6431 6432 // Do not interleave loops with a relatively small trip count. 6433 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 6434 if (TC > 1 && TC < TinyTripCountInterleaveThreshold) 6435 return 1; 6436 6437 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1); 6438 DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 6439 << " registers\n"); 6440 6441 if (VF == 1) { 6442 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 6443 TargetNumRegisters = ForceTargetNumScalarRegs; 6444 } else { 6445 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 6446 TargetNumRegisters = ForceTargetNumVectorRegs; 6447 } 6448 6449 RegisterUsage R = calculateRegisterUsage({VF})[0]; 6450 // We divide by these constants so assume that we have at least one 6451 // instruction that uses at least one register. 6452 R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U); 6453 R.NumInstructions = std::max(R.NumInstructions, 1U); 6454 6455 // We calculate the interleave count using the following formula. 6456 // Subtract the number of loop invariants from the number of available 6457 // registers. These registers are used by all of the interleaved instances. 6458 // Next, divide the remaining registers by the number of registers that is 6459 // required by the loop, in order to estimate how many parallel instances 6460 // fit without causing spills. All of this is rounded down if necessary to be 6461 // a power of two. We want power of two interleave count to simplify any 6462 // addressing operations or alignment considerations. 6463 unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) / 6464 R.MaxLocalUsers); 6465 6466 // Don't count the induction variable as interleaved. 6467 if (EnableIndVarRegisterHeur) 6468 IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) / 6469 std::max(1U, (R.MaxLocalUsers - 1))); 6470 6471 // Clamp the interleave ranges to reasonable counts. 6472 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF); 6473 6474 // Check if the user has overridden the max. 6475 if (VF == 1) { 6476 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 6477 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 6478 } else { 6479 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 6480 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 6481 } 6482 6483 // If we did not calculate the cost for VF (because the user selected the VF) 6484 // then we calculate the cost of VF here. 6485 if (LoopCost == 0) 6486 LoopCost = expectedCost(VF).first; 6487 6488 // Clamp the calculated IC to be between the 1 and the max interleave count 6489 // that the target allows. 
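// Illustrative numbers (added): with 16 target registers, 2 loop-invariant
// values and at most 3 registers live at once, the first formula above gives
// IC = PowerOf2Floor((16 - 2) / 3) = 4; if the target's maximum interleave
// factor is 2, the clamp below reduces that to 2.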
6490 if (IC > MaxInterleaveCount) 6491 IC = MaxInterleaveCount; 6492 else if (IC < 1) 6493 IC = 1; 6494 6495 // Interleave if we vectorized this loop and there is a reduction that could 6496 // benefit from interleaving. 6497 if (VF > 1 && !Legal->getReductionVars()->empty()) { 6498 DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6499 return IC; 6500 } 6501 6502 // Note that if we've already vectorized the loop we will have done the 6503 // runtime check and so interleaving won't require further checks. 6504 bool InterleavingRequiresRuntimePointerCheck = 6505 (VF == 1 && Legal->getRuntimePointerChecking()->Need); 6506 6507 // We want to interleave small loops in order to reduce the loop overhead and 6508 // potentially expose ILP opportunities. 6509 DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'); 6510 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 6511 // We assume that the cost overhead is 1 and we use the cost model 6512 // to estimate the cost of the loop and interleave until the cost of the 6513 // loop overhead is about 5% of the cost of the loop. 6514 unsigned SmallIC = 6515 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 6516 6517 // Interleave until store/load ports (estimated by max interleave count) are 6518 // saturated. 6519 unsigned NumStores = Legal->getNumStores(); 6520 unsigned NumLoads = Legal->getNumLoads(); 6521 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 6522 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 6523 6524 // If we have a scalar reduction (vector reductions are already dealt with 6525 // by this point), we can increase the critical path length if the loop 6526 // we're interleaving is inside another loop. Limit, by default to 2, so the 6527 // critical path only gets increased by one reduction operation. 6528 if (!Legal->getReductionVars()->empty() && TheLoop->getLoopDepth() > 1) { 6529 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 6530 SmallIC = std::min(SmallIC, F); 6531 StoresIC = std::min(StoresIC, F); 6532 LoadsIC = std::min(LoadsIC, F); 6533 } 6534 6535 if (EnableLoadStoreRuntimeInterleave && 6536 std::max(StoresIC, LoadsIC) > SmallIC) { 6537 DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 6538 return std::max(StoresIC, LoadsIC); 6539 } 6540 6541 DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 6542 return SmallIC; 6543 } 6544 6545 // Interleave if this is a large loop (small loops are already dealt with by 6546 // this point) that could benefit from interleaving. 6547 bool HasReductions = !Legal->getReductionVars()->empty(); 6548 if (TTI.enableAggressiveInterleaving(HasReductions)) { 6549 DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6550 return IC; 6551 } 6552 6553 DEBUG(dbgs() << "LV: Not Interleaving.\n"); 6554 return 1; 6555 } 6556 6557 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 6558 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) { 6559 // This function calculates the register usage by measuring the highest number 6560 // of values that are alive at a single location. Obviously, this is a very 6561 // rough estimation. We scan the loop in a topological order in order and 6562 // assign a number to each instruction. We use RPO to ensure that defs are 6563 // met before their users. We assume that each instruction that has in-loop 6564 // users starts an interval. 
We record every time that an in-loop value is 6565 // used, so we have a list of the first and last occurrences of each 6566 // instruction. Next, we transpose this data structure into a multi map that 6567 // holds the list of intervals that *end* at a specific location. This multi 6568 // map allows us to perform a linear search. We scan the instructions linearly 6569 // and record each time that a new interval starts, by placing it in a set. 6570 // If we find this value in the multi-map then we remove it from the set. 6571 // The max register usage is the maximum size of the set. 6572 // We also search for instructions that are defined outside the loop, but are 6573 // used inside the loop. We need this number separately from the max-interval 6574 // usage number because when we unroll, loop-invariant values do not take 6575 // more register. 6576 LoopBlocksDFS DFS(TheLoop); 6577 DFS.perform(LI); 6578 6579 RegisterUsage RU; 6580 RU.NumInstructions = 0; 6581 6582 // Each 'key' in the map opens a new interval. The values 6583 // of the map are the index of the 'last seen' usage of the 6584 // instruction that is the key. 6585 using IntervalMap = DenseMap<Instruction *, unsigned>; 6586 6587 // Maps instruction to its index. 6588 DenseMap<unsigned, Instruction *> IdxToInstr; 6589 // Marks the end of each interval. 6590 IntervalMap EndPoint; 6591 // Saves the list of instruction indices that are used in the loop. 6592 SmallSet<Instruction *, 8> Ends; 6593 // Saves the list of values that are used in the loop but are 6594 // defined outside the loop, such as arguments and constants. 6595 SmallPtrSet<Value *, 8> LoopInvariants; 6596 6597 unsigned Index = 0; 6598 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6599 RU.NumInstructions += BB->size(); 6600 for (Instruction &I : *BB) { 6601 IdxToInstr[Index++] = &I; 6602 6603 // Save the end location of each USE. 6604 for (Value *U : I.operands()) { 6605 auto *Instr = dyn_cast<Instruction>(U); 6606 6607 // Ignore non-instruction values such as arguments, constants, etc. 6608 if (!Instr) 6609 continue; 6610 6611 // If this instruction is outside the loop then record it and continue. 6612 if (!TheLoop->contains(Instr)) { 6613 LoopInvariants.insert(Instr); 6614 continue; 6615 } 6616 6617 // Overwrite previous end points. 6618 EndPoint[Instr] = Index; 6619 Ends.insert(Instr); 6620 } 6621 } 6622 } 6623 6624 // Saves the list of intervals that end with the index in 'key'. 6625 using InstrList = SmallVector<Instruction *, 2>; 6626 DenseMap<unsigned, InstrList> TransposeEnds; 6627 6628 // Transpose the EndPoints to a list of values that end at each index. 6629 for (auto &Interval : EndPoint) 6630 TransposeEnds[Interval.second].push_back(Interval.first); 6631 6632 SmallSet<Instruction *, 8> OpenIntervals; 6633 6634 // Get the size of the widest register. 6635 unsigned MaxSafeDepDist = -1U; 6636 if (Legal->getMaxSafeDepDistBytes() != -1U) 6637 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 6638 unsigned WidestRegister = 6639 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist); 6640 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 6641 6642 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6643 SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0); 6644 6645 DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6646 6647 // A lambda that gets the register usage for the given type and VF. 
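// Illustrative numbers (added): with a 128-bit widest register, an i32 value
// at VF = 8 counts as max(1, 8 * 32 / 128) = 2 registers, while anything that
// does not fill a full register still counts as one.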
6648 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 6649 if (Ty->isTokenTy()) 6650 return 0U; 6651 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 6652 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 6653 }; 6654 6655 for (unsigned int i = 0; i < Index; ++i) { 6656 Instruction *I = IdxToInstr[i]; 6657 6658 // Remove all of the instructions that end at this location. 6659 InstrList &List = TransposeEnds[i]; 6660 for (Instruction *ToRemove : List) 6661 OpenIntervals.erase(ToRemove); 6662 6663 // Ignore instructions that are never used within the loop. 6664 if (!Ends.count(I)) 6665 continue; 6666 6667 // Skip ignored values. 6668 if (ValuesToIgnore.count(I)) 6669 continue; 6670 6671 // For each VF find the maximum usage of registers. 6672 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6673 if (VFs[j] == 1) { 6674 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 6675 continue; 6676 } 6677 collectUniformsAndScalars(VFs[j]); 6678 // Count the number of live intervals. 6679 unsigned RegUsage = 0; 6680 for (auto Inst : OpenIntervals) { 6681 // Skip ignored values for VF > 1. 6682 if (VecValuesToIgnore.count(Inst) || 6683 isScalarAfterVectorization(Inst, VFs[j])) 6684 continue; 6685 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 6686 } 6687 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 6688 } 6689 6690 DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6691 << OpenIntervals.size() << '\n'); 6692 6693 // Add the current instruction to the list of open intervals. 6694 OpenIntervals.insert(I); 6695 } 6696 6697 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6698 unsigned Invariant = 0; 6699 if (VFs[i] == 1) 6700 Invariant = LoopInvariants.size(); 6701 else { 6702 for (auto Inst : LoopInvariants) 6703 Invariant += GetRegUsage(Inst->getType(), VFs[i]); 6704 } 6705 6706 DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); 6707 DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); 6708 DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n'); 6709 DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n'); 6710 6711 RU.LoopInvariantRegs = Invariant; 6712 RU.MaxLocalUsers = MaxUsages[i]; 6713 RUs[i] = RU; 6714 } 6715 6716 return RUs; 6717 } 6718 6719 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) { 6720 // If we aren't vectorizing the loop, or if we've already collected the 6721 // instructions to scalarize, there's nothing to do. Collection may already 6722 // have occurred if we have a user-selected VF and are now computing the 6723 // expected cost for interleaving. 6724 if (VF < 2 || InstsToScalarize.count(VF)) 6725 return; 6726 6727 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 6728 // not profitable to scalarize any instructions, the presence of VF in the 6729 // map will indicate that we've analyzed it already. 6730 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6731 6732 // Find all the instructions that are scalar with predication in the loop and 6733 // determine if it would be better to not if-convert the blocks they are in. 6734 // If so, we also record the instructions to scalarize. 
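// Illustrative example (added): for a body like
//   if (b[i] != 0) a[i] = x / b[i];
// the division is scalar-with-predication, and the single-use chain feeding it
// may be cheaper to keep scalar inside a predicated block than to vectorize;
// when computePredInstDiscount() reports a non-negative discount, the chain's
// instructions are recorded in ScalarCostsVF.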
6735 for (BasicBlock *BB : TheLoop->blocks()) { 6736 if (!Legal->blockNeedsPredication(BB)) 6737 continue; 6738 for (Instruction &I : *BB) 6739 if (Legal->isScalarWithPredication(&I)) { 6740 ScalarCostsTy ScalarCosts; 6741 if (computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6742 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6743 6744 // Remember that BB will remain after vectorization. 6745 PredicatedBBsAfterVectorization.insert(BB); 6746 } 6747 } 6748 } 6749 6750 int LoopVectorizationCostModel::computePredInstDiscount( 6751 Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts, 6752 unsigned VF) { 6753 assert(!isUniformAfterVectorization(PredInst, VF) && 6754 "Instruction marked uniform-after-vectorization will be predicated"); 6755 6756 // Initialize the discount to zero, meaning that the scalar version and the 6757 // vector version cost the same. 6758 int Discount = 0; 6759 6760 // Holds instructions to analyze. The instructions we visit are mapped in 6761 // ScalarCosts. Those instructions are the ones that would be scalarized if 6762 // we find that the scalar version costs less. 6763 SmallVector<Instruction *, 8> Worklist; 6764 6765 // Returns true if the given instruction can be scalarized. 6766 auto canBeScalarized = [&](Instruction *I) -> bool { 6767 // We only attempt to scalarize instructions forming a single-use chain 6768 // from the original predicated block that would otherwise be vectorized. 6769 // Although not strictly necessary, we give up on instructions we know will 6770 // already be scalar to avoid traversing chains that are unlikely to be 6771 // beneficial. 6772 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6773 isScalarAfterVectorization(I, VF)) 6774 return false; 6775 6776 // If the instruction is scalar with predication, it will be analyzed 6777 // separately. We ignore it within the context of PredInst. 6778 if (Legal->isScalarWithPredication(I)) 6779 return false; 6780 6781 // If any of the instruction's operands are uniform after vectorization, 6782 // the instruction cannot be scalarized. This prevents, for example, a 6783 // masked load from being scalarized. 6784 // 6785 // We assume we will only emit a value for lane zero of an instruction 6786 // marked uniform after vectorization, rather than VF identical values. 6787 // Thus, if we scalarize an instruction that uses a uniform, we would 6788 // create uses of values corresponding to the lanes we aren't emitting code 6789 // for. This behavior can be changed by allowing getScalarValue to clone 6790 // the lane zero values for uniforms rather than asserting. 6791 for (Use &U : I->operands()) 6792 if (auto *J = dyn_cast<Instruction>(U.get())) 6793 if (isUniformAfterVectorization(J, VF)) 6794 return false; 6795 6796 // Otherwise, we can scalarize the instruction. 6797 return true; 6798 }; 6799 6800 // Returns true if an operand that cannot be scalarized must be extracted 6801 // from a vector. We will account for this scalarization overhead below. Note 6802 // that the non-void predicated instructions are placed in their own blocks, 6803 // and their return values are inserted into vectors. Thus, an extract would 6804 // still be required. 6805 auto needsExtract = [&](Instruction *I) -> bool { 6806 return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF); 6807 }; 6808 6809 // Compute the expected cost discount from scalarizing the entire expression 6810 // feeding the predicated instruction. 
We currently only consider expressions 6811 // that are single-use instruction chains. 6812 Worklist.push_back(PredInst); 6813 while (!Worklist.empty()) { 6814 Instruction *I = Worklist.pop_back_val(); 6815 6816 // If we've already analyzed the instruction, there's nothing to do. 6817 if (ScalarCosts.count(I)) 6818 continue; 6819 6820 // Compute the cost of the vector instruction. Note that this cost already 6821 // includes the scalarization overhead of the predicated instruction. 6822 unsigned VectorCost = getInstructionCost(I, VF).first; 6823 6824 // Compute the cost of the scalarized instruction. This cost is the cost of 6825 // the instruction as if it wasn't if-converted and instead remained in the 6826 // predicated block. We will scale this cost by block probability after 6827 // computing the scalarization overhead. 6828 unsigned ScalarCost = VF * getInstructionCost(I, 1).first; 6829 6830 // Compute the scalarization overhead of needed insertelement instructions 6831 // and phi nodes. 6832 if (Legal->isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 6833 ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF), 6834 true, false); 6835 ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI); 6836 } 6837 6838 // Compute the scalarization overhead of needed extractelement 6839 // instructions. For each of the instruction's operands, if the operand can 6840 // be scalarized, add it to the worklist; otherwise, account for the 6841 // overhead. 6842 for (Use &U : I->operands()) 6843 if (auto *J = dyn_cast<Instruction>(U.get())) { 6844 assert(VectorType::isValidElementType(J->getType()) && 6845 "Instruction has non-scalar type"); 6846 if (canBeScalarized(J)) 6847 Worklist.push_back(J); 6848 else if (needsExtract(J)) 6849 ScalarCost += TTI.getScalarizationOverhead( 6850 ToVectorTy(J->getType(),VF), false, true); 6851 } 6852 6853 // Scale the total scalar cost by block probability. 6854 ScalarCost /= getReciprocalPredBlockProb(); 6855 6856 // Compute the discount. A non-negative discount means the vector version 6857 // of the instruction costs more, and scalarizing would be beneficial. 6858 Discount += VectorCost - ScalarCost; 6859 ScalarCosts[I] = ScalarCost; 6860 } 6861 6862 return Discount; 6863 } 6864 6865 LoopVectorizationCostModel::VectorizationCostTy 6866 LoopVectorizationCostModel::expectedCost(unsigned VF) { 6867 VectorizationCostTy Cost; 6868 6869 // For each block. 6870 for (BasicBlock *BB : TheLoop->blocks()) { 6871 VectorizationCostTy BlockCost; 6872 6873 // For each instruction in the old loop. 6874 for (Instruction &I : *BB) { 6875 // Skip dbg intrinsics. 6876 if (isa<DbgInfoIntrinsic>(I)) 6877 continue; 6878 6879 // Skip ignored values. 6880 if (ValuesToIgnore.count(&I)) 6881 continue; 6882 6883 VectorizationCostTy C = getInstructionCost(&I, VF); 6884 6885 // Check if we should override the cost. 6886 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 6887 C.first = ForceTargetInstructionCost; 6888 6889 BlockCost.first += C.first; 6890 BlockCost.second |= C.second; 6891 DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF " 6892 << VF << " For instruction: " << I << '\n'); 6893 } 6894 6895 // If we are vectorizing a predicated block, it will have been 6896 // if-converted. This means that the block's instructions (aside from 6897 // stores and instructions that may divide by zero) will now be 6898 // unconditionally executed. For the scalar case, we may not always execute 6899 // the predicated block. 
Thus, scale the block's cost by the probability of 6900 // executing it. 6901 if (VF == 1 && Legal->blockNeedsPredication(BB)) 6902 BlockCost.first /= getReciprocalPredBlockProb(); 6903 6904 Cost.first += BlockCost.first; 6905 Cost.second |= BlockCost.second; 6906 } 6907 6908 return Cost; 6909 } 6910 6911 /// \brief Gets Address Access SCEV after verifying that the access pattern 6912 /// is loop invariant except the induction variable dependence. 6913 /// 6914 /// This SCEV can be sent to the Target in order to estimate the address 6915 /// calculation cost. 6916 static const SCEV *getAddressAccessSCEV( 6917 Value *Ptr, 6918 LoopVectorizationLegality *Legal, 6919 ScalarEvolution *SE, 6920 const Loop *TheLoop) { 6921 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6922 if (!Gep) 6923 return nullptr; 6924 6925 // We are looking for a gep with all loop invariant indices except for one 6926 // which should be an induction variable. 6927 unsigned NumOperands = Gep->getNumOperands(); 6928 for (unsigned i = 1; i < NumOperands; ++i) { 6929 Value *Opd = Gep->getOperand(i); 6930 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6931 !Legal->isInductionVariable(Opd)) 6932 return nullptr; 6933 } 6934 6935 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6936 return SE->getSCEV(Ptr); 6937 } 6938 6939 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6940 return Legal->hasStride(I->getOperand(0)) || 6941 Legal->hasStride(I->getOperand(1)); 6942 } 6943 6944 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6945 unsigned VF) { 6946 Type *ValTy = getMemInstValueType(I); 6947 auto SE = PSE.getSE(); 6948 6949 unsigned Alignment = getMemInstAlignment(I); 6950 unsigned AS = getMemInstAddressSpace(I); 6951 Value *Ptr = getPointerOperand(I); 6952 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6953 6954 // Figure out whether the access is strided and get the stride value 6955 // if it's known in compile time 6956 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, SE, TheLoop); 6957 6958 // Get the cost of the scalar memory instruction and address computation. 6959 unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6960 6961 Cost += VF * 6962 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6963 AS, I); 6964 6965 // Get the overhead of the extractelement and insertelement instructions 6966 // we might create due to scalarization. 6967 Cost += getScalarizationOverhead(I, VF, TTI); 6968 6969 // If we have a predicated store, it may not be executed for each vector 6970 // lane. Scale the cost by the probability of executing the predicated 6971 // block. 
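// Illustrative numbers (added): assuming getReciprocalPredBlockProb() models
// the predicated block as running on roughly half of the iterations (a
// reciprocal of 2), a scalarization cost of 8 computed above becomes 4 here.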
6972 if (Legal->isScalarWithPredication(I)) 6973 Cost /= getReciprocalPredBlockProb(); 6974 6975 return Cost; 6976 } 6977 6978 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6979 unsigned VF) { 6980 Type *ValTy = getMemInstValueType(I); 6981 Type *VectorTy = ToVectorTy(ValTy, VF); 6982 unsigned Alignment = getMemInstAlignment(I); 6983 Value *Ptr = getPointerOperand(I); 6984 unsigned AS = getMemInstAddressSpace(I); 6985 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 6986 6987 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6988 "Stride should be 1 or -1 for consecutive memory access"); 6989 unsigned Cost = 0; 6990 if (Legal->isMaskRequired(I)) 6991 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 6992 else 6993 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I); 6994 6995 bool Reverse = ConsecutiveStride < 0; 6996 if (Reverse) 6997 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6998 return Cost; 6999 } 7000 7001 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 7002 unsigned VF) { 7003 LoadInst *LI = cast<LoadInst>(I); 7004 Type *ValTy = LI->getType(); 7005 Type *VectorTy = ToVectorTy(ValTy, VF); 7006 unsigned Alignment = LI->getAlignment(); 7007 unsigned AS = LI->getPointerAddressSpace(); 7008 7009 return TTI.getAddressComputationCost(ValTy) + 7010 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) + 7011 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 7012 } 7013 7014 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 7015 unsigned VF) { 7016 Type *ValTy = getMemInstValueType(I); 7017 Type *VectorTy = ToVectorTy(ValTy, VF); 7018 unsigned Alignment = getMemInstAlignment(I); 7019 Value *Ptr = getPointerOperand(I); 7020 7021 return TTI.getAddressComputationCost(VectorTy) + 7022 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 7023 Legal->isMaskRequired(I), Alignment); 7024 } 7025 7026 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 7027 unsigned VF) { 7028 Type *ValTy = getMemInstValueType(I); 7029 Type *VectorTy = ToVectorTy(ValTy, VF); 7030 unsigned AS = getMemInstAddressSpace(I); 7031 7032 auto Group = Legal->getInterleavedAccessGroup(I); 7033 assert(Group && "Fail to get an interleaved access group."); 7034 7035 unsigned InterleaveFactor = Group->getFactor(); 7036 Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 7037 7038 // Holds the indices of existing members in an interleaved load group. 7039 // An interleaved store group doesn't need this as it doesn't allow gaps. 7040 SmallVector<unsigned, 4> Indices; 7041 if (isa<LoadInst>(I)) { 7042 for (unsigned i = 0; i < InterleaveFactor; i++) 7043 if (Group->getMember(i)) 7044 Indices.push_back(i); 7045 } 7046 7047 // Calculate the cost of the whole interleaved group. 7048 unsigned Cost = TTI.getInterleavedMemoryOpCost(I->getOpcode(), WideVecTy, 7049 Group->getFactor(), Indices, 7050 Group->getAlignment(), AS); 7051 7052 if (Group->isReverse()) 7053 Cost += Group->getNumMembers() * 7054 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 7055 return Cost; 7056 } 7057 7058 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 7059 unsigned VF) { 7060 // Calculate scalar cost only. Vectorization cost should be ready at this 7061 // moment. 
7062 if (VF == 1) { 7063 Type *ValTy = getMemInstValueType(I); 7064 unsigned Alignment = getMemInstAlignment(I); 7065 unsigned AS = getMemInstAddressSpace(I); 7066 7067 return TTI.getAddressComputationCost(ValTy) + 7068 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I); 7069 } 7070 return getWideningCost(I, VF); 7071 } 7072 7073 LoopVectorizationCostModel::VectorizationCostTy 7074 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 7075 // If we know that this instruction will remain uniform, check the cost of 7076 // the scalar version. 7077 if (isUniformAfterVectorization(I, VF)) 7078 VF = 1; 7079 7080 if (VF > 1 && isProfitableToScalarize(I, VF)) 7081 return VectorizationCostTy(InstsToScalarize[VF][I], false); 7082 7083 // Forced scalars do not have any scalarization overhead. 7084 if (VF > 1 && ForcedScalars.count(VF) && 7085 ForcedScalars.find(VF)->second.count(I)) 7086 return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false); 7087 7088 Type *VectorTy; 7089 unsigned C = getInstructionCost(I, VF, VectorTy); 7090 7091 bool TypeNotScalarized = 7092 VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF; 7093 return VectorizationCostTy(C, TypeNotScalarized); 7094 } 7095 7096 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) { 7097 if (VF == 1) 7098 return; 7099 for (BasicBlock *BB : TheLoop->blocks()) { 7100 // For each instruction in the old loop. 7101 for (Instruction &I : *BB) { 7102 Value *Ptr = getPointerOperand(&I); 7103 if (!Ptr) 7104 continue; 7105 7106 if (isa<LoadInst>(&I) && Legal->isUniform(Ptr)) { 7107 // Scalar load + broadcast 7108 unsigned Cost = getUniformMemOpCost(&I, VF); 7109 setWideningDecision(&I, VF, CM_Scalarize, Cost); 7110 continue; 7111 } 7112 7113 // We assume that widening is the best solution when possible. 7114 if (Legal->memoryInstructionCanBeWidened(&I, VF)) { 7115 unsigned Cost = getConsecutiveMemOpCost(&I, VF); 7116 setWideningDecision(&I, VF, CM_Widen, Cost); 7117 continue; 7118 } 7119 7120 // Choose between Interleaving, Gather/Scatter or Scalarization. 7121 unsigned InterleaveCost = std::numeric_limits<unsigned>::max(); 7122 unsigned NumAccesses = 1; 7123 if (Legal->isAccessInterleaved(&I)) { 7124 auto Group = Legal->getInterleavedAccessGroup(&I); 7125 assert(Group && "Fail to get an interleaved access group."); 7126 7127 // Make one decision for the whole group. 7128 if (getWideningDecision(&I, VF) != CM_Unknown) 7129 continue; 7130 7131 NumAccesses = Group->getNumMembers(); 7132 InterleaveCost = getInterleaveGroupCost(&I, VF); 7133 } 7134 7135 unsigned GatherScatterCost = 7136 Legal->isLegalGatherOrScatter(&I) 7137 ? getGatherScatterCost(&I, VF) * NumAccesses 7138 : std::numeric_limits<unsigned>::max(); 7139 7140 unsigned ScalarizationCost = 7141 getMemInstScalarizationCost(&I, VF) * NumAccesses; 7142 7143 // Choose better solution for the current VF, 7144 // write down this decision and use it during vectorization. 7145 unsigned Cost; 7146 InstWidening Decision; 7147 if (InterleaveCost <= GatherScatterCost && 7148 InterleaveCost < ScalarizationCost) { 7149 Decision = CM_Interleave; 7150 Cost = InterleaveCost; 7151 } else if (GatherScatterCost < ScalarizationCost) { 7152 Decision = CM_GatherScatter; 7153 Cost = GatherScatterCost; 7154 } else { 7155 Decision = CM_Scalarize; 7156 Cost = ScalarizationCost; 7157 } 7158 // If the instructions belongs to an interleave group, the whole group 7159 // receives the same decision. 
The whole group receives the cost, but 7160 // the cost will actually be assigned to one instruction. 7161 if (auto Group = Legal->getInterleavedAccessGroup(&I)) 7162 setWideningDecision(Group, VF, Decision, Cost); 7163 else 7164 setWideningDecision(&I, VF, Decision, Cost); 7165 } 7166 } 7167 7168 // Make sure that any load of address and any other address computation 7169 // remains scalar unless there is gather/scatter support. This avoids 7170 // inevitable extracts into address registers, and also has the benefit of 7171 // activating LSR more, since that pass can't optimize vectorized 7172 // addresses. 7173 if (TTI.prefersVectorizedAddressing()) 7174 return; 7175 7176 // Start with all scalar pointer uses. 7177 SmallPtrSet<Instruction *, 8> AddrDefs; 7178 for (BasicBlock *BB : TheLoop->blocks()) 7179 for (Instruction &I : *BB) { 7180 Instruction *PtrDef = 7181 dyn_cast_or_null<Instruction>(getPointerOperand(&I)); 7182 if (PtrDef && TheLoop->contains(PtrDef) && 7183 getWideningDecision(&I, VF) != CM_GatherScatter) 7184 AddrDefs.insert(PtrDef); 7185 } 7186 7187 // Add all instructions used to generate the addresses. 7188 SmallVector<Instruction *, 4> Worklist; 7189 for (auto *I : AddrDefs) 7190 Worklist.push_back(I); 7191 while (!Worklist.empty()) { 7192 Instruction *I = Worklist.pop_back_val(); 7193 for (auto &Op : I->operands()) 7194 if (auto *InstOp = dyn_cast<Instruction>(Op)) 7195 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 7196 AddrDefs.insert(InstOp).second) 7197 Worklist.push_back(InstOp); 7198 } 7199 7200 for (auto *I : AddrDefs) { 7201 if (isa<LoadInst>(I)) { 7202 // Setting the desired widening decision should ideally be handled in 7203 // by cost functions, but since this involves the task of finding out 7204 // if the loaded register is involved in an address computation, it is 7205 // instead changed here when we know this is the case. 7206 if (getWideningDecision(I, VF) == CM_Widen) 7207 // Scalarize a widened load of address. 7208 setWideningDecision(I, VF, CM_Scalarize, 7209 (VF * getMemoryInstructionCost(I, 1))); 7210 else if (auto Group = Legal->getInterleavedAccessGroup(I)) { 7211 // Scalarize an interleave group of address loads. 7212 for (unsigned I = 0; I < Group->getFactor(); ++I) { 7213 if (Instruction *Member = Group->getMember(I)) 7214 setWideningDecision(Member, VF, CM_Scalarize, 7215 (VF * getMemoryInstructionCost(Member, 1))); 7216 } 7217 } 7218 } else 7219 // Make sure I gets scalarized and a cost estimate without 7220 // scalarization overhead. 7221 ForcedScalars[VF].insert(I); 7222 } 7223 } 7224 7225 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 7226 unsigned VF, 7227 Type *&VectorTy) { 7228 Type *RetTy = I->getType(); 7229 if (canTruncateToMinimalBitwidth(I, VF)) 7230 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 7231 VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF); 7232 auto SE = PSE.getSE(); 7233 7234 // TODO: We need to estimate the cost of intrinsic calls. 7235 switch (I->getOpcode()) { 7236 case Instruction::GetElementPtr: 7237 // We mark this instruction as zero-cost because the cost of GEPs in 7238 // vectorized code depends on whether the corresponding memory instruction 7239 // is scalarized or not. Therefore, we handle GEPs with the memory 7240 // instruction cost. 7241 return 0; 7242 case Instruction::Br: { 7243 // In cases of scalarized and predicated instructions, there will be VF 7244 // predicated blocks in the vectorized loop. 
Each branch around these 7245 // blocks requires also an extract of its vector compare i1 element. 7246 bool ScalarPredicatedBB = false; 7247 BranchInst *BI = cast<BranchInst>(I); 7248 if (VF > 1 && BI->isConditional() && 7249 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7250 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7251 ScalarPredicatedBB = true; 7252 7253 if (ScalarPredicatedBB) { 7254 // Return cost for branches around scalarized and predicated blocks. 7255 Type *Vec_i1Ty = 7256 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7257 return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) + 7258 (TTI.getCFInstrCost(Instruction::Br) * VF)); 7259 } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1) 7260 // The back-edge branch will remain, as will all scalar branches. 7261 return TTI.getCFInstrCost(Instruction::Br); 7262 else 7263 // This branch will be eliminated by if-conversion. 7264 return 0; 7265 // Note: We currently assume zero cost for an unconditional branch inside 7266 // a predicated block since it will become a fall-through, although we 7267 // may decide in the future to call TTI for all branches. 7268 } 7269 case Instruction::PHI: { 7270 auto *Phi = cast<PHINode>(I); 7271 7272 // First-order recurrences are replaced by vector shuffles inside the loop. 7273 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 7274 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 7275 VectorTy, VF - 1, VectorTy); 7276 7277 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7278 // converted into select instructions. We require N - 1 selects per phi 7279 // node, where N is the number of incoming values. 7280 if (VF > 1 && Phi->getParent() != TheLoop->getHeader()) 7281 return (Phi->getNumIncomingValues() - 1) * 7282 TTI.getCmpSelInstrCost( 7283 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7284 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF)); 7285 7286 return TTI.getCFInstrCost(Instruction::PHI); 7287 } 7288 case Instruction::UDiv: 7289 case Instruction::SDiv: 7290 case Instruction::URem: 7291 case Instruction::SRem: 7292 // If we have a predicated instruction, it may not be executed for each 7293 // vector lane. Get the scalarization cost and scale this amount by the 7294 // probability of executing the predicated block. If the instruction is not 7295 // predicated, we fall through to the next case. 7296 if (VF > 1 && Legal->isScalarWithPredication(I)) { 7297 unsigned Cost = 0; 7298 7299 // These instructions have a non-void type, so account for the phi nodes 7300 // that we will create. This cost is likely to be zero. The phi node 7301 // cost, if any, should be scaled by the block probability because it 7302 // models a copy at the end of each predicated block. 7303 Cost += VF * TTI.getCFInstrCost(Instruction::PHI); 7304 7305 // The cost of the non-predicated instruction. 7306 Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy); 7307 7308 // The cost of insertelement and extractelement instructions needed for 7309 // scalarization. 7310 Cost += getScalarizationOverhead(I, VF, TTI); 7311 7312 // Scale the cost by the probability of executing the predicated blocks. 7313 // This assumes the predicated block for each vector lane is equally 7314 // likely. 
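// Worked example (hypothetical target costs): for a predicated sdiv at
// VF = 4, with a PHI cost of 0, an sdiv cost of 20 and a scalarization
// overhead of 8, the sum above is 4 * 0 + 4 * 20 + 8 = 88, which the division
// below scales to 88 / 2 = 44 under the 50% block-execution assumption.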
7315 return Cost / getReciprocalPredBlockProb(); 7316 } 7317 LLVM_FALLTHROUGH; 7318 case Instruction::Add: 7319 case Instruction::FAdd: 7320 case Instruction::Sub: 7321 case Instruction::FSub: 7322 case Instruction::Mul: 7323 case Instruction::FMul: 7324 case Instruction::FDiv: 7325 case Instruction::FRem: 7326 case Instruction::Shl: 7327 case Instruction::LShr: 7328 case Instruction::AShr: 7329 case Instruction::And: 7330 case Instruction::Or: 7331 case Instruction::Xor: { 7332 // Since we will replace the stride by 1 the multiplication should go away. 7333 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7334 return 0; 7335 // Certain instructions can be cheaper to vectorize if they have a constant 7336 // second vector operand. One example of this are shifts on x86. 7337 TargetTransformInfo::OperandValueKind Op1VK = 7338 TargetTransformInfo::OK_AnyValue; 7339 TargetTransformInfo::OperandValueKind Op2VK = 7340 TargetTransformInfo::OK_AnyValue; 7341 TargetTransformInfo::OperandValueProperties Op1VP = 7342 TargetTransformInfo::OP_None; 7343 TargetTransformInfo::OperandValueProperties Op2VP = 7344 TargetTransformInfo::OP_None; 7345 Value *Op2 = I->getOperand(1); 7346 7347 // Check for a splat or for a non uniform vector of constants. 7348 if (isa<ConstantInt>(Op2)) { 7349 ConstantInt *CInt = cast<ConstantInt>(Op2); 7350 if (CInt && CInt->getValue().isPowerOf2()) 7351 Op2VP = TargetTransformInfo::OP_PowerOf2; 7352 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 7353 } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) { 7354 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 7355 Constant *SplatValue = cast<Constant>(Op2)->getSplatValue(); 7356 if (SplatValue) { 7357 ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue); 7358 if (CInt && CInt->getValue().isPowerOf2()) 7359 Op2VP = TargetTransformInfo::OP_PowerOf2; 7360 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 7361 } 7362 } else if (Legal->isUniform(Op2)) { 7363 Op2VK = TargetTransformInfo::OK_UniformValue; 7364 } 7365 SmallVector<const Value *, 4> Operands(I->operand_values()); 7366 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 7367 return N * TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, 7368 Op2VK, Op1VP, Op2VP, Operands); 7369 } 7370 case Instruction::Select: { 7371 SelectInst *SI = cast<SelectInst>(I); 7372 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7373 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7374 Type *CondTy = SI->getCondition()->getType(); 7375 if (!ScalarCond) 7376 CondTy = VectorType::get(CondTy, VF); 7377 7378 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I); 7379 } 7380 case Instruction::ICmp: 7381 case Instruction::FCmp: { 7382 Type *ValTy = I->getOperand(0)->getType(); 7383 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7384 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7385 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7386 VectorTy = ToVectorTy(ValTy, VF); 7387 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I); 7388 } 7389 case Instruction::Store: 7390 case Instruction::Load: { 7391 unsigned Width = VF; 7392 if (Width > 1) { 7393 InstWidening Decision = getWideningDecision(I, Width); 7394 assert(Decision != CM_Unknown && 7395 "CM decision should be taken at this point"); 7396 if (Decision == CM_Scalarize) 7397 Width = 1; 7398 } 7399 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 7400 return getMemoryInstructionCost(I, VF); 7401 } 7402 case Instruction::ZExt: 7403 case Instruction::SExt: 7404 case Instruction::FPToUI: 7405 case Instruction::FPToSI: 7406 case Instruction::FPExt: 7407 case Instruction::PtrToInt: 7408 case Instruction::IntToPtr: 7409 case Instruction::SIToFP: 7410 case Instruction::UIToFP: 7411 case Instruction::Trunc: 7412 case Instruction::FPTrunc: 7413 case Instruction::BitCast: { 7414 // We optimize the truncation of induction variables having constant 7415 // integer steps. The cost of these truncations is the same as the scalar 7416 // operation. 7417 if (isOptimizableIVTruncate(I, VF)) { 7418 auto *Trunc = cast<TruncInst>(I); 7419 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7420 Trunc->getSrcTy(), Trunc); 7421 } 7422 7423 Type *SrcScalarTy = I->getOperand(0)->getType(); 7424 Type *SrcVecTy = 7425 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7426 if (canTruncateToMinimalBitwidth(I, VF)) { 7427 // This cast is going to be shrunk. This may remove the cast or it might 7428 // turn it into slightly different cast. For example, if MinBW == 16, 7429 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7430 // 7431 // Calculate the modified src and dest types. 7432 Type *MinVecTy = VectorTy; 7433 if (I->getOpcode() == Instruction::Trunc) { 7434 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7435 VectorTy = 7436 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7437 } else if (I->getOpcode() == Instruction::ZExt || 7438 I->getOpcode() == Instruction::SExt) { 7439 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7440 VectorTy = 7441 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7442 } 7443 } 7444 7445 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 7446 return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I); 7447 } 7448 case Instruction::Call: { 7449 bool NeedToScalarize; 7450 CallInst *CI = cast<CallInst>(I); 7451 unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize); 7452 if (getVectorIntrinsicIDForCall(CI, TLI)) 7453 return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI)); 7454 return CallCost; 7455 } 7456 default: 7457 // The cost of executing VF copies of the scalar instruction. This opcode 7458 // is unknown. Assume that it is the same as 'mul'. 7459 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) + 7460 getScalarizationOverhead(I, VF, TTI); 7461 } // end of switch. 7462 } 7463 7464 char LoopVectorize::ID = 0; 7465 7466 static const char lv_name[] = "Loop Vectorization"; 7467 7468 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7469 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7470 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7471 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7472 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7473 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7474 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7475 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7476 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7477 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7478 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7479 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7480 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7481 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7482 7483 namespace llvm { 7484 7485 Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) { 7486 return new LoopVectorize(NoUnrolling, AlwaysVectorize); 7487 } 7488 7489 } // end namespace llvm 7490 7491 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7492 // Check if the pointer operand of a load or store instruction is 7493 // consecutive. 7494 if (auto *Ptr = getPointerOperand(Inst)) 7495 return Legal->isConsecutivePtr(Ptr); 7496 return false; 7497 } 7498 7499 void LoopVectorizationCostModel::collectValuesToIgnore() { 7500 // Ignore ephemeral values. 7501 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7502 7503 // Ignore type-promoting instructions we identified during reduction 7504 // detection. 7505 for (auto &Reduction : *Legal->getReductionVars()) { 7506 RecurrenceDescriptor &RedDes = Reduction.second; 7507 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7508 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7509 } 7510 } 7511 7512 LoopVectorizationCostModel::VectorizationFactor 7513 LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) { 7514 // Width 1 means no vectorize, cost 0 means uncomputed cost. 7515 const LoopVectorizationCostModel::VectorizationFactor NoVectorization = {1U, 7516 0U}; 7517 Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize); 7518 if (!MaybeMaxVF.hasValue()) // Cases considered too costly to vectorize. 7519 return NoVectorization; 7520 7521 if (UserVF) { 7522 DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 7523 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 7524 // Collect the instructions (and their associated costs) that will be more 7525 // profitable to scalarize. 
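// Note (illustrative): when the user forces a width, e.g. VF = 4, only that
// factor is costed and a single VPlan covering VF = 4 is built below; the
// usual search over the powers of two up to MaxVF is skipped entirely.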
7526 CM.selectUserVectorizationFactor(UserVF); 7527 buildVPlans(UserVF, UserVF); 7528 DEBUG(printPlans(dbgs())); 7529 return {UserVF, 0}; 7530 } 7531 7532 unsigned MaxVF = MaybeMaxVF.getValue(); 7533 assert(MaxVF != 0 && "MaxVF is zero."); 7534 7535 for (unsigned VF = 1; VF <= MaxVF; VF *= 2) { 7536 // Collect Uniform and Scalar instructions after vectorization with VF. 7537 CM.collectUniformsAndScalars(VF); 7538 7539 // Collect the instructions (and their associated costs) that will be more 7540 // profitable to scalarize. 7541 if (VF > 1) 7542 CM.collectInstsToScalarize(VF); 7543 } 7544 7545 buildVPlans(1, MaxVF); 7546 DEBUG(printPlans(dbgs())); 7547 if (MaxVF == 1) 7548 return NoVectorization; 7549 7550 // Select the optimal vectorization factor. 7551 return CM.selectVectorizationFactor(MaxVF); 7552 } 7553 7554 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) { 7555 DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF << '\n'); 7556 BestVF = VF; 7557 BestUF = UF; 7558 7559 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 7560 return !Plan->hasVF(VF); 7561 }); 7562 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 7563 } 7564 7565 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 7566 DominatorTree *DT) { 7567 // Perform the actual loop transformation. 7568 7569 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 7570 VPCallbackILV CallbackILV(ILV); 7571 7572 VPTransformState State{BestVF, BestUF, LI, 7573 DT, ILV.Builder, ILV.VectorLoopValueMap, 7574 &ILV, CallbackILV}; 7575 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 7576 7577 //===------------------------------------------------===// 7578 // 7579 // Notice: any optimization or new instruction that go 7580 // into the code below should also be implemented in 7581 // the cost-model. 7582 // 7583 //===------------------------------------------------===// 7584 7585 // 2. Copy and widen instructions from the old loop into the new loop. 7586 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 7587 VPlans.front()->execute(&State); 7588 7589 // 3. Fix the vectorized code: take care of header phi's, live-outs, 7590 // predication, updating analyses. 7591 ILV.fixVectorizedLoop(); 7592 } 7593 7594 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 7595 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 7596 BasicBlock *Latch = OrigLoop->getLoopLatch(); 7597 7598 // We create new control-flow for the vectorized loop, so the original 7599 // condition will be dead after vectorization if it's only used by the 7600 // branch. 7601 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 7602 if (Cmp && Cmp->hasOneUse()) 7603 DeadInstructions.insert(Cmp); 7604 7605 // We create new "steps" for induction variable updates to which the original 7606 // induction variables map. An original update instruction will be dead if 7607 // all its users except the induction variable are dead. 
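// For illustration, a canonical update such as
//   %iv.next = add nuw nsw i64 %iv, 1
// whose only users are the header phi %iv and the latch compare collected
// above becomes dead here, because the vectorized loop emits its own
// induction steps instead of reusing the original update.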
7608 for (auto &Induction : *Legal->getInductionVars()) { 7609 PHINode *Ind = Induction.first; 7610 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 7611 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 7612 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 7613 })) 7614 DeadInstructions.insert(IndUpdate); 7615 } 7616 } 7617 7618 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 7619 7620 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 7621 7622 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 7623 Instruction::BinaryOps BinOp) { 7624 // When unrolling and the VF is 1, we only need to add a simple scalar. 7625 Type *Ty = Val->getType(); 7626 assert(!Ty->isVectorTy() && "Val must be a scalar"); 7627 7628 if (Ty->isFloatingPointTy()) { 7629 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 7630 7631 // Floating point operations had to be 'fast' to enable the unrolling. 7632 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 7633 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 7634 } 7635 Constant *C = ConstantInt::get(Ty, StartIdx); 7636 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 7637 } 7638 7639 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 7640 SmallVector<Metadata *, 4> MDs; 7641 // Reserve first location for self reference to the LoopID metadata node. 7642 MDs.push_back(nullptr); 7643 bool IsUnrollMetadata = false; 7644 MDNode *LoopID = L->getLoopID(); 7645 if (LoopID) { 7646 // First find existing loop unrolling disable metadata. 7647 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 7648 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 7649 if (MD) { 7650 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 7651 IsUnrollMetadata = 7652 S && S->getString().startswith("llvm.loop.unroll.disable"); 7653 } 7654 MDs.push_back(LoopID->getOperand(i)); 7655 } 7656 } 7657 7658 if (!IsUnrollMetadata) { 7659 // Add runtime unroll disable metadata. 7660 LLVMContext &Context = L->getHeader()->getContext(); 7661 SmallVector<Metadata *, 1> DisableOperands; 7662 DisableOperands.push_back( 7663 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 7664 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 7665 MDs.push_back(DisableNode); 7666 MDNode *NewLoopID = MDNode::get(Context, MDs); 7667 // Set operand 0 to refer to the loop id itself. 7668 NewLoopID->replaceOperandWith(0, NewLoopID); 7669 L->setLoopID(NewLoopID); 7670 } 7671 } 7672 7673 namespace { 7674 7675 /// VPWidenRecipe is a recipe for producing a copy of vector type for each 7676 /// Instruction in its ingredients independently, in order. This recipe covers 7677 /// most of the traditional vectorization cases where each ingredient transforms 7678 /// into a vectorized version of itself. 7679 class VPWidenRecipe : public VPRecipeBase { 7680 private: 7681 /// Hold the ingredients by pointing to their original BasicBlock location. 7682 BasicBlock::iterator Begin; 7683 BasicBlock::iterator End; 7684 7685 public: 7686 VPWidenRecipe(Instruction *I) : VPRecipeBase(VPWidenSC) { 7687 End = I->getIterator(); 7688 Begin = End++; 7689 } 7690 7691 ~VPWidenRecipe() override = default; 7692 7693 /// Method to support type inquiry through isa, cast, and dyn_cast. 
7694 static inline bool classof(const VPRecipeBase *V) { 7695 return V->getVPRecipeID() == VPRecipeBase::VPWidenSC; 7696 } 7697 7698 /// Produce widened copies of all Ingredients. 7699 void execute(VPTransformState &State) override { 7700 for (auto &Instr : make_range(Begin, End)) 7701 State.ILV->widenInstruction(Instr); 7702 } 7703 7704 /// Augment the recipe to include Instr, if it lies at its End. 7705 bool appendInstruction(Instruction *Instr) { 7706 if (End != Instr->getIterator()) 7707 return false; 7708 End++; 7709 return true; 7710 } 7711 7712 /// Print the recipe. 7713 void print(raw_ostream &O, const Twine &Indent) const override { 7714 O << " +\n" << Indent << "\"WIDEN\\l\""; 7715 for (auto &Instr : make_range(Begin, End)) 7716 O << " +\n" << Indent << "\" " << VPlanIngredient(&Instr) << "\\l\""; 7717 } 7718 }; 7719 7720 /// A recipe for handling phi nodes of integer and floating-point inductions, 7721 /// producing their vector and scalar values. 7722 class VPWidenIntOrFpInductionRecipe : public VPRecipeBase { 7723 private: 7724 PHINode *IV; 7725 TruncInst *Trunc; 7726 7727 public: 7728 VPWidenIntOrFpInductionRecipe(PHINode *IV, TruncInst *Trunc = nullptr) 7729 : VPRecipeBase(VPWidenIntOrFpInductionSC), IV(IV), Trunc(Trunc) {} 7730 ~VPWidenIntOrFpInductionRecipe() override = default; 7731 7732 /// Method to support type inquiry through isa, cast, and dyn_cast. 7733 static inline bool classof(const VPRecipeBase *V) { 7734 return V->getVPRecipeID() == VPRecipeBase::VPWidenIntOrFpInductionSC; 7735 } 7736 7737 /// Generate the vectorized and scalarized versions of the phi node as 7738 /// needed by their users. 7739 void execute(VPTransformState &State) override { 7740 assert(!State.Instance && "Int or FP induction being replicated."); 7741 State.ILV->widenIntOrFpInduction(IV, Trunc); 7742 } 7743 7744 /// Print the recipe. 7745 void print(raw_ostream &O, const Twine &Indent) const override { 7746 O << " +\n" << Indent << "\"WIDEN-INDUCTION"; 7747 if (Trunc) { 7748 O << "\\l\""; 7749 O << " +\n" << Indent << "\" " << VPlanIngredient(IV) << "\\l\""; 7750 O << " +\n" << Indent << "\" " << VPlanIngredient(Trunc) << "\\l\""; 7751 } else 7752 O << " " << VPlanIngredient(IV) << "\\l\""; 7753 } 7754 }; 7755 7756 /// A recipe for handling all phi nodes except for integer and FP inductions. 7757 class VPWidenPHIRecipe : public VPRecipeBase { 7758 private: 7759 PHINode *Phi; 7760 7761 public: 7762 VPWidenPHIRecipe(PHINode *Phi) : VPRecipeBase(VPWidenPHISC), Phi(Phi) {} 7763 ~VPWidenPHIRecipe() override = default; 7764 7765 /// Method to support type inquiry through isa, cast, and dyn_cast. 7766 static inline bool classof(const VPRecipeBase *V) { 7767 return V->getVPRecipeID() == VPRecipeBase::VPWidenPHISC; 7768 } 7769 7770 /// Generate the phi/select nodes. 7771 void execute(VPTransformState &State) override { 7772 State.ILV->widenPHIInstruction(Phi, State.UF, State.VF); 7773 } 7774 7775 /// Print the recipe. 7776 void print(raw_ostream &O, const Twine &Indent) const override { 7777 O << " +\n" << Indent << "\"WIDEN-PHI " << VPlanIngredient(Phi) << "\\l\""; 7778 } 7779 }; 7780 7781 /// A recipe for vectorizing a phi-node as a sequence of mask-based select 7782 /// instructions. 7783 class VPBlendRecipe : public VPRecipeBase { 7784 private: 7785 PHINode *Phi; 7786 7787 /// The blend operation is a User of a mask, if not null. 
7788 std::unique_ptr<VPUser> User; 7789 7790 public: 7791 VPBlendRecipe(PHINode *Phi, ArrayRef<VPValue *> Masks) 7792 : VPRecipeBase(VPBlendSC), Phi(Phi) { 7793 assert((Phi->getNumIncomingValues() == 1 || 7794 Phi->getNumIncomingValues() == Masks.size()) && 7795 "Expected the same number of incoming values and masks"); 7796 if (!Masks.empty()) 7797 User.reset(new VPUser(Masks)); 7798 } 7799 7800 /// Method to support type inquiry through isa, cast, and dyn_cast. 7801 static inline bool classof(const VPRecipeBase *V) { 7802 return V->getVPRecipeID() == VPRecipeBase::VPBlendSC; 7803 } 7804 7805 /// Generate the phi/select nodes. 7806 void execute(VPTransformState &State) override { 7807 State.ILV->setDebugLocFromInst(State.Builder, Phi); 7808 // We know that all PHIs in non-header blocks are converted into 7809 // selects, so we don't have to worry about the insertion order and we 7810 // can just use the builder. 7811 // At this point we generate the predication tree. There may be 7812 // duplications since this is a simple recursive scan, but future 7813 // optimizations will clean it up. 7814 7815 unsigned NumIncoming = Phi->getNumIncomingValues(); 7816 7817 assert((User || NumIncoming == 1) && 7818 "Multiple predecessors with predecessors having a full mask"); 7819 // Generate a sequence of selects of the form: 7820 // SELECT(Mask3, In3, 7821 // SELECT(Mask2, In2, 7822 // ( ...))) 7823 InnerLoopVectorizer::VectorParts Entry(State.UF); 7824 for (unsigned In = 0; In < NumIncoming; ++In) { 7825 for (unsigned Part = 0; Part < State.UF; ++Part) { 7826 // We might have single edge PHIs (blocks) - use an identity 7827 // 'select' for the first PHI operand. 7828 Value *In0 = 7829 State.ILV->getOrCreateVectorValue(Phi->getIncomingValue(In), Part); 7830 if (In == 0) 7831 Entry[Part] = In0; // Initialize with the first incoming value. 7832 else { 7833 // Select between the current value and the previous incoming edge 7834 // based on the incoming mask. 7835 Value *Cond = State.get(User->getOperand(In), Part); 7836 Entry[Part] = 7837 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 7838 } 7839 } 7840 } 7841 for (unsigned Part = 0; Part < State.UF; ++Part) 7842 State.ValueMap.setVectorValue(Phi, Part, Entry[Part]); 7843 } 7844 7845 /// Print the recipe. 7846 void print(raw_ostream &O, const Twine &Indent) const override { 7847 O << " +\n" << Indent << "\"BLEND "; 7848 Phi->printAsOperand(O, false); 7849 O << " ="; 7850 if (!User) { 7851 // Not a User of any mask: not really blending, this is a 7852 // single-predecessor phi. 7853 O << " "; 7854 Phi->getIncomingValue(0)->printAsOperand(O, false); 7855 } else { 7856 for (unsigned I = 0, E = User->getNumOperands(); I < E; ++I) { 7857 O << " "; 7858 Phi->getIncomingValue(I)->printAsOperand(O, false); 7859 O << "/"; 7860 User->getOperand(I)->printAsOperand(O); 7861 } 7862 } 7863 O << "\\l\""; 7864 } 7865 }; 7866 7867 /// VPInterleaveRecipe is a recipe for transforming an interleave group of load 7868 /// or stores into one wide load/store and shuffles. 7869 class VPInterleaveRecipe : public VPRecipeBase { 7870 private: 7871 const InterleaveGroup *IG; 7872 7873 public: 7874 VPInterleaveRecipe(const InterleaveGroup *IG) 7875 : VPRecipeBase(VPInterleaveSC), IG(IG) {} 7876 ~VPInterleaveRecipe() override = default; 7877 7878 /// Method to support type inquiry through isa, cast, and dyn_cast. 
7879 static inline bool classof(const VPRecipeBase *V) { 7880 return V->getVPRecipeID() == VPRecipeBase::VPInterleaveSC; 7881 } 7882 7883 /// Generate the wide load or store, and shuffles. 7884 void execute(VPTransformState &State) override { 7885 assert(!State.Instance && "Interleave group being replicated."); 7886 State.ILV->vectorizeInterleaveGroup(IG->getInsertPos()); 7887 } 7888 7889 /// Print the recipe. 7890 void print(raw_ostream &O, const Twine &Indent) const override; 7891 7892 const InterleaveGroup *getInterleaveGroup() { return IG; } 7893 }; 7894 7895 /// VPReplicateRecipe replicates a given instruction producing multiple scalar 7896 /// copies of the original scalar type, one per lane, instead of producing a 7897 /// single copy of widened type for all lanes. If the instruction is known to be 7898 /// uniform only one copy, per lane zero, will be generated. 7899 class VPReplicateRecipe : public VPRecipeBase { 7900 private: 7901 /// The instruction being replicated. 7902 Instruction *Ingredient; 7903 7904 /// Indicator if only a single replica per lane is needed. 7905 bool IsUniform; 7906 7907 /// Indicator if the replicas are also predicated. 7908 bool IsPredicated; 7909 7910 /// Indicator if the scalar values should also be packed into a vector. 7911 bool AlsoPack; 7912 7913 public: 7914 VPReplicateRecipe(Instruction *I, bool IsUniform, bool IsPredicated = false) 7915 : VPRecipeBase(VPReplicateSC), Ingredient(I), IsUniform(IsUniform), 7916 IsPredicated(IsPredicated) { 7917 // Retain the previous behavior of predicateInstructions(), where an 7918 // insert-element of a predicated instruction got hoisted into the 7919 // predicated basic block iff it was its only user. This is achieved by 7920 // having predicated instructions also pack their values into a vector by 7921 // default unless they have a replicated user which uses their scalar value. 7922 AlsoPack = IsPredicated && !I->use_empty(); 7923 } 7924 7925 ~VPReplicateRecipe() override = default; 7926 7927 /// Method to support type inquiry through isa, cast, and dyn_cast. 7928 static inline bool classof(const VPRecipeBase *V) { 7929 return V->getVPRecipeID() == VPRecipeBase::VPReplicateSC; 7930 } 7931 7932 /// Generate replicas of the desired Ingredient. Replicas will be generated 7933 /// for all parts and lanes unless a specific part and lane are specified in 7934 /// the \p State. 7935 void execute(VPTransformState &State) override; 7936 7937 void setAlsoPack(bool Pack) { AlsoPack = Pack; } 7938 7939 /// Print the recipe. 7940 void print(raw_ostream &O, const Twine &Indent) const override { 7941 O << " +\n" 7942 << Indent << "\"" << (IsUniform ? "CLONE " : "REPLICATE ") 7943 << VPlanIngredient(Ingredient); 7944 if (AlsoPack) 7945 O << " (S->V)"; 7946 O << "\\l\""; 7947 } 7948 }; 7949 7950 /// A recipe for generating conditional branches on the bits of a mask. 7951 class VPBranchOnMaskRecipe : public VPRecipeBase { 7952 private: 7953 std::unique_ptr<VPUser> User; 7954 7955 public: 7956 VPBranchOnMaskRecipe(VPValue *BlockInMask) : VPRecipeBase(VPBranchOnMaskSC) { 7957 if (BlockInMask) // nullptr means all-one mask. 7958 User.reset(new VPUser({BlockInMask})); 7959 } 7960 7961 /// Method to support type inquiry through isa, cast, and dyn_cast. 7962 static inline bool classof(const VPRecipeBase *V) { 7963 return V->getVPRecipeID() == VPRecipeBase::VPBranchOnMaskSC; 7964 } 7965 7966 /// Generate the extraction of the appropriate bit from the block mask and the 7967 /// conditional branch. 
7968 void execute(VPTransformState &State) override; 7969 7970 /// Print the recipe. 7971 void print(raw_ostream &O, const Twine &Indent) const override { 7972 O << " +\n" << Indent << "\"BRANCH-ON-MASK "; 7973 if (User) 7974 O << *User->getOperand(0); 7975 else 7976 O << " All-One"; 7977 O << "\\l\""; 7978 } 7979 }; 7980 7981 /// VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when 7982 /// control converges back from a Branch-on-Mask. The phi nodes are needed in 7983 /// order to merge values that are set under such a branch and feed their uses. 7984 /// The phi nodes can be scalar or vector depending on the users of the value. 7985 /// This recipe works in concert with VPBranchOnMaskRecipe. 7986 class VPPredInstPHIRecipe : public VPRecipeBase { 7987 private: 7988 Instruction *PredInst; 7989 7990 public: 7991 /// Construct a VPPredInstPHIRecipe given \p PredInst whose value needs a phi 7992 /// nodes after merging back from a Branch-on-Mask. 7993 VPPredInstPHIRecipe(Instruction *PredInst) 7994 : VPRecipeBase(VPPredInstPHISC), PredInst(PredInst) {} 7995 ~VPPredInstPHIRecipe() override = default; 7996 7997 /// Method to support type inquiry through isa, cast, and dyn_cast. 7998 static inline bool classof(const VPRecipeBase *V) { 7999 return V->getVPRecipeID() == VPRecipeBase::VPPredInstPHISC; 8000 } 8001 8002 /// Generates phi nodes for live-outs as needed to retain SSA form. 8003 void execute(VPTransformState &State) override; 8004 8005 /// Print the recipe. 8006 void print(raw_ostream &O, const Twine &Indent) const override { 8007 O << " +\n" 8008 << Indent << "\"PHI-PREDICATED-INSTRUCTION " << VPlanIngredient(PredInst) 8009 << "\\l\""; 8010 } 8011 }; 8012 8013 /// A Recipe for widening load/store operations. 8014 /// TODO: We currently execute only per-part unless a specific instance is 8015 /// provided. 8016 class VPWidenMemoryInstructionRecipe : public VPRecipeBase { 8017 private: 8018 Instruction &Instr; 8019 std::unique_ptr<VPUser> User; 8020 8021 public: 8022 VPWidenMemoryInstructionRecipe(Instruction &Instr, VPValue *Mask) 8023 : VPRecipeBase(VPWidenMemoryInstructionSC), Instr(Instr) { 8024 if (Mask) // Create a VPInstruction to register as a user of the mask. 8025 User.reset(new VPUser({Mask})); 8026 } 8027 8028 /// Method to support type inquiry through isa, cast, and dyn_cast. 8029 static inline bool classof(const VPRecipeBase *V) { 8030 return V->getVPRecipeID() == VPRecipeBase::VPWidenMemoryInstructionSC; 8031 } 8032 8033 /// Generate the wide load/store. 8034 void execute(VPTransformState &State) override { 8035 if (!User) 8036 return State.ILV->vectorizeMemoryInstruction(&Instr); 8037 8038 // Last (and currently only) operand is a mask. 8039 InnerLoopVectorizer::VectorParts MaskValues(State.UF); 8040 VPValue *Mask = User->getOperand(User->getNumOperands() - 1); 8041 for (unsigned Part = 0; Part < State.UF; ++Part) 8042 MaskValues[Part] = State.get(Mask, Part); 8043 State.ILV->vectorizeMemoryInstruction(&Instr, &MaskValues); 8044 } 8045 8046 /// Print the recipe. 
8047 void print(raw_ostream &O, const Twine &Indent) const override { 8048 O << " +\n" << Indent << "\"WIDEN " << VPlanIngredient(&Instr); 8049 if (User) { 8050 O << ", "; 8051 User->getOperand(0)->printAsOperand(O); 8052 } 8053 O << "\\l\""; 8054 } 8055 }; 8056 } // end anonymous namespace 8057 8058 bool LoopVectorizationPlanner::getDecisionAndClampRange( 8059 const std::function<bool(unsigned)> &Predicate, VFRange &Range) { 8060 assert(Range.End > Range.Start && "Trying to test an empty VF range."); 8061 bool PredicateAtRangeStart = Predicate(Range.Start); 8062 8063 for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2) 8064 if (Predicate(TmpVF) != PredicateAtRangeStart) { 8065 Range.End = TmpVF; 8066 break; 8067 } 8068 8069 return PredicateAtRangeStart; 8070 } 8071 8072 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8073 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8074 /// of VF's starting at a given VF and extending it as much as possible. Each 8075 /// vectorization decision can potentially shorten this sub-range during 8076 /// buildVPlan(). 8077 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) { 8078 8079 // Collect conditions feeding internal conditional branches; they need to be 8080 // represented in VPlan for it to model masking. 8081 SmallPtrSet<Value *, 1> NeedDef; 8082 8083 auto *Latch = OrigLoop->getLoopLatch(); 8084 for (BasicBlock *BB : OrigLoop->blocks()) { 8085 if (BB == Latch) 8086 continue; 8087 BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator()); 8088 if (Branch && Branch->isConditional()) 8089 NeedDef.insert(Branch->getCondition()); 8090 } 8091 8092 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 8093 VFRange SubRange = {VF, MaxVF + 1}; 8094 VPlans.push_back(buildVPlan(SubRange, NeedDef)); 8095 VF = SubRange.End; 8096 } 8097 } 8098 8099 VPValue *LoopVectorizationPlanner::createEdgeMask(BasicBlock *Src, 8100 BasicBlock *Dst, 8101 VPlanPtr &Plan) { 8102 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8103 8104 // Look for cached value. 8105 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8106 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8107 if (ECEntryIt != EdgeMaskCache.end()) 8108 return ECEntryIt->second; 8109 8110 VPValue *SrcMask = createBlockInMask(Src, Plan); 8111 8112 // The terminator has to be a branch inst! 8113 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8114 assert(BI && "Unexpected terminator found"); 8115 8116 if (!BI->isConditional()) 8117 return EdgeMaskCache[Edge] = SrcMask; 8118 8119 VPValue *EdgeMask = Plan->getVPValue(BI->getCondition()); 8120 assert(EdgeMask && "No Edge Mask found for condition"); 8121 8122 if (BI->getSuccessor(0) != Dst) 8123 EdgeMask = Builder.createNot(EdgeMask); 8124 8125 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND. 8126 EdgeMask = Builder.createAnd(EdgeMask, SrcMask); 8127 8128 return EdgeMaskCache[Edge] = EdgeMask; 8129 } 8130 8131 VPValue *LoopVectorizationPlanner::createBlockInMask(BasicBlock *BB, 8132 VPlanPtr &Plan) { 8133 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8134 8135 // Look for cached value. 8136 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8137 if (BCEntryIt != BlockMaskCache.end()) 8138 return BCEntryIt->second; 8139 8140 // All-one mask is modelled as no-mask following the convention for masked 8141 // load/store/gather/scatter. Initialize BlockMask to no-mask. 
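// For illustration, given a hypothetical if-then block inside the loop body:
//   body:    br i1 %cond, label %if.then, label %latch
// the mask of %if.then is the edge mask of (body -> if.then), i.e. the
// VPValue recorded for %cond; it is only ANDed with body's own mask when that
// mask is not the all-one (nullptr) mask.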
8142 VPValue *BlockMask = nullptr; 8143 8144 // Loop incoming mask is all-one. 8145 if (OrigLoop->getHeader() == BB) 8146 return BlockMaskCache[BB] = BlockMask; 8147 8148 // This is the block mask. We OR all incoming edges. 8149 for (auto *Predecessor : predecessors(BB)) { 8150 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8151 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8152 return BlockMaskCache[BB] = EdgeMask; 8153 8154 if (!BlockMask) { // BlockMask has its initialized nullptr value. 8155 BlockMask = EdgeMask; 8156 continue; 8157 } 8158 8159 BlockMask = Builder.createOr(BlockMask, EdgeMask); 8160 } 8161 8162 return BlockMaskCache[BB] = BlockMask; 8163 } 8164 8165 VPInterleaveRecipe * 8166 LoopVectorizationPlanner::tryToInterleaveMemory(Instruction *I, 8167 VFRange &Range) { 8168 const InterleaveGroup *IG = Legal->getInterleavedAccessGroup(I); 8169 if (!IG) 8170 return nullptr; 8171 8172 // Now check if IG is relevant for VF's in the given range. 8173 auto isIGMember = [&](Instruction *I) -> std::function<bool(unsigned)> { 8174 return [=](unsigned VF) -> bool { 8175 return (VF >= 2 && // Query is illegal for VF == 1 8176 CM.getWideningDecision(I, VF) == 8177 LoopVectorizationCostModel::CM_Interleave); 8178 }; 8179 }; 8180 if (!getDecisionAndClampRange(isIGMember(I), Range)) 8181 return nullptr; 8182 8183 // I is a member of an InterleaveGroup for VF's in the (possibly trimmed) 8184 // range. If it's the primary member of the IG construct a VPInterleaveRecipe. 8185 // Otherwise, it's an adjunct member of the IG, do not construct any Recipe. 8186 assert(I == IG->getInsertPos() && 8187 "Generating a recipe for an adjunct member of an interleave group"); 8188 8189 return new VPInterleaveRecipe(IG); 8190 } 8191 8192 VPWidenMemoryInstructionRecipe * 8193 LoopVectorizationPlanner::tryToWidenMemory(Instruction *I, VFRange &Range, 8194 VPlanPtr &Plan) { 8195 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 8196 return nullptr; 8197 8198 auto willWiden = [&](unsigned VF) -> bool { 8199 if (VF == 1) 8200 return false; 8201 if (CM.isScalarAfterVectorization(I, VF) || 8202 CM.isProfitableToScalarize(I, VF)) 8203 return false; 8204 LoopVectorizationCostModel::InstWidening Decision = 8205 CM.getWideningDecision(I, VF); 8206 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8207 "CM decision should be taken at this point."); 8208 assert(Decision != LoopVectorizationCostModel::CM_Interleave && 8209 "Interleave memory opportunity should be caught earlier."); 8210 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8211 }; 8212 8213 if (!getDecisionAndClampRange(willWiden, Range)) 8214 return nullptr; 8215 8216 VPValue *Mask = nullptr; 8217 if (Legal->isMaskRequired(I)) 8218 Mask = createBlockInMask(I->getParent(), Plan); 8219 8220 return new VPWidenMemoryInstructionRecipe(*I, Mask); 8221 } 8222 8223 VPWidenIntOrFpInductionRecipe * 8224 LoopVectorizationPlanner::tryToOptimizeInduction(Instruction *I, 8225 VFRange &Range) { 8226 if (PHINode *Phi = dyn_cast<PHINode>(I)) { 8227 // Check if this is an integer or fp induction. If so, build the recipe that 8228 // produces its scalar and vector values. 
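// For illustration, a hypothetical counter phi such as
//   %i = phi i64 [ 0, %preheader ], [ %i.next, %latch ]
// is recorded by Legality as an IK_IntInduction and gets a
// VPWidenIntOrFpInductionRecipe below, which later produces both its widened
// vector value and the scalar steps its users may require.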
8229 InductionDescriptor II = Legal->getInductionVars()->lookup(Phi); 8230 if (II.getKind() == InductionDescriptor::IK_IntInduction || 8231 II.getKind() == InductionDescriptor::IK_FpInduction) 8232 return new VPWidenIntOrFpInductionRecipe(Phi); 8233 8234 return nullptr; 8235 } 8236 8237 // Optimize the special case where the source is a constant integer 8238 // induction variable. Notice that we can only optimize the 'trunc' case 8239 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8240 // (c) other casts depend on pointer size. 8241 8242 // Determine whether \p K is a truncation based on an induction variable that 8243 // can be optimized. 8244 auto isOptimizableIVTruncate = 8245 [&](Instruction *K) -> std::function<bool(unsigned)> { 8246 return 8247 [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); }; 8248 }; 8249 8250 if (isa<TruncInst>(I) && 8251 getDecisionAndClampRange(isOptimizableIVTruncate(I), Range)) 8252 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 8253 cast<TruncInst>(I)); 8254 return nullptr; 8255 } 8256 8257 VPBlendRecipe * 8258 LoopVectorizationPlanner::tryToBlend(Instruction *I, VPlanPtr &Plan) { 8259 PHINode *Phi = dyn_cast<PHINode>(I); 8260 if (!Phi || Phi->getParent() == OrigLoop->getHeader()) 8261 return nullptr; 8262 8263 // We know that all PHIs in non-header blocks are converted into selects, so 8264 // we don't have to worry about the insertion order and we can just use the 8265 // builder. At this point we generate the predication tree. There may be 8266 // duplications since this is a simple recursive scan, but future 8267 // optimizations will clean it up. 8268 8269 SmallVector<VPValue *, 2> Masks; 8270 unsigned NumIncoming = Phi->getNumIncomingValues(); 8271 for (unsigned In = 0; In < NumIncoming; In++) { 8272 VPValue *EdgeMask = 8273 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8274 assert((EdgeMask || NumIncoming == 1) && 8275 "Multiple predecessors with one having a full mask"); 8276 if (EdgeMask) 8277 Masks.push_back(EdgeMask); 8278 } 8279 return new VPBlendRecipe(Phi, Masks); 8280 } 8281 8282 bool LoopVectorizationPlanner::tryToWiden(Instruction *I, VPBasicBlock *VPBB, 8283 VFRange &Range) { 8284 if (Legal->isScalarWithPredication(I)) 8285 return false; 8286 8287 auto IsVectorizableOpcode = [](unsigned Opcode) { 8288 switch (Opcode) { 8289 case Instruction::Add: 8290 case Instruction::And: 8291 case Instruction::AShr: 8292 case Instruction::BitCast: 8293 case Instruction::Br: 8294 case Instruction::Call: 8295 case Instruction::FAdd: 8296 case Instruction::FCmp: 8297 case Instruction::FDiv: 8298 case Instruction::FMul: 8299 case Instruction::FPExt: 8300 case Instruction::FPToSI: 8301 case Instruction::FPToUI: 8302 case Instruction::FPTrunc: 8303 case Instruction::FRem: 8304 case Instruction::FSub: 8305 case Instruction::GetElementPtr: 8306 case Instruction::ICmp: 8307 case Instruction::IntToPtr: 8308 case Instruction::Load: 8309 case Instruction::LShr: 8310 case Instruction::Mul: 8311 case Instruction::Or: 8312 case Instruction::PHI: 8313 case Instruction::PtrToInt: 8314 case Instruction::SDiv: 8315 case Instruction::Select: 8316 case Instruction::SExt: 8317 case Instruction::Shl: 8318 case Instruction::SIToFP: 8319 case Instruction::SRem: 8320 case Instruction::Store: 8321 case Instruction::Sub: 8322 case Instruction::Trunc: 8323 case Instruction::UDiv: 8324 case Instruction::UIToFP: 8325 case Instruction::URem: 8326 case Instruction::Xor: 8327 case 
Instruction::ZExt: 8328 return true; 8329 } 8330 return false; 8331 }; 8332 8333 if (!IsVectorizableOpcode(I->getOpcode())) 8334 return false; 8335 8336 if (CallInst *CI = dyn_cast<CallInst>(I)) { 8337 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8338 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8339 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect)) 8340 return false; 8341 } 8342 8343 auto willWiden = [&](unsigned VF) -> bool { 8344 if (!isa<PHINode>(I) && (CM.isScalarAfterVectorization(I, VF) || 8345 CM.isProfitableToScalarize(I, VF))) 8346 return false; 8347 if (CallInst *CI = dyn_cast<CallInst>(I)) { 8348 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8349 // The following case may be scalarized depending on the VF. 8350 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8351 // version of the instruction. 8352 // Is it beneficial to perform intrinsic call compared to lib call? 8353 bool NeedToScalarize; 8354 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 8355 bool UseVectorIntrinsic = 8356 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 8357 return UseVectorIntrinsic || !NeedToScalarize; 8358 } 8359 if (isa<LoadInst>(I) || isa<StoreInst>(I)) { 8360 assert(CM.getWideningDecision(I, VF) == 8361 LoopVectorizationCostModel::CM_Scalarize && 8362 "Memory widening decisions should have been taken care by now"); 8363 return false; 8364 } 8365 return true; 8366 }; 8367 8368 if (!getDecisionAndClampRange(willWiden, Range)) 8369 return false; 8370 8371 // Success: widen this instruction. We optimize the common case where 8372 // consecutive instructions can be represented by a single recipe. 8373 if (!VPBB->empty()) { 8374 VPWidenRecipe *LastWidenRecipe = dyn_cast<VPWidenRecipe>(&VPBB->back()); 8375 if (LastWidenRecipe && LastWidenRecipe->appendInstruction(I)) 8376 return true; 8377 } 8378 8379 VPBB->appendRecipe(new VPWidenRecipe(I)); 8380 return true; 8381 } 8382 8383 VPBasicBlock *LoopVectorizationPlanner::handleReplication( 8384 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8385 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 8386 VPlanPtr &Plan) { 8387 bool IsUniform = getDecisionAndClampRange( 8388 [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); }, 8389 Range); 8390 8391 bool IsPredicated = Legal->isScalarWithPredication(I); 8392 auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated); 8393 8394 // Find if I uses a predicated instruction. If so, it will use its scalar 8395 // value. Avoid hoisting the insert-element which packs the scalar value into 8396 // a vector value, as that happens iff all users use the vector value. 8397 for (auto &Op : I->operands()) 8398 if (auto *PredInst = dyn_cast<Instruction>(Op)) 8399 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 8400 PredInst2Recipe[PredInst]->setAlsoPack(false); 8401 8402 // Finalize the recipe for Instr, first if it is not predicated. 8403 if (!IsPredicated) { 8404 DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8405 VPBB->appendRecipe(Recipe); 8406 return VPBB; 8407 } 8408 DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8409 assert(VPBB->getSuccessors().empty() && 8410 "VPBB has successors when handling predicated replication."); 8411 // Record predicated instructions for above packing optimizations. 
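// For illustration, a hypothetical predicated division %q = sdiv i32 %a, %b
// is turned into a "pred.sdiv" region by createReplicateRegion() below: a
// BRANCH-ON-MASK entry block, a predicated block replicating the scalar sdiv
// per lane, and a continue block whose PHI-PREDICATED-INSTRUCTION recipe
// merges the result back into the enclosing flow.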
8412 PredInst2Recipe[I] = Recipe; 8413 VPBlockBase *Region = 8414 VPBB->setOneSuccessor(createReplicateRegion(I, Recipe, Plan)); 8415 return cast<VPBasicBlock>(Region->setOneSuccessor(new VPBasicBlock())); 8416 } 8417 8418 VPRegionBlock * 8419 LoopVectorizationPlanner::createReplicateRegion(Instruction *Instr, 8420 VPRecipeBase *PredRecipe, 8421 VPlanPtr &Plan) { 8422 // Instructions marked for predication are replicated and placed under an 8423 // if-then construct to prevent side-effects. 8424 8425 // Generate recipes to compute the block mask for this region. 8426 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8427 8428 // Build the triangular if-then region. 8429 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8430 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8431 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8432 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8433 auto *PHIRecipe = 8434 Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr); 8435 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8436 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8437 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8438 8439 // Note: first set Entry as region entry and then connect successors starting 8440 // from it in order, to propagate the "parent" of each VPBasicBlock. 8441 Entry->setTwoSuccessors(Pred, Exit); 8442 Pred->setOneSuccessor(Exit); 8443 8444 return Region; 8445 } 8446 8447 LoopVectorizationPlanner::VPlanPtr 8448 LoopVectorizationPlanner::buildVPlan(VFRange &Range, 8449 const SmallPtrSetImpl<Value *> &NeedDef) { 8450 EdgeMaskCache.clear(); 8451 BlockMaskCache.clear(); 8452 DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 8453 DenseMap<Instruction *, Instruction *> SinkAfterInverse; 8454 8455 // Collect instructions from the original loop that will become trivially dead 8456 // in the vectorized loop. We don't need to vectorize these instructions. For 8457 // example, original induction update instructions can become dead because we 8458 // separately emit induction "steps" when generating code for the new loop. 8459 // Similarly, we create a new latch condition when setting up the structure 8460 // of the new loop, so the old one can become dead. 8461 SmallPtrSet<Instruction *, 4> DeadInstructions; 8462 collectTriviallyDeadInstructions(DeadInstructions); 8463 8464 // Hold a mapping from predicated instructions to their recipes, in order to 8465 // fix their AlsoPack behavior if a user is determined to replicate and use a 8466 // scalar instead of vector value. 8467 DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe; 8468 8469 // Create a dummy pre-entry VPBasicBlock to start building the VPlan. 8470 VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry"); 8471 auto Plan = llvm::make_unique<VPlan>(VPBB); 8472 8473 // Represent values that will have defs inside VPlan. 8474 for (Value *V : NeedDef) 8475 Plan->addVPValue(V); 8476 8477 // Scan the body of the loop in a topological order to visit each basic block 8478 // after having visited its predecessor basic blocks. 8479 LoopBlocksDFS DFS(OrigLoop); 8480 DFS.perform(LI); 8481 8482 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 8483 // Relevant instructions from basic block BB will be grouped into VPRecipe 8484 // ingredients and fill a new VPBasicBlock. 
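// For illustration, a hypothetical body block containing an induction phi, a
// chain of widenable arithmetic and a consecutive store would typically yield
// one WIDEN-INDUCTION recipe, a single WIDEN recipe covering the arithmetic
// instructions, and a widened memory recipe (or a replicate region if the
// store must be predicated), all appended to the new VPBasicBlock.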
8485 unsigned VPBBsForBB = 0; 8486 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName()); 8487 VPBB->setOneSuccessor(FirstVPBBForBB); 8488 VPBB = FirstVPBBForBB; 8489 Builder.setInsertPoint(VPBB); 8490 8491 std::vector<Instruction *> Ingredients; 8492 8493 // Organize the ingredients to vectorize from current basic block in the 8494 // right order. 8495 for (Instruction &I : *BB) { 8496 Instruction *Instr = &I; 8497 8498 // First filter out irrelevant instructions, to ensure no recipes are 8499 // built for them. 8500 if (isa<BranchInst>(Instr) || isa<DbgInfoIntrinsic>(Instr) || 8501 DeadInstructions.count(Instr)) 8502 continue; 8503 8504 // I is a member of an InterleaveGroup for Range.Start. If it's an adjunct 8505 // member of the IG, do not construct any Recipe for it. 8506 const InterleaveGroup *IG = Legal->getInterleavedAccessGroup(Instr); 8507 if (IG && Instr != IG->getInsertPos() && 8508 Range.Start >= 2 && // Query is illegal for VF == 1 8509 CM.getWideningDecision(Instr, Range.Start) == 8510 LoopVectorizationCostModel::CM_Interleave) { 8511 if (SinkAfterInverse.count(Instr)) 8512 Ingredients.push_back(SinkAfterInverse.find(Instr)->second); 8513 continue; 8514 } 8515 8516 // Move instructions to handle first-order recurrences, step 1: avoid 8517 // handling this instruction until after we've handled the instruction it 8518 // should follow. 8519 auto SAIt = SinkAfter.find(Instr); 8520 if (SAIt != SinkAfter.end()) { 8521 DEBUG(dbgs() << "Sinking" << *SAIt->first << " after" << *SAIt->second 8522 << " to vectorize a 1st order recurrence.\n"); 8523 SinkAfterInverse[SAIt->second] = Instr; 8524 continue; 8525 } 8526 8527 Ingredients.push_back(Instr); 8528 8529 // Move instructions to handle first-order recurrences, step 2: push the 8530 // instruction to be sunk at its insertion point. 8531 auto SAInvIt = SinkAfterInverse.find(Instr); 8532 if (SAInvIt != SinkAfterInverse.end()) 8533 Ingredients.push_back(SAInvIt->second); 8534 } 8535 8536 // Introduce each ingredient into VPlan. 8537 for (Instruction *Instr : Ingredients) { 8538 VPRecipeBase *Recipe = nullptr; 8539 8540 // Check if Instr should belong to an interleave memory recipe, or already 8541 // does. In the latter case Instr is irrelevant. 8542 if ((Recipe = tryToInterleaveMemory(Instr, Range))) { 8543 VPBB->appendRecipe(Recipe); 8544 continue; 8545 } 8546 8547 // Check if Instr is a memory operation that should be widened. 8548 if ((Recipe = tryToWidenMemory(Instr, Range, Plan))) { 8549 VPBB->appendRecipe(Recipe); 8550 continue; 8551 } 8552 8553 // Check if Instr should form some PHI recipe. 8554 if ((Recipe = tryToOptimizeInduction(Instr, Range))) { 8555 VPBB->appendRecipe(Recipe); 8556 continue; 8557 } 8558 if ((Recipe = tryToBlend(Instr, Plan))) { 8559 VPBB->appendRecipe(Recipe); 8560 continue; 8561 } 8562 if (PHINode *Phi = dyn_cast<PHINode>(Instr)) { 8563 VPBB->appendRecipe(new VPWidenPHIRecipe(Phi)); 8564 continue; 8565 } 8566 8567 // Check if Instr is to be widened by a general VPWidenRecipe, after 8568 // having first checked for specific widening recipes that deal with 8569 // Interleave Groups, Inductions and Phi nodes. 8570 if (tryToWiden(Instr, VPBB, Range)) 8571 continue; 8572 8573 // Otherwise, if all widening options failed, Instruction is to be 8574 // replicated. This may create a successor for VPBB. 8575 VPBasicBlock *NextVPBB = 8576 handleReplication(Instr, Range, VPBB, PredInst2Recipe, Plan); 8577 if (NextVPBB != VPBB) { 8578 VPBB = NextVPBB; 8579 VPBB->setName(BB->hasName() ? BB->getName() + "." 
8580                                     : "");
8581       }
8582     }
8583   }
8584 
8585   // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
8586   // may also be empty, such as the last one, VPBB, reflecting original
8587   // basic blocks with no recipes.
8588   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
8589   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
8590   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
8591   PreEntry->disconnectSuccessor(Entry);
8592   delete PreEntry;
8593 
8594   std::string PlanName;
8595   raw_string_ostream RSO(PlanName);
8596   unsigned VF = Range.Start;
8597   Plan->addVF(VF);
8598   RSO << "Initial VPlan for VF={" << VF;
8599   for (VF *= 2; VF < Range.End; VF *= 2) {
8600     Plan->addVF(VF);
8601     RSO << "," << VF;
8602   }
8603   RSO << "},UF>=1";
8604   RSO.flush();
8605   Plan->setName(PlanName);
8606 
8607   return Plan;
8608 }
8609 
8610 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const {
8611   O << " +\n"
8612     << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
8613   IG->getInsertPos()->printAsOperand(O, false);
8614   O << "\\l\"";
8615   for (unsigned i = 0; i < IG->getFactor(); ++i)
8616     if (Instruction *I = IG->getMember(i))
8617       O << " +\n"
8618         << Indent << "\" " << VPlanIngredient(I) << " " << i << "\\l\"";
8619 }
8620 
8621 void VPReplicateRecipe::execute(VPTransformState &State) {
8622   if (State.Instance) { // Generate a single instance.
8623     State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
8624     // Insert the scalar instance, packing it into a vector.
8625     if (AlsoPack && State.VF > 1) {
8626       // If we're constructing lane 0, initialize to start from undef.
8627       if (State.Instance->Lane == 0) {
8628         Value *Undef =
8629             UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
8630         State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
8631       }
8632       State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
8633     }
8634     return;
8635   }
8636 
8637   // Generate scalar instances for all VF lanes of all UF parts, unless the
8638   // instruction is uniform, in which case generate only the first lane for
8639   // each of the UF parts.
8640   unsigned EndLane = IsUniform ? 1 : State.VF;
8641   for (unsigned Part = 0; Part < State.UF; ++Part)
8642     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
8643       State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
8644 }
8645 
8646 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
8647   assert(State.Instance && "Branch on Mask works only on single instance.");
8648 
8649   unsigned Part = State.Instance->Part;
8650   unsigned Lane = State.Instance->Lane;
8651 
8652   Value *ConditionBit = nullptr;
8653   if (!User) // Block in mask is all-one.
8654     ConditionBit = State.Builder.getTrue();
8655   else {
8656     VPValue *BlockInMask = User->getOperand(0);
8657     ConditionBit = State.get(BlockInMask, Part);
8658     if (ConditionBit->getType()->isVectorTy())
8659       ConditionBit = State.Builder.CreateExtractElement(
8660           ConditionBit, State.Builder.getInt32(Lane));
8661   }
8662 
8663   // Replace the temporary unreachable terminator with a new conditional branch,
8664   // whose two destinations will be set later when they are created.
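  // For example (a sketch; value and block names are hypothetical): for a
  // vector block-in mask with VF == 4 and Lane == 2, the code above and below
  // amounts to emitting
  //   %Cond = extractelement <4 x i1> %BlockInMask, i32 2
  //   br i1 %Cond, label %pred.store.if, label %pred.store.continue
  // except that both destinations of the branch start out as null and are
  // only filled in once the corresponding basic blocks exist.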
8665 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 8666 assert(isa<UnreachableInst>(CurrentTerminator) && 8667 "Expected to replace unreachable terminator with conditional branch."); 8668 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 8669 CondBr->setSuccessor(0, nullptr); 8670 ReplaceInstWithInst(CurrentTerminator, CondBr); 8671 } 8672 8673 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 8674 assert(State.Instance && "Predicated instruction PHI works per instance."); 8675 Instruction *ScalarPredInst = cast<Instruction>( 8676 State.ValueMap.getScalarValue(PredInst, *State.Instance)); 8677 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 8678 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 8679 assert(PredicatingBB && "Predicated block has no single predecessor."); 8680 8681 // By current pack/unpack logic we need to generate only a single phi node: if 8682 // a vector value for the predicated instruction exists at this point it means 8683 // the instruction has vector users only, and a phi for the vector value is 8684 // needed. In this case the recipe of the predicated instruction is marked to 8685 // also do that packing, thereby "hoisting" the insert-element sequence. 8686 // Otherwise, a phi node for the scalar value is needed. 8687 unsigned Part = State.Instance->Part; 8688 if (State.ValueMap.hasVectorValue(PredInst, Part)) { 8689 Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part); 8690 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 8691 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 8692 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 8693 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 8694 State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache. 8695 } else { 8696 Type *PredInstType = PredInst->getType(); 8697 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 8698 Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB); 8699 Phi->addIncoming(ScalarPredInst, PredicatedBB); 8700 State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi); 8701 } 8702 } 8703 8704 bool LoopVectorizePass::processLoop(Loop *L) { 8705 assert(L->empty() && "Only process inner loops."); 8706 8707 #ifndef NDEBUG 8708 const std::string DebugLocStr = getDebugLocString(L); 8709 #endif /* NDEBUG */ 8710 8711 DEBUG(dbgs() << "\nLV: Checking a loop in \"" 8712 << L->getHeader()->getParent()->getName() << "\" from " 8713 << DebugLocStr << "\n"); 8714 8715 LoopVectorizeHints Hints(L, DisableUnrolling, *ORE); 8716 8717 DEBUG(dbgs() << "LV: Loop hints:" 8718 << " force=" 8719 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 8720 ? "disabled" 8721 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 8722 ? "enabled" 8723 : "?")) 8724 << " width=" << Hints.getWidth() 8725 << " unroll=" << Hints.getInterleave() << "\n"); 8726 8727 // Function containing loop 8728 Function *F = L->getHeader()->getParent(); 8729 8730 // Looking at the diagnostic output is the only way to determine if a loop 8731 // was vectorized (other than looking at the IR or machine code), so it 8732 // is important to generate an optimization remark for each loop. Most of 8733 // these messages are generated as OptimizationRemarkAnalysis. 
Remarks
8734   // generated as OptimizationRemark and OptimizationRemarkMissed are
8735   // less verbose, reporting vectorized loops and unvectorized loops that may
8736   // benefit from vectorization, respectively.
8737 
8738   if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
8739     DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
8740     return false;
8741   }
8742 
8743   PredicatedScalarEvolution PSE(*SE, *L);
8744 
8745   // Check if it is legal to vectorize the loop.
8746   LoopVectorizationRequirements Requirements(*ORE);
8747   LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE,
8748                                 &Requirements, &Hints);
8749   if (!LVL.canVectorize()) {
8750     DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
8751     emitMissedWarning(F, L, Hints, ORE);
8752     return false;
8753   }
8754 
8755   // Check the function attributes to find out if this function should be
8756   // optimized for size.
8757   bool OptForSize =
8758       Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
8759 
8760   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
8761   // count by optimizing for size, to minimize overheads.
8762   unsigned ExpectedTC = SE->getSmallConstantMaxTripCount(L);
8763   bool HasExpectedTC = (ExpectedTC > 0);
8764 
8765   if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) {
8766     auto EstimatedTC = getLoopEstimatedTripCount(L);
8767     if (EstimatedTC) {
8768       ExpectedTC = *EstimatedTC;
8769       HasExpectedTC = true;
8770     }
8771   }
8772 
8773   if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) {
8774     DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
8775                  << "This loop is worth vectorizing only if no scalar "
8776                  << "iteration overheads are incurred.");
8777     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
8778       DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
8779     else {
8780       DEBUG(dbgs() << "\n");
8781       // Loops with a very small trip count are considered for vectorization
8782       // under OptForSize, thereby making sure the cost of their loop body is
8783       // dominant, free of runtime guards and scalar iteration overheads.
8784       OptForSize = true;
8785     }
8786   }
8787 
8788   // Check the function attributes to see if implicit floats are allowed.
8789   // FIXME: This check doesn't seem right -- what if the loop is
8790   // an integer loop and the vector instructions selected are purely integer
8791   // vector instructions?
8792   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
8793     DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat "
8794                     "attribute is used.\n");
8795     ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
8796                                    "NoImplicitFloat", L)
8797               << "loop not vectorized due to NoImplicitFloat attribute");
8798     emitMissedWarning(F, L, Hints, ORE);
8799     return false;
8800   }
8801 
8802   // Check if the target supports potentially unsafe FP vectorization.
8803   // FIXME: Add a check for the type of safety issue (denormal, signaling)
8804   // for the target we're vectorizing for, to make sure none of the
8805   // additional fp-math flags can help.
8806   if (Hints.isPotentiallyUnsafe() &&
8807       TTI->isFPVectorizationPotentiallyUnsafe()) {
8808     DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
8809     ORE->emit(
8810         createMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
8811         << "loop not vectorized due to unsafe FP support.");
8812     emitMissedWarning(F, L, Hints, ORE);
8813     return false;
8814   }
8815 
8816   // Use the cost model.
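  // For example (a sketch): a loop annotated in the source with
  //   #pragma clang loop vectorize_width(4)
  // arrives here with Hints.getWidth() == 4, so UserVF below becomes 4 and is
  // passed to the planner, which uses it instead of letting the cost model
  // pick the vectorization factor on its own.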
8817   LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
8818                                 &Hints);
8819   CM.collectValuesToIgnore();
8820 
8821   // Use the planner for vectorization.
8822   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM);
8823 
8824   // Get user vectorization factor.
8825   unsigned UserVF = Hints.getWidth();
8826 
8827   // Plan how to best vectorize, return the best VF and its cost.
8828   LoopVectorizationCostModel::VectorizationFactor VF =
8829       LVP.plan(OptForSize, UserVF);
8830 
8831   // Select the interleave count.
8832   unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);
8833 
8834   // Get user interleave count.
8835   unsigned UserIC = Hints.getInterleave();
8836 
8837   // Identify the diagnostic messages that should be produced.
8838   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
8839   bool VectorizeLoop = true, InterleaveLoop = true;
8840   if (Requirements.doesNotMeet(F, L, Hints)) {
8841     DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
8842                     "requirements.\n");
8843     emitMissedWarning(F, L, Hints, ORE);
8844     return false;
8845   }
8846 
8847   if (VF.Width == 1) {
8848     DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
8849     VecDiagMsg = std::make_pair(
8850         "VectorizationNotBeneficial",
8851         "the cost-model indicates that vectorization is not beneficial");
8852     VectorizeLoop = false;
8853   }
8854 
8855   if (IC == 1 && UserIC <= 1) {
8856     // Tell the user interleaving is not beneficial.
8857     DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
8858     IntDiagMsg = std::make_pair(
8859         "InterleavingNotBeneficial",
8860         "the cost-model indicates that interleaving is not beneficial");
8861     InterleaveLoop = false;
8862     if (UserIC == 1) {
8863       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
8864       IntDiagMsg.second +=
8865           " and is explicitly disabled or interleave count is set to 1";
8866     }
8867   } else if (IC > 1 && UserIC == 1) {
8868     // Tell the user interleaving is beneficial, but it is explicitly disabled.
8869     DEBUG(dbgs()
8870           << "LV: Interleaving is beneficial but is explicitly disabled.\n");
8871     IntDiagMsg = std::make_pair(
8872         "InterleavingBeneficialButDisabled",
8873         "the cost-model indicates that interleaving is beneficial "
8874         "but is explicitly disabled or interleave count is set to 1");
8875     InterleaveLoop = false;
8876   }
8877 
8878   // Override IC if user provided an interleave count.
8879   IC = UserIC > 0 ? UserIC : IC;
8880 
8881   // Emit diagnostic messages, if any.
8882   const char *VAPassName = Hints.vectorizeAnalysisPassName();
8883   if (!VectorizeLoop && !InterleaveLoop) {
8884     // Do not vectorize or interleave the loop.
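    // For example (a sketch of the resulting user-visible diagnostics):
    // compiling with -Rpass-analysis=loop-vectorize and
    // -Rpass-missed=loop-vectorize surfaces these remarks, e.g.
    //   remark: the cost-model indicates that vectorization is not beneficial
    // for a loop whose best VF was found to be 1.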
8885 ORE->emit([&]() { 8886 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 8887 L->getStartLoc(), L->getHeader()) 8888 << VecDiagMsg.second; 8889 }); 8890 ORE->emit([&]() { 8891 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 8892 L->getStartLoc(), L->getHeader()) 8893 << IntDiagMsg.second; 8894 }); 8895 return false; 8896 } else if (!VectorizeLoop && InterleaveLoop) { 8897 DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 8898 ORE->emit([&]() { 8899 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 8900 L->getStartLoc(), L->getHeader()) 8901 << VecDiagMsg.second; 8902 }); 8903 } else if (VectorizeLoop && !InterleaveLoop) { 8904 DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in " 8905 << DebugLocStr << '\n'); 8906 ORE->emit([&]() { 8907 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 8908 L->getStartLoc(), L->getHeader()) 8909 << IntDiagMsg.second; 8910 }); 8911 } else if (VectorizeLoop && InterleaveLoop) { 8912 DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in " 8913 << DebugLocStr << '\n'); 8914 DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 8915 } 8916 8917 LVP.setBestPlan(VF.Width, IC); 8918 8919 using namespace ore; 8920 8921 if (!VectorizeLoop) { 8922 assert(IC > 1 && "interleave count should not be 1 or 0"); 8923 // If we decided that it is not legal to vectorize the loop, then 8924 // interleave it. 8925 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, 8926 &CM); 8927 LVP.executePlan(Unroller, DT); 8928 8929 ORE->emit([&]() { 8930 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 8931 L->getHeader()) 8932 << "interleaved loop (interleaved count: " 8933 << NV("InterleaveCount", IC) << ")"; 8934 }); 8935 } else { 8936 // If we decided that it is *legal* to vectorize the loop, then do it. 8937 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 8938 &LVL, &CM); 8939 LVP.executePlan(LB, DT); 8940 ++LoopsVectorized; 8941 8942 // Add metadata to disable runtime unrolling a scalar loop when there are 8943 // no runtime checks about strides and memory. A scalar loop that is 8944 // rarely used is not worth unrolling. 8945 if (!LB.areSafetyChecksAdded()) 8946 AddRuntimeUnrollDisableMetaData(L); 8947 8948 // Report the vectorization decision. 8949 ORE->emit([&]() { 8950 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 8951 L->getHeader()) 8952 << "vectorized loop (vectorization width: " 8953 << NV("VectorizationFactor", VF.Width) 8954 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 8955 }); 8956 } 8957 8958 // Mark the loop as already vectorized to avoid vectorizing again. 8959 Hints.setAlreadyVectorized(); 8960 8961 DEBUG(verifyFunction(*L->getHeader()->getParent())); 8962 return true; 8963 } 8964 8965 bool LoopVectorizePass::runImpl( 8966 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 8967 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 8968 DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_, 8969 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 8970 OptimizationRemarkEmitter &ORE_) { 8971 SE = &SE_; 8972 LI = &LI_; 8973 TTI = &TTI_; 8974 DT = &DT_; 8975 BFI = &BFI_; 8976 TLI = TLI_; 8977 AA = &AA_; 8978 AC = &AC_; 8979 GetLAA = &GetLAA_; 8980 DB = &DB_; 8981 ORE = &ORE_; 8982 8983 // Don't attempt if 8984 // 1. the target claims to have no vector registers, and 8985 // 2. interleaving won't help ILP. 
8986   //
8987   // The second condition is necessary because, even if the target has no
8988   // vector registers, loop vectorization may still enable scalar
8989   // interleaving.
8990   if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
8991     return false;
8992 
8993   bool Changed = false;
8994 
8995   // The vectorizer requires loops to be in simplified form.
8996   // Since simplification may add new inner loops, it has to run before the
8997   // legality and profitability checks. This means running the loop vectorizer
8998   // will simplify all loops, regardless of whether anything ends up being
8999   // vectorized.
9000   for (auto &L : *LI)
9001     Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */);
9002 
9003   // Build up a worklist of inner-loops to vectorize. This is necessary as
9004   // the act of vectorizing or partially unrolling a loop creates new loops
9005   // and can invalidate iterators across the loops.
9006   SmallVector<Loop *, 8> Worklist;
9007 
9008   for (Loop *L : *LI)
9009     addAcyclicInnerLoop(*L, Worklist);
9010 
9011   LoopsAnalyzed += Worklist.size();
9012 
9013   // Now walk the identified inner loops.
9014   while (!Worklist.empty()) {
9015     Loop *L = Worklist.pop_back_val();
9016 
9017     // For the inner loops we actually process, form LCSSA to simplify the
9018     // transform.
9019     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
9020 
9021     Changed |= processLoop(L);
9022   }
9023 
9024   // Done processing all the identified inner loops.
9025   return Changed;
9026 }
9027 
9028 PreservedAnalyses LoopVectorizePass::run(Function &F,
9029                                          FunctionAnalysisManager &AM) {
9030   auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
9031   auto &LI = AM.getResult<LoopAnalysis>(F);
9032   auto &TTI = AM.getResult<TargetIRAnalysis>(F);
9033   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
9034   auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
9035   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
9036   auto &AA = AM.getResult<AAManager>(F);
9037   auto &AC = AM.getResult<AssumptionAnalysis>(F);
9038   auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
9039   auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
9040 
9041   auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
9042   std::function<const LoopAccessInfo &(Loop &)> GetLAA =
9043       [&](Loop &L) -> const LoopAccessInfo & {
9044     LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, nullptr};
9045     return LAM.getResult<LoopAccessAnalysis>(L, AR);
9046   };
9047   bool Changed =
9048       runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE);
9049   if (!Changed)
9050     return PreservedAnalyses::all();
9051   PreservedAnalyses PA;
9052   PA.preserve<LoopAnalysis>();
9053   PA.preserve<DominatorTreeAnalysis>();
9054   PA.preserve<BasicAA>();
9055   PA.preserve<GlobalsAA>();
9056   return PA;
9057 }
9058 
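// For example (a sketch): under the new pass manager, the pass above can be
// exercised in isolation with "opt -passes=loop-vectorize -S input.ll", which
// dispatches to LoopVectorizePass::run() and from there to runImpl().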