//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
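//
// For illustration only: the core transformation, written as C-like
// pseudocode. The array names, the constant K, and the vectorization factor
// VF = 4 are assumptions for this sketch, not taken from this file.
//
//   // Scalar loop: one element per iteration.
//   for (int i = 0; i < n; ++i)
//     A[i] = B[i] + K;
//
//   // Widened loop: the index advances by the SIMD vector width (VF = 4),
//   // and each 'wide' iteration operates on 4 elements at once.
//   for (int i = 0; i + 4 <= n; i += 4)
//     A[i:i+3] = B[i:i+3] + <K, K, K, K>;
//
//   // A scalar epilogue loop handles the remaining n % 4 iterations when
//   // the trip count is not a multiple of VF.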
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "VPlan.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// Loops with a known constant trip count below this number are vectorized
/// only if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis
createMissedAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                     Instruction *I = nullptr) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

namespace {

class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;
class VPBlendRecipe;
class VPInterleaveRecipe;
class VPReplicateRecipe;
class VPWidenIntOrFpInductionRecipe;
class VPWidenRecipe;
class VPWidenMemoryInstructionRecipe;

} // end anonymous namespace

/// Returns true if the given loop body has a cycle, excluding the loop
/// itself.
static bool hasCyclesInLoopBody(const Loop &L) {
  if (!L.empty())
    return true;

  for (const auto &SCC :
       make_range(scc_iterator<Loop, LoopBodyTraits>::begin(L),
                  scc_iterator<Loop, LoopBodyTraits>::end(L))) {
    if (SCC.size() > 1) {
      DEBUG(dbgs() << "LVL: Detected a cycle in the loop body:\n");
      DEBUG(L.dump());
      return true;
    }
  }
  return false;
}

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}

// FIXME: The following helper functions have multiple implementations
// in the project. They can be effectively organized in a common Load/Store
// utilities unit.

/// A helper function that returns the pointer operand of a load or store
/// instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// A helper function that returns the type of a loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns the alignment of a load or store
/// instruction.
static unsigned getMemInstAlignment(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlignment();
  return cast<StoreInst>(I)->getAlignment();
}

/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
static unsigned getMemInstAddressSpace(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V)) {
    FastMathFlags Flags;
    Flags.setFast();
    cast<Instruction>(V)->setFastMathFlags(Flags);
  }
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}
  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop. Unlink the old loop and connect the new one.
  /// Return the pre-header block of the new loop.
  BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);

  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between
  /// \p MinLane and \p MaxLane, times each part between \p MinPart and
  /// \p MaxPart, inclusive.
  void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
                            bool IfPredicateInstr);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate
  /// a vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);

  /// \brief Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  using EdgeMaskCacheTy =
      DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>;
  using BlockMaskCacheTy = DenseMap<BasicBlock *, VectorParts>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// \brief The Loop exit block may have single value PHI nodes with some
  /// incoming value. While vectorizing, we only handle real values
  /// that were defined inside the loop, and we should have one value for
  /// each predecessor of its parent basic block. See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// Insert the new loop into the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// This is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variable.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it
  /// can be a truncate instruction).
  void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal,
                        const InductionDescriptor &ID);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Instruction *EntryVal);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a value bitcasted to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AliasAnalysis *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  EdgeMaskCacheTy EdgeMaskCache;
  BlockMaskCacheTy BlockMaskCache;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

} // end namespace llvm

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B,
                                              const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst))
      B.SetCurrentDebugLocation(DIL->cloneWithDuplicationFactor(UF * VF));
    else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace {

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
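///
/// For illustration (an assumed walk-through of the indexing scheme described
/// at insertMember below): if A[i+1] happens to be the first member inserted,
/// it becomes the leader with index 0; A[i+3] would then be inserted with
/// index 2, and a later insertion of A[i] would use index -1 and become the
/// new leader.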
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is related to the leader and it could be
  /// negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The distance between the largest and smallest indices is always less
      // than the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if the group contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey = 0;
  int LargestKey = 0;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32        // Insert Position
  //      %add = add i32 %even    // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32          // Def of %odd
  //      store i32 %odd          // Insert Position
  Instruction *InsertPos;
};

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise it's meaningless to do analysis as the vectorization
/// on interleaved accesses is unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
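///
/// For illustration (assumed): for the factor-4 load group shown above, the
/// analysis would create a single InterleaveGroup G and record the mapping
/// { A[i] -> G, A[i+1] -> G, A[i+3] -> G }, with member indices 0, 1 and 3
/// kept inside G itself.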
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr doesn't belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

  /// \brief Initialize the LoopAccessInfo used for dependence checking.
  void setLAI(const LoopAccessInfo *Info) { LAI = Info; }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;

  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI = nullptr;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue = false;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// Holds dependences among the memory accesses in the loop. It maps a source
  /// access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor() = default;
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;

    // The scalar expression of this access.
    const SCEV *Scev = nullptr;

    // The size of the memory object.
    uint64_t Size = 0;

    // The alignment of this access.
    unsigned Align = 0;
  };

  /// \brief A type for holding instructions and their stride descriptors.
  using StrideEntry = std::pair<Instruction *, StrideDescriptor>;

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);

  /// \brief Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride) {
    unsigned Factor = std::abs(Stride);
    return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
  }

  /// \brief Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }

  /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return false if reordering is
  /// not necessary or is prevented because \p A and \p B may be dependent.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {
    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B. Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
  }

  /// \brief Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for it.
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE, HK_ISVECTORIZED };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      case HK_ISVECTORIZED:
        return (Val == 0 || Val == 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;

  /// Vectorization interleave factor.
  Hint Interleave;

  /// Vectorization forced.
  Hint Force;

  /// Already Vectorized.
  Hint IsVectorized;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe = false;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
                     OptimizationRemarkEmitter &ORE)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        IsVectorized("isvectorized", 0, HK_ISVECTORIZED), TheLoop(L), ORE(ORE) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    if (IsVectorized.Value != 1)
      // If the vectorization width and interleaving count are both 1 then
      // consider the loop to have been already vectorized because there's
      // nothing more that we can do.
      IsVectorized.Value = Width.Value == 1 && Interleave.Value == 1;
    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the isvectorized hint.
  void setAlreadyVectorized() {
    IsVectorized.Value = 1;
    Hint Hints[] = {IsVectorized};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (getIsVectorized() == 1) {
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      ORE.emit([&]() {
        return OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
                                          "AllDisabled", L->getStartLoc(),
                                          L->getHeader())
               << "loop not vectorized: vectorization and interleaving are "
                  "explicitly disabled, or the loop has already been "
                  "vectorized";
      });
      return false;
    }

    return true;
  }

  /// Dumps all the hint information.
  void emitRemarkWithHints() const {
    using namespace ore;

    ORE.emit([&]() {
      if (Force.Value == LoopVectorizeHints::FK_Disabled)
        return OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
               << "loop not vectorized: vectorization is explicitly disabled";
      else {
        OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
                                   TheLoop->getStartLoc(),
                                   TheLoop->getHeader());
        R << "loop not vectorized";
        if (Force.Value == LoopVectorizeHints::FK_Enabled) {
          R << " (Force=" << NV("Force", true);
          if (Width.Value != 0)
            R << ", Vector Width=" << NV("VectorWidth", Width.Value);
          if (Interleave.Value != 0)
            R << ", Interleave Count="
              << NV("InterleaveCount", Interleave.Value);
          R << ")";
        }
        return R;
      }
    });
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  unsigned getIsVectorized() const { return IsVectorized.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }

  /// \brief If hints are provided that force vectorization, use the
  /// AlwaysPrint pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const {
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return OptimizationRemarkAnalysis::AlwaysPrint;
  }

  bool allowReordering() const {
    // When loop hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations given by the scalar loop.
    // This is not enabled by default because it can be unsafe or inefficient.
    // For example, reordering floating-point operations will change the way
    // round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either a MDString or a MDNode with the first
      // operand a MDString.
      if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        if (!MD || MD->getNumOperands() == 0)
          continue;
        S = dyn_cast<MDString>(MD->getOperand(0));
        for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
          Args.push_back(MD->getOperand(i));
      } else {
        S = dyn_cast<MDString>(LoopID->getOperand(i));
        assert(Args.size() == 0 && "too many arguments for MDString");
      }

      if (!S)
        continue;

      // Check if the hint starts with the loop metadata prefix.
      StringRef Name = S->getString();
      if (Args.size() == 1)
        setHint(Name, Args[0]);
    }
  }

  /// Checks a string hint with one operand and sets the value if valid.
  void setHint(StringRef Name, Metadata *Arg) {
    if (!Name.startswith(Prefix()))
      return;
    Name = Name.substr(Prefix().size(), StringRef::npos);

    const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
    if (!C)
      return;
    unsigned Val = C->getZExtValue();

    Hint *Hints[] = {&Width, &Interleave, &Force, &IsVectorized};
    for (auto H : Hints) {
      if (Name == H->Name) {
        if (H->validate(Val))
          H->Value = Val;
        else
          DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
        break;
      }
    }
  }

  /// Create a new hint from a name / value pair.
  MDNode *createHintMetadata(StringRef Name, unsigned V) const {
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    Metadata *MDs[] = {MDString::get(Context, Name),
                       ConstantAsMetadata::get(
                           ConstantInt::get(Type::getInt32Ty(Context), V))};
    return MDNode::get(Context, MDs);
  }

  /// Matches metadata with hint name.
  bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
    MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
    if (!Name)
      return false;

    for (auto H : HintTypes)
      if (Name->getString().endswith(H.Name))
        return true;
    return false;
  }

  /// Sets current hints into loop metadata, keeping other values intact.
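  ///
  /// For illustration (an assumed example, consistent with Prefix() and
  /// createHintMetadata above), a loop marked as vectorized would end up with
  /// a self-referential loop ID along the lines of:
  ///
  ///   !0 = distinct !{!0, !1}
  ///   !1 = !{!"llvm.loop.isvectorized", i32 1}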
  /// Sets current hints into loop metadata, keeping other values intact.
  void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
    if (HintTypes.empty())
      return;

    // Reserve the first element for the LoopID (see below).
    SmallVector<Metadata *, 4> MDs(1);
    // If the loop already has metadata, then ignore the existing operands.
    MDNode *LoopID = TheLoop->getLoopID();
    if (LoopID) {
      for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
        MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
        // If the node is in the update list, ignore the old value.
        if (!matchesHintMetadataName(Node, HintTypes))
          MDs.push_back(Node);
      }
    }

    // Now, add the missing hints.
    for (auto H : HintTypes)
      MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));

    // Replace the current metadata node with the new one.
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);

    TheLoop->setLoopID(NewLoopID);
  }

  /// The loop these hints belong to.
  const Loop *TheLoop;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter &ORE;
};

} // end anonymous namespace

static void emitMissedWarning(Function *F, Loop *L,
                              const LoopVectorizeHints &LH,
                              OptimizationRemarkEmitter *ORE) {
  LH.emitRemarkWithHints();

  if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
    if (LH.getWidth() != 1)
      ORE->emit(DiagnosticInfoOptimizationFailure(
                    DEBUG_TYPE, "FailedRequestedVectorization",
                    L->getStartLoc(), L->getHeader())
                << "loop not vectorized: "
                << "failed explicitly specified loop vectorization");
    else if (LH.getInterleave() != 1)
      ORE->emit(DiagnosticInfoOptimizationFailure(
                    DEBUG_TYPE, "FailedRequestedInterleaving", L->getStartLoc(),
                    L->getHeader())
                << "loop not interleaved: "
                << "failed explicitly specified loop interleaving");
  }
}

namespace {

/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
/// to what vectorization factor.
/// This class does not look at the profitability of vectorization, only the
/// legality. This class has two main kinds of checks:
/// * Memory checks - The code in canVectorizeMemory checks if vectorization
///   will change the order of memory accesses in a way that will change the
///   correctness of the program.
/// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory
///   checks for a number of different conditions, such as the availability of
///   a single induction variable, that all types are supported and
///   vectorizable, etc. This code reflects the capabilities of
///   InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying the
/// induction variable and the different reduction variables.
class LoopVectorizationLegality {
public:
  LoopVectorizationLegality(
      Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
      TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F,
      const TargetTransformInfo *TTI,
      std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI,
      OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R,
      LoopVectorizeHints *H)
      : TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT), GetLAA(GetLAA),
        ORE(ORE), InterleaveInfo(PSE, L, DT, LI), Requirements(R), Hints(H) {}

  /// ReductionList contains the reduction descriptors for all
  /// of the reductions that were found in the loop.
  using ReductionList = DenseMap<PHINode *, RecurrenceDescriptor>;

  /// InductionList saves induction variables and maps them to the
  /// induction descriptor.
  using InductionList = MapVector<PHINode *, InductionDescriptor>;

  /// RecurrenceSet contains the phi nodes that are recurrences other than
  /// inductions and reductions.
  using RecurrenceSet = SmallPtrSet<const PHINode *, 8>;

  /// Returns true if it is legal to vectorize this loop.
  /// This does not mean that it is profitable to vectorize this
  /// loop, only that it is legal to do so.
  bool canVectorize();

  /// Returns the primary induction variable.
  PHINode *getPrimaryInduction() { return PrimaryInduction; }

  /// Returns the reduction variables found in the loop.
  ReductionList *getReductionVars() { return &Reductions; }

  /// Returns the induction variables found in the loop.
  InductionList *getInductionVars() { return &Inductions; }

  /// Return the first-order recurrences found in the loop.
  RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }

  /// Return the set of instructions to sink to handle first-order recurrences.
  DenseMap<Instruction *, Instruction *> &getSinkAfter() { return SinkAfter; }

  /// Returns the widest induction type.
  Type *getWidestInductionType() { return WidestIndTy; }

  /// Returns True if V is an induction variable in this loop.
  bool isInductionVariable(const Value *V);

  /// Returns True if PN is a reduction variable in this loop.
  bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }

  /// Returns True if Phi is a first-order recurrence in this loop.
  bool isFirstOrderRecurrence(const PHINode *Phi);

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  bool blockNeedsPredication(BasicBlock *BB);

  /// Check if this pointer is consecutive when vectorizing. This happens
  /// when the last index of the GEP is the induction variable, or when the
  /// pointer itself is an induction variable.
  /// This check allows us to vectorize A[idx] into a wide load/store.
  /// Returns:
  /// 0 - Stride is unknown or non-consecutive.
  /// 1 - Address is consecutive.
  /// -1 - Address is consecutive, and decreasing.
  int isConsecutivePtr(Value *Ptr);

  /// Returns true if the value V is uniform within the loop.
  bool isUniform(Value *V);
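
  // As an illustrative C-level sketch (hypothetical loop, not from this file):
  // with induction variable i, isConsecutivePtr returns 1 for &A[i], -1 for
  // &A[N - i], and 0 for a strided access such as &A[2 * i]. A value like N
  // itself, which does not vary with i, is what isUniform reports as uniform.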
  /// Returns the information that we collected about the runtime memory
  /// checks.
  const RuntimePointerChecking *getRuntimePointerChecking() const {
    return LAI->getRuntimePointerChecking();
  }

  const LoopAccessInfo *getLAI() const { return LAI; }

  /// \brief Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// \brief Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// \brief Returns true if an interleaved group requires a scalar iteration
  /// to handle accesses with gaps.
  bool requiresScalarEpilogue() const {
    return InterleaveInfo.requiresScalarEpilogue();
  }

  unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }

  uint64_t getMaxSafeRegisterWidth() const {
    return LAI->getDepChecker().getMaxSafeRegisterWidth();
  }

  bool hasStride(Value *V) { return LAI->hasStride(V); }

  /// Returns true if the target machine supports the masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
  }

  /// Returns true if the target machine supports the masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
  }

  /// Returns true if the target machine supports the masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType) {
    return TTI->isLegalMaskedScatter(DataType);
  }

  /// Returns true if the target machine supports the masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType) {
    return TTI->isLegalMaskedGather(DataType);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    auto *LI = dyn_cast<LoadInst>(V);
    auto *SI = dyn_cast<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ptr = getPointerOperand(V);
    auto *Ty = cast<PointerType>(Ptr->getType())->getElementType();
    return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
  }

  /// Returns true if the vector representation of the instruction \p I
  /// requires a mask.
  bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }

  unsigned getNumStores() const { return LAI->getNumStores(); }
  unsigned getNumLoads() const { return LAI->getNumLoads(); }
  unsigned getNumPredStores() const { return NumPredStores; }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  bool isScalarWithPredication(Instruction *I);

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);
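
  // For intuition (illustrative only, not from this file): an if-converted
  // conditional store such as "if (c[i]) a[i] = x;" can be widened when
  // isLegalMaskedStore() holds for the target; otherwise it becomes a
  // scalarized, predicated store, which is what isScalarWithPredication()
  // reports and getNumPredStores() counts.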
  // Returns true if the NoNaN attribute is set on the function.
  bool hasFunNoNaNAttr() const { return HasFunNoNaNAttr; }

private:
  /// Check if a single basic block loop is vectorizable.
  /// At this point we know that this is a loop with a constant trip count
  /// and we only need to check individual instructions.
  bool canVectorizeInstrs();

  /// When we vectorize loops we may change the order in which
  /// we read and write from memory. This method checks if it is
  /// legal to vectorize the code, considering only memory constraints.
  /// Returns true if the loop is vectorizable.
  bool canVectorizeMemory();

  /// Return true if we can vectorize this loop using the IF-conversion
  /// transformation.
  bool canVectorizeWithIfConvert();

  /// Return true if all of the instructions in the block can be speculatively
  /// executed. \p SafePtrs is a list of addresses that are known to be legal
  /// and we know that we can read from them without segfault.
  bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);

  /// Updates the vectorization state by adding \p Phi to the inductions list.
  /// This can set \p Phi as the main induction of the loop if \p Phi is a
  /// better choice for the main induction than the existing one.
  void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
                       SmallPtrSetImpl<Value *> &AllowedExit);

  /// Create an analysis remark that explains why vectorization failed.
  ///
  /// \p RemarkName is the identifier for the remark. If \p I is passed it is
  /// an instruction that prevents vectorization. Otherwise the loop is used
  /// for the location of the remark. \return the remark object that can be
  /// streamed to.
  OptimizationRemarkAnalysis
  createMissedAnalysis(StringRef RemarkName, Instruction *I = nullptr) const {
    return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
                                  RemarkName, TheLoop, I);
  }

  /// \brief If an access has symbolic strides, this maps the pointer value to
  /// the stride symbol.
  const ValueToValueMap *getSymbolicStrides() {
    // FIXME: Currently, the set of symbolic strides is sometimes queried
    // before it's collected. This happens from canVectorizeWithIfConvert,
    // when the pointer is checked to reference consecutive elements suitable
    // for a masked access.
    return LAI ? &LAI->getSymbolicStrides() : nullptr;
  }

  unsigned NumPredStores = 0;

  /// The loop that we evaluate.
  Loop *TheLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
  /// Applies dynamic knowledge to simplify SCEV expressions in the context
  /// of existing SCEV assumptions. The analysis will also add a minimal set
  /// of new predicates if this is required to enable vectorization and
  /// unrolling.
  PredicatedScalarEvolution &PSE;

  /// Target Library Info.
  TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Dominator Tree.
  DominatorTree *DT;

  // LoopAccess analysis.
  std::function<const LoopAccessInfo &(Loop &)> *GetLAA;

  // And the loop-accesses info corresponding to this loop. This pointer is
  // null until canVectorizeMemory sets it up.
  const LoopAccessInfo *LAI = nullptr;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride and close to each other.
  InterleavedAccessInfo InterleaveInfo;

  // --- vectorization state --- //

  /// Holds the primary induction variable. This is the counter of the
  /// loop.
  PHINode *PrimaryInduction = nullptr;

  /// Holds the reduction variables.
  ReductionList Reductions;

  /// Holds all of the induction variables that we found in the loop.
  /// Notice that inductions don't need to start at zero and that induction
  /// variables can be pointers.
  InductionList Inductions;

  /// Holds the phi nodes that are first-order recurrences.
  RecurrenceSet FirstOrderRecurrences;

  /// Holds instructions that need to sink past other instructions to handle
  /// first-order recurrences.
  DenseMap<Instruction *, Instruction *> SinkAfter;

  /// Holds the widest induction type encountered.
  Type *WidestIndTy = nullptr;

  /// Allowed outside users. This holds the induction and reduction
  /// vars which can be accessed from outside the loop.
  SmallPtrSet<Value *, 4> AllowedExit;

  /// Can we assume the absence of NaNs.
  bool HasFunNoNaNAttr = false;

  /// Vectorization requirements that will go through late-evaluation.
  LoopVectorizationRequirements *Requirements;

  /// Used to emit an analysis of any legality issues.
  LoopVectorizeHints *Hints;

  /// While vectorizing these instructions we have to generate a
  /// call to the appropriate masked intrinsic.
  SmallPtrSet<const Instruction *, 8> MaskedOp;
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdown due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
                             LoopInfo *LI, LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints)
      : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
        AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization should be avoided up front.
  Optional<unsigned> computeMaxVF(bool OptForSize);

  /// Information about vectorization costs.
  struct VectorizationFactor {
    // Vector width with best cost.
    unsigned Width;

    // Cost of the loop with that width.
    unsigned Cost;
  };

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(unsigned MaxVF);
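
  // A hypothetical illustration of the selection (numbers invented): if
  // expectedCost(1) = 8, expectedCost(2) = 10 and expectedCost(4) = 12, the
  // costs per lane are 8, 5 and 3 respectively, so VF = 4 would be preferred
  // as the most profitable factor.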
  /// Setup cost-based decisions for user vectorization factor.
  void selectUserVectorizationFactor(unsigned UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
                                 unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way; the
  /// form it takes after vectorization depends on cost. This function makes
  /// cost-based decisions for Load/Store instructions and collects them in a
  /// map. This decision map is used for building the lists of loop-uniform
  /// and loop-scalar instructions. The calculated cost is saved with the
  /// widening decision in order to avoid redundant calculations.
  void setCostBasedWideningDecision(unsigned VF);

  /// \brief A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    unsigned LoopInvariantRegs;

    /// Holds the maximum number of concurrent live intervals in the loop.
    unsigned MaxLocalUsers;

    /// Holds the number of instructions in the loop.
    unsigned NumInstructions;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
    assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");
    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.count(I);
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;
    assert(Uniforms.count(VF) && "VF not yet analyzed for uniformity");
    auto UniformsPerVF = Uniforms.find(VF);
    return UniformsPerVF->second.count(I);
  }
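
  // To make the uniform/scalar distinction concrete (hypothetical example):
  // the pointer operand of a consecutive load is uniform -- one scalar GEP
  // serves all VF lanes -- whereas an address feeding a scalarized access is
  // merely scalar, existing as VF copies, one per lane. Uniform implies
  // scalar, but not the other way around.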
  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;
    assert(Scalars.count(VF) && "Scalar values are not calculated for VF");
    auto ScalarsPerVF = Scalars.find(VF);
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
    return VF > 1 && MinBWs.count(I) && !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
                           unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup *Grp, unsigned VF,
                           InstWidening W, unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
    /// But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");
    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  unsigned getWideningCost(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");
    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.count(InstOnVF) && "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionVariable(Op);
  }
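
  // An illustrative case (hypothetical IR, not from a test): with an
  // induction variable %iv of type i64, the instruction
  //   %t = trunc i64 %iv to i32
  // is optimizable here: instead of truncating a wide i64 vector IV on every
  // iteration, a new i32 induction variable with the destination type can be
  // created and used directly.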
  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(unsigned VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(unsigned VF) {
    // Do the analysis once.
    if (VF == 1 || Uniforms.count(VF))
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

private:
  /// \return An upper bound for the vectorization factor, larger than zero.
  /// One is returned if vectorization should best be avoided due to cost.
  unsigned computeFeasibleMaxVF(bool OptForSize, unsigned ConstTripCount);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<unsigned, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);

  /// Calculate vectorization cost of memory instruction \p I.
  unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);

  /// The cost computation for scalarized memory instruction.
  unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);

  /// The cost computation for an interleave group of memory instructions.
  unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);

  /// The cost computation for Gather/Scatter instruction.
  unsigned getGatherScatterCost(Instruction *I, unsigned VF);
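
  // An informal summary (not itself part of the interface): the per-form
  // costs computed by getMemInstScalarizationCost, getInterleaveGroupCost,
  // getGatherScatterCost and getConsecutiveMemOpCost below are compared by
  // setCostBasedWideningDecision, which records the cheapest form as the
  // instruction's widening decision.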
  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);

  /// The cost calculation for Load instruction \p I with uniform pointer -
  /// scalar load + broadcast.
  unsigned getUniformMemOpCost(Instruction *I, unsigned VF);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Create an analysis remark that explains why vectorization failed.
  ///
  /// \p RemarkName is the identifier for the remark. \return the remark object
  /// that can be streamed to.
  OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
    return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
                                  RemarkName, TheLoop);
  }

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, unsigned>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                              unsigned VF);

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopUniforms(unsigned VF);

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization. Non-uniform scalarized instructions will be
  /// represented by VF values in the vectorized loop, each corresponding to an
  /// iteration of the original scalar loop.
  void collectLoopScalars(unsigned VF);

  /// Keeps cost model vectorization decision and cost for instructions.
  /// Right now it is used for memory instructions only.
  using DecisionList = DenseMap<std::pair<Instruction *, unsigned>,
                                std::pair<InstWidening, unsigned>>;

  DecisionList WideningDecisions;

public:
  /// The loop that we evaluate.
  Loop *TheLoop;

  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// Vectorization legality.
  LoopVectorizationLegality *Legal;

  /// Vector target information.
  const TargetTransformInfo &TTI;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Demanded bits analysis.
  DemandedBits *DB;

  /// Assumption cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  const Function *TheFunction;

  /// Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;

  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;

  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;
};

} // end anonymous namespace

namespace llvm {

/// LoopVectorizationPlanner - drives the vectorization process after having
/// passed Legality checks.
/// The planner builds and optimizes the Vectorization Plans which record the
/// decisions on how to vectorize the given loop: in particular, the control
/// flow of the vectorized version, the replication of instructions that are
/// to be scalarized, and the interleave access groups.
class LoopVectorizationPlanner {
  /// The loop that we evaluate.
  Loop *OrigLoop;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel &CM;

  SmallVector<std::unique_ptr<VPlan>, 4> VPlans;

  unsigned BestVF = 0;
  unsigned BestUF = 0;

public:
  LoopVectorizationPlanner(Loop *L, LoopInfo *LI, const TargetLibraryInfo *TLI,
                           const TargetTransformInfo *TTI,
                           LoopVectorizationLegality *Legal,
                           LoopVectorizationCostModel &CM)
      : OrigLoop(L), LI(LI), TLI(TLI), TTI(TTI), Legal(Legal), CM(CM) {}

  /// Plan how to best vectorize, return the best VF and its cost.
  LoopVectorizationCostModel::VectorizationFactor plan(bool OptForSize,
                                                       unsigned UserVF);

  /// Finalize the best decision and dispose of all other VPlans.
  void setBestPlan(unsigned VF, unsigned UF);

  /// Generate the IR code for the body of the vectorized loop according to the
  /// best selected VPlan.
  void executePlan(InnerLoopVectorizer &LB, DominatorTree *DT);

  void printPlans(raw_ostream &O) {
    for (const auto &Plan : VPlans)
      O << *Plan;
  }

protected:
  /// Collect the instructions from the original loop that would be trivially
  /// dead in the vectorized loop if generated.
  void collectTriviallyDeadInstructions(
      SmallPtrSetImpl<Instruction *> &DeadInstructions);

  /// A range of powers-of-2 vectorization factors with fixed start and
  /// adjustable end. The range includes start and excludes end, e.g.:
  /// [1, 9) = {1, 2, 4, 8}
  struct VFRange {
    // A power of 2.
    const unsigned Start;

    // Need not be a power of 2. If End <= Start, the range is empty.
    unsigned End;
  };

  /// Test a \p Predicate on a \p Range of VF's. Return the value of applying
  /// \p Predicate on Range.Start, possibly decreasing Range.End such that the
  /// returned value holds for the entire \p Range.
  bool getDecisionAndClampRange(const std::function<bool(unsigned)> &Predicate,
                                VFRange &Range);

  /// Build VPlans for power-of-2 VF's between \p MinVF and \p MaxVF inclusive,
  /// according to the information gathered by Legal when it checked if it is
  /// legal to vectorize the loop.
  void buildVPlans(unsigned MinVF, unsigned MaxVF);

private:
  /// Check if \p I belongs to an Interleave Group within the given VF
  /// \p Range, \return true in the first returned value if so and false
  /// otherwise. Build a new VPInterleaveGroup Recipe if \p I is the primary
  /// member of an IG for \p Range.Start, and provide it as the second returned
  /// value. Note that if \p I is an adjunct member of an IG for
  /// \p Range.Start, the \return value is <true, nullptr>, as it is handled by
  /// another recipe. \p Range.End may be decreased to ensure same decision
  /// from \p Range.Start to \p Range.End.
  VPInterleaveRecipe *tryToInterleaveMemory(Instruction *I, VFRange &Range);

  // Check if \p I is a memory instruction to be widened for \p Range.Start
  // and potentially masked.
  VPWidenMemoryInstructionRecipe *tryToWidenMemory(Instruction *I,
                                                   VFRange &Range);

  /// Check if an induction recipe should be constructed for \p I within the
  /// given VF \p Range. If so build and return it. If not, return null.
  /// \p Range.End may be decreased to ensure same decision from
  /// \p Range.Start to \p Range.End.
  VPWidenIntOrFpInductionRecipe *tryToOptimizeInduction(Instruction *I,
                                                        VFRange &Range);

  /// Handle non-loop phi nodes. Currently all such phi nodes are turned into
  /// a sequence of select instructions as the vectorizer currently performs
  /// full if-conversion.
  VPBlendRecipe *tryToBlend(Instruction *I);
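
  // The recipe-building methods above lean on getDecisionAndClampRange. As a
  // hypothetical illustration: given Range = [1, 16) and a predicate that
  // holds for VF = 1 and 2 but not for VF = 4, the call returns the
  // predicate's value at VF = 1 and shrinks Range to [1, 4), so a single
  // recipe covers exactly the VFs that behave alike.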
  /// Check if \p I can be widened within the given VF \p Range. If \p I can be
  /// widened for \p Range.Start, check if the last recipe of \p VPBB can be
  /// extended to include \p I or else build a new VPWidenRecipe for it and
  /// append it to \p VPBB. Return true if \p I can be widened for Range.Start,
  /// false otherwise. Range.End may be decreased to ensure same decision from
  /// \p Range.Start to \p Range.End.
  bool tryToWiden(Instruction *I, VPBasicBlock *VPBB, VFRange &Range);

  /// Build a VPReplicationRecipe for \p I and enclose it within a Region if it
  /// is predicated. \return \p VPBB augmented with this new recipe if \p I is
  /// not predicated, otherwise \return a new VPBasicBlock that succeeds the
  /// new Region. Update the packing decision of predicated instructions if
  /// they feed \p I. Range.End may be decreased to ensure same recipe behavior
  /// from \p Range.Start to \p Range.End.
  VPBasicBlock *handleReplication(
      Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
      DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe);

  /// Create a replicating region for instruction \p I that requires
  /// predication. \p PredRecipe is a VPReplicateRecipe holding \p I.
  VPRegionBlock *createReplicateRegion(Instruction *I,
                                       VPRecipeBase *PredRecipe);

  /// Build a VPlan according to the information gathered by Legal. \return a
  /// VPlan for vectorization factors \p Range.Start and up to \p Range.End
  /// exclusive, possibly decreasing \p Range.End.
  std::unique_ptr<VPlan> buildVPlan(VFRange &Range);
};

} // end namespace llvm

namespace {

/// \brief This holds vectorization requirements that must be verified late in
/// the process. The requirements are set by the legality and cost-model
/// analyses. Once vectorization has been determined to be possible and
/// profitable the requirements can be verified by looking for metadata or
/// compiler options.
/// For example, some loops require FP commutativity which is only allowed if
/// vectorization is explicitly specified or if the fast-math compiler option
/// has been provided.
/// Late evaluation of these requirements allows helpful diagnostics to be
/// composed that tell the user what needs to be done to vectorize the loop,
/// for example, by specifying #pragma clang loop vectorize or -ffast-math.
/// Late evaluation should be used only when diagnostics can be generated that
/// can be followed by a non-expert user.
class LoopVectorizationRequirements {
public:
  LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE) : ORE(ORE) {}

  void addUnsafeAlgebraInst(Instruction *I) {
    // First unsafe algebra instruction.
    if (!UnsafeAlgebraInst)
      UnsafeAlgebraInst = I;
  }

  void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }

  bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
    const char *PassName = Hints.vectorizeAnalysisPassName();
    bool Failed = false;
    if (UnsafeAlgebraInst && !Hints.allowReordering()) {
      ORE.emit([&]() {
        return OptimizationRemarkAnalysisFPCommute(
                   PassName, "CantReorderFPOps",
                   UnsafeAlgebraInst->getDebugLoc(),
                   UnsafeAlgebraInst->getParent())
               << "loop not vectorized: cannot prove it is safe to reorder "
                  "floating-point operations";
      });
      Failed = true;
    }

    // Test if runtime memcheck thresholds are exceeded.
    bool PragmaThresholdReached =
        NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
    bool ThresholdReached =
        NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
    if ((ThresholdReached && !Hints.allowReordering()) ||
        PragmaThresholdReached) {
      ORE.emit([&]() {
        return OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps",
                                                  L->getStartLoc(),
                                                  L->getHeader())
               << "loop not vectorized: cannot prove it is safe to reorder "
                  "memory operations";
      });
      DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
      Failed = true;
    }

    return Failed;
  }

private:
  unsigned NumRuntimePointerChecks = 0;
  Instruction *UnsafeAlgebraInst = nullptr;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter &ORE;
};

} // end anonymous namespace

static void addAcyclicInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) {
  if (L.empty()) {
    if (!hasCyclesInLoopBody(L))
      V.push_back(&L);
    return;
  }
  for (Loop *InnerL : L)
    addAcyclicInnerLoop(*InnerL, V);
}

namespace {

/// The LoopVectorize Pass.
struct LoopVectorize : public FunctionPass {
  /// Pass identification, replacement for typeid.
  static char ID;

  LoopVectorizePass Impl;

  explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true)
      : FunctionPass(ID) {
    Impl.DisableUnrolling = NoUnrolling;
    Impl.AlwaysVectorize = AlwaysVectorize;
    initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
                        GetLAA, *ORE);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
  }
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer,
// LoopVectorizationCostModel and LoopVectorizationPlanner.
//===----------------------------------------------------------------------===//

Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop.
  Instruction *Instr = dyn_cast<Instruction>(V);
  bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody);
  bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;

  // Place the code for broadcasting invariant variables in the new preheader.
  IRBuilder<>::InsertPointGuard Guard(Builder);
  if (Invariant)
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // Broadcast the scalar into all locations in the vector.
  Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

  return Shuf;
}

void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
    const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
  Value *Start = II.getStartValue();

  // Construct the initial value of the vector IV in the vector loop preheader.
  auto CurrIP = Builder.saveIP();
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
  if (isa<TruncInst>(EntryVal)) {
    assert(Start->getType()->isIntegerTy() &&
           "Truncation requires an integer type");
    auto *TruncType = cast<IntegerType>(EntryVal->getType());
    Step = Builder.CreateTrunc(Step, TruncType);
    Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
  }
  Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
  Value *SteppedStart =
      getStepVector(SplatStart, 0, Step, II.getInductionOpcode());

  // We create vector phi nodes for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (Step->getType()->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = II.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Multiply the vectorization factor by the step using integer or
  // floating-point arithmetic as appropriate.
  Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF);
  Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));

  // Create a vector splat to use in the induction update.
  //
  // FIXME: If the step is non-constant, we create the vector splat with
  //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
  //        handle a constant vector splat.
  Value *SplatVF = isa<Constant>(Mul)
                       ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
                       : Builder.CreateVectorSplat(VF, Mul);
  Builder.restoreIP(CurrIP);

  // We may need to add the step a number of times, depending on the unroll
  // factor. The last of those goes into the PHI.
  PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
                                    &*LoopVectorBody->getFirstInsertionPt());
  Instruction *LastInduction = VecInd;
  for (unsigned Part = 0; Part < UF; ++Part) {
    VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);
    if (isa<TruncInst>(EntryVal))
      addMetadata(LastInduction, EntryVal);
    LastInduction = cast<Instruction>(addFastMathFlag(
        Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
  }

  // Move the last step to the end of the latch block. This ensures consistent
  // placement of all induction updates.
  auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
  auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
  auto *ICmp = cast<Instruction>(Br->getCondition());
  LastInduction->moveBefore(ICmp);
  LastInduction->setName("vec.ind.next");

  VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
  VecInd->addIncoming(LastInduction, LoopVectorLatch);
}
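
// To give a feel for the output of createVectorIntOrFpInductionPHI, a sketch
// with hypothetical names for VF = 4, UF = 1 and an i32 induction that starts
// at %start with step 1:
//
//   vector.ph:
//     %splat     = ... <4 x i32> broadcast of %start ...
//     %induction = add <4 x i32> %splat, <i32 0, i32 1, i32 2, i32 3>
//   vector.body:
//     %vec.ind      = phi <4 x i32> [ %induction, %vector.ph ],
//                                   [ %vec.ind.next, %vector.body ]
//     %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>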
bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
  return Cost->isScalarAfterVectorization(I, VF) ||
         Cost->isProfitableToScalarize(I, VF);
}

bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
  if (shouldScalarizeInstruction(IV))
    return true;
  auto isScalarInst = [&](User *U) -> bool {
    auto *I = cast<Instruction>(U);
    return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
  };
  return llvm::any_of(IV->users(), isScalarInst);
}

void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
  assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
         "Primary induction variable must have an integer type");

  auto II = Legal->getInductionVars()->find(IV);
  assert(II != Legal->getInductionVars()->end() && "IV is not an induction");

  auto ID = II->second;
  assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");

  // The scalar value to broadcast. This will be derived from the canonical
  // induction variable.
  Value *ScalarIV = nullptr;

  // The value from the original loop to which we are mapping the new induction
  // variable.
  Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;

  // True if we have vectorized the induction variable.
  auto VectorizedIV = false;

  // Determine if we want a scalar version of the induction variable. This is
  // true if the induction variable itself is not widened, or if it has at
  // least one user in the loop that is not widened.
  auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal);

  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
  assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) &&
         "Induction step should be loop invariant");
  auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
  Value *Step = nullptr;
  if (PSE.getSE()->isSCEVable(IV->getType())) {
    SCEVExpander Exp(*PSE.getSE(), DL, "induction");
    Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(),
                             LoopVectorPreHeader->getTerminator());
  } else {
    Step = cast<SCEVUnknown>(ID.getStep())->getValue();
  }

  // Try to create a new independent vector induction variable. If we can't
  // create the phi node, we will splat the scalar induction variable in each
  // loop iteration.
  if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) {
    createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
    VectorizedIV = true;
  }

  // If we haven't yet vectorized the induction variable, or if we will create
  // a scalar one, we need to define the scalar induction variable and step
  // values. If we were given a truncation type, truncate the canonical
  // induction variable and step. Otherwise, derive these values from the
  // induction descriptor.
  if (!VectorizedIV || NeedsScalarIV) {
    ScalarIV = Induction;
    if (IV != OldInduction) {
      ScalarIV = IV->getType()->isIntegerTy()
                     ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
                     : Builder.CreateCast(Instruction::SIToFP, Induction,
                                          IV->getType());
      ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL);
      ScalarIV->setName("offset.idx");
    }
    if (Trunc) {
      auto *TruncType = cast<IntegerType>(Trunc->getType());
      assert(Step->getType()->isIntegerTy() &&
             "Truncation requires an integer step");
      ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
      Step = Builder.CreateTrunc(Step, TruncType);
    }
  }

  // If we haven't yet vectorized the induction variable, splat the scalar
  // induction variable, and build the necessary step vectors.
  if (!VectorizedIV) {
    Value *Broadcasted = getBroadcastInstrs(ScalarIV);
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *EntryPart =
          getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode());
      VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart);
      if (Trunc)
        addMetadata(EntryPart, Trunc);
    }
  }

  // If an induction variable is only used for counting loop iterations or
  // calculating addresses, it doesn't need to be widened. Create scalar steps
  // that can be used by instructions we will later scalarize. Note that the
  // addition of the scalar steps will not increase the number of instructions
  // in the loop in the common case prior to InstCombine. We will be trading
  // one vector extract for each scalar step.
  if (NeedsScalarIV)
    buildScalarSteps(ScalarIV, Step, EntryVal, ID);
}

Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
                                          Instruction::BinaryOps BinOp) {
  // Create and check the types.
  assert(Val->getType()->isVectorTy() && "Must be a vector");
  int VLen = Val->getType()->getVectorNumElements();

  Type *STy = Val->getType()->getScalarType();
  assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
         "Induction Step must be an integer or FP");
  assert(Step->getType() == STy && "Step has wrong type");

  SmallVector<Constant *, 8> Indices;

  if (STy->isIntegerTy()) {
    // Create a vector of consecutive numbers from zero to VF.
    for (int i = 0; i < VLen; ++i)
      Indices.push_back(ConstantInt::get(STy, StartIdx + i));

    // Add the consecutive indices to the vector value.
    Constant *Cv = ConstantVector::get(Indices);
    assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
    Step = Builder.CreateVectorSplat(VLen, Step);
    assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    // flags, which can be found from the original scalar operations.
    Step = Builder.CreateMul(Cv, Step);
    return Builder.CreateAdd(Val, Step, "induction");
  }

  // Floating point induction.
  assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
         "Binary Opcode should be specified for FP induction");
  // Create a vector of consecutive numbers from zero to VF.
  for (int i = 0; i < VLen; ++i)
    Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));

  // Add the consecutive indices to the vector value.
  Constant *Cv = ConstantVector::get(Indices);

  Step = Builder.CreateVectorSplat(VLen, Step);

  // Floating point operations had to be 'fast' to enable the induction.
  FastMathFlags Flags;
  Flags.setFast();

  Value *MulOp = Builder.CreateFMul(Cv, Step);
  if (isa<Instruction>(MulOp))
    // Have to check, MulOp may be a constant.
    cast<Instruction>(MulOp)->setFastMathFlags(Flags);

  Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
  if (isa<Instruction>(BOp))
    cast<Instruction>(BOp)->setFastMathFlags(Flags);
  return BOp;
}
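
// A small worked example of getStepVector (hypothetical values): for an
// integer induction with Step = 2, StartIdx = 0 and VF = 4, the code above
// computes
//   Cv = <i32 0, i32 1, i32 2, i32 3>,  Step = <i32 2, i32 2, i32 2, i32 2>
// and returns Val + Cv * Step, i.e. Val + <0, 2, 4, 6>.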
void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
                                           Value *EntryVal,
                                           const InductionDescriptor &ID) {
  // We shouldn't have to build scalar steps if we aren't vectorizing.
  assert(VF > 1 && "VF should be greater than one");

  // Get the value type and ensure it and the step have the same integer type.
  Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
  assert(ScalarIVTy == Step->getType() &&
         "Val and Step should have the same type");

  // We build scalar steps for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (ScalarIVTy->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = ID.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Determine the number of scalars we need to generate for each unroll
  // iteration. If EntryVal is uniform, we only need to generate the first
  // lane. Otherwise, we generate all VF values.
  unsigned Lanes =
      Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1
                                                                         : VF;
  // Compute the scalar steps and save the results in VectorLoopValueMap.
  for (unsigned Part = 0; Part < UF; ++Part) {
    for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
      auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
      auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
      auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
      VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add);
    }
  }
}

int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
  const ValueToValueMap &Strides =
      getSymbolicStrides() ? *getSymbolicStrides() : ValueToValueMap();

  int Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, true, false);
  if (Stride == 1 || Stride == -1)
    return Stride;
  return 0;
}

bool LoopVectorizationLegality::isUniform(Value *V) {
  return LAI->isUniform(V);
}

Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
  assert(V != Induction && "The new induction variable should not be used.");
  assert(!V->getType()->isVectorTy() && "Can't widen a vector");
  assert(!V->getType()->isVoidTy() && "Type does not produce a value");

  // If we have a stride that is replaced by one, do it here.
  if (Legal->hasStride(V))
    V = ConstantInt::get(V->getType(), 1);

  // If we have a vector mapped to this value, return it.
  if (VectorLoopValueMap.hasVectorValue(V, Part))
    return VectorLoopValueMap.getVectorValue(V, Part);

  // If the value has not been vectorized, check if it has been scalarized
  // instead. If it has been scalarized, and we actually need the value in
  // vector form, we will construct the vector values on demand.
  if (VectorLoopValueMap.hasAnyScalarValue(V)) {
    Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0});

    // If we've scalarized a value, that value should be an instruction.
    auto *I = cast<Instruction>(V);

    // If we aren't vectorizing, we can just copy the scalar map values over to
    // the vector map.
    if (VF == 1) {
      VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
      return ScalarValue;
    }

    // Get the last scalar instruction we generated for V and Part. If the
    // value is known to be uniform after vectorization, this corresponds to
    // lane zero of the Part unroll iteration. Otherwise, the last instruction
    // is the one we created for the last vector lane of the Part unroll
    // iteration.
    unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
    auto *LastInst = cast<Instruction>(
        VectorLoopValueMap.getScalarValue(V, {Part, LastLane}));

    // Set the insert point after the last scalarized instruction. This ensures
    // the insertelement sequence will directly follow the scalar definitions.
    auto OldIP = Builder.saveIP();
    auto NewIP = std::next(BasicBlock::iterator(LastInst));
    Builder.SetInsertPoint(&*NewIP);

    // However, if we are vectorizing, we need to construct the vector values.
    // If the value is known to be uniform after vectorization, we can just
    // broadcast the scalar value corresponding to lane zero for each unroll
Otherwise, we construct the vector values using insertelement
2850 // instructions. Since the resulting vectors are stored in
2851 // VectorLoopValueMap, we will only generate the insertelements once.
2852 Value *VectorValue = nullptr;
2853 if (Cost->isUniformAfterVectorization(I, VF)) {
2854 VectorValue = getBroadcastInstrs(ScalarValue);
2855 VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
2856 } else {
2857 // Initialize packing with insertelements to start from undef.
2858 Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF));
2859 VectorLoopValueMap.setVectorValue(V, Part, Undef);
2860 for (unsigned Lane = 0; Lane < VF; ++Lane)
2861 packScalarIntoVectorValue(V, {Part, Lane});
2862 VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
2863 }
2864 Builder.restoreIP(OldIP);
2865 return VectorValue;
2866 }
2867 
2868 // If this scalar is unknown, assume that it is a constant or that it is
2869 // loop invariant. Broadcast V and save the value for future uses.
2870 Value *B = getBroadcastInstrs(V);
2871 VectorLoopValueMap.setVectorValue(V, Part, B);
2872 return B;
2873 }
2874 
2875 Value *
2876 InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
2877 const VPIteration &Instance) {
2878 // If the value is not an instruction contained in the loop, it should
2879 // already be scalar.
2880 if (OrigLoop->isLoopInvariant(V))
2881 return V;
2882 
2883 assert((Instance.Lane == 0 ||
2884 !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)) &&
2885 "Uniform values only have lane zero");
2886 
2887 // If the value from the original loop has not been vectorized, it is
2888 // represented by UF x VF scalar values in the new loop. Return the requested
2889 // scalar value.
2890 if (VectorLoopValueMap.hasScalarValue(V, Instance))
2891 return VectorLoopValueMap.getScalarValue(V, Instance);
2892 
2893 // If the value has not been scalarized, get its entry in VectorLoopValueMap
2894 // for the given unroll part. If this entry is not a vector type (i.e., the
2895 // vectorization factor is one), there is no need to generate an
2896 // extractelement instruction.
2897 auto *U = getOrCreateVectorValue(V, Instance.Part);
2898 if (!U->getType()->isVectorTy()) {
2899 assert(VF == 1 && "Value not scalarized has non-vector type");
2900 return U;
2901 }
2902 
2903 // Otherwise, the value from the original loop has been vectorized and is
2904 // represented by UF vector values. Extract and return the requested scalar
2905 // value from the appropriate vector lane.
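// As an illustrative sketch (names hypothetical): for VF = 4, requesting
// {Part = 0, Lane = 2} of a value widened to %v.0 : <4 x i32> yields
//   %lane = extractelement <4 x i32> %v.0, i32 2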
2906 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
2907 }
2908 
2909 void InnerLoopVectorizer::packScalarIntoVectorValue(
2910 Value *V, const VPIteration &Instance) {
2911 assert(V != Induction && "The new induction variable should not be used.");
2912 assert(!V->getType()->isVectorTy() && "Can't pack a vector");
2913 assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2914 
2915 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
2916 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
2917 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
2918 Builder.getInt32(Instance.Lane));
2919 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
2920 }
2921 
2922 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2923 assert(Vec->getType()->isVectorTy() && "Invalid type");
2924 SmallVector<Constant *, 8> ShuffleMask;
2925 for (unsigned i = 0; i < VF; ++i)
2926 ShuffleMask.push_back(Builder.getInt32(VF - i - 1));
2927 
2928 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
2929 ConstantVector::get(ShuffleMask),
2930 "reverse");
2931 }
2932 
2933 // Try to vectorize the interleave group that \p Instr belongs to.
2934 //
2935 // E.g. Translate the following interleaved load group (factor = 3):
2936 // for (i = 0; i < N; i+=3) {
2937 // R = Pic[i]; // Member of index 0
2938 // G = Pic[i+1]; // Member of index 1
2939 // B = Pic[i+2]; // Member of index 2
2940 // ... // do something to R, G, B
2941 // }
2942 // To:
2943 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B
2944 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements
2945 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements
2946 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements
2947 //
2948 // Or translate the following interleaved store group (factor = 3):
2949 // for (i = 0; i < N; i+=3) {
2950 // ... do something to R, G, B
2951 // Pic[i] = R; // Member of index 0
2952 // Pic[i+1] = G; // Member of index 1
2953 // Pic[i+2] = B; // Member of index 2
2954 // }
2955 // To:
2956 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2957 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2958 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2959 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
2960 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
2961 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
2962 const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr);
2963 assert(Group && "Failed to get an interleaved access group.");
2964 
2965 // Skip if the current instruction is not the insert position.
2966 if (Instr != Group->getInsertPos())
2967 return;
2968 
2969 const DataLayout &DL = Instr->getModule()->getDataLayout();
2970 Value *Ptr = getPointerOperand(Instr);
2971 
2972 // Prepare the vector type for the interleaved load/store.
2973 Type *ScalarTy = getMemInstValueType(Instr);
2974 unsigned InterleaveFactor = Group->getFactor();
2975 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
2976 Type *PtrTy = VecTy->getPointerTo(getMemInstAddressSpace(Instr));
2977 
2978 // Prepare the new pointers.
2979 setDebugLocFromInst(Builder, Ptr);
2980 SmallVector<Value *, 2> NewPtrs;
2981 unsigned Index = Group->getIndex(Instr);
2982 
2983 // If the group is reverse, adjust the index to refer to the last vector lane
2984 // instead of the first.
We adjust the index from the first vector lane,
2985 // rather than directly getting the pointer for lane VF - 1, because the
2986 // pointer operand of the interleaved access is supposed to be uniform. For
2987 // uniform instructions, we're only required to generate a value for the
2988 // first vector lane in each unroll iteration.
2989 if (Group->isReverse())
2990 Index += (VF - 1) * Group->getFactor();
2991 
2992 for (unsigned Part = 0; Part < UF; Part++) {
2993 Value *NewPtr = getOrCreateScalarValue(Ptr, {Part, 0});
2994 
2995 // Note that the current instruction could be at any index. We need to
2996 // adjust the address to the member of index 0.
2997 //
2998 // E.g. a = A[i+1]; // Member of index 1 (Current instruction)
2999 // b = A[i]; // Member of index 0
3000 // The current pointer points to A[i+1]; adjust it to A[i].
3001 //
3002 // E.g. A[i+1] = a; // Member of index 1
3003 // A[i] = b; // Member of index 0
3004 // A[i+2] = c; // Member of index 2 (Current instruction)
3005 // The current pointer points to A[i+2]; adjust it to A[i].
3006 NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
3007 
3008 // Cast to the vector pointer type.
3009 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
3010 }
3011 
3012 setDebugLocFromInst(Builder, Instr);
3013 Value *UndefVec = UndefValue::get(VecTy);
3014 
3015 // Vectorize the interleaved load group.
3016 if (isa<LoadInst>(Instr)) {
3017 // For each unroll part, create a wide load for the group.
3018 SmallVector<Value *, 2> NewLoads;
3019 for (unsigned Part = 0; Part < UF; Part++) {
3020 auto *NewLoad = Builder.CreateAlignedLoad(
3021 NewPtrs[Part], Group->getAlignment(), "wide.vec");
3022 addMetadata(NewLoad, Instr);
3023 NewLoads.push_back(NewLoad);
3024 }
3025 
3026 // For each member in the group, shuffle out the appropriate data from the
3027 // wide loads.
3028 for (unsigned I = 0; I < InterleaveFactor; ++I) {
3029 Instruction *Member = Group->getMember(I);
3030 
3031 // Skip the gaps in the group.
3032 if (!Member)
3033 continue;
3034 
3035 Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF);
3036 for (unsigned Part = 0; Part < UF; Part++) {
3037 Value *StridedVec = Builder.CreateShuffleVector(
3038 NewLoads[Part], UndefVec, StrideMask, "strided.vec");
3039 
3040 // If this member has a different type, cast the result to its type.
3041 if (Member->getType() != ScalarTy) {
3042 VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
3043 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
3044 }
3045 
3046 if (Group->isReverse())
3047 StridedVec = reverseVector(StridedVec);
3048 
3049 VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
3050 }
3051 }
3052 return;
3053 }
3054 
3055 // The subvector type for the current instruction.
3056 VectorType *SubVT = VectorType::get(ScalarTy, VF);
3057 
3058 // Vectorize the interleaved store group.
3059 for (unsigned Part = 0; Part < UF; Part++) {
3060 // Collect the stored vector from each member.
3061 SmallVector<Value *, 4> StoredVecs;
3062 for (unsigned i = 0; i < InterleaveFactor; i++) {
3063 // An interleaved store group doesn't allow gaps, so each index has a member.
3064 Instruction *Member = Group->getMember(i);
3065 assert(Member && "Failed to get a member from an interleaved store group");
3066 
3067 Value *StoredVec = getOrCreateVectorValue(
3068 cast<StoreInst>(Member)->getValueOperand(), Part);
3069 if (Group->isReverse())
3070 StoredVec = reverseVector(StoredVec);
3071 
3072 // If this member has a different type, cast it to a unified type.
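// E.g., if the insert-position member stores i32 but this member stores
// float (same bit width), its <VF x float> vector is cast to the group's
// <VF x i32> subvector type just below; a sketch of the mixed-type case.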
3073 3074 if (StoredVec->getType() != SubVT) 3075 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 3076 3077 StoredVecs.push_back(StoredVec); 3078 } 3079 3080 // Concatenate all vectors into a wide vector. 3081 Value *WideVec = concatenateVectors(Builder, StoredVecs); 3082 3083 // Interleave the elements in the wide vector. 3084 Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor); 3085 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask, 3086 "interleaved.vec"); 3087 3088 Instruction *NewStoreInstr = 3089 Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment()); 3090 addMetadata(NewStoreInstr, Instr); 3091 } 3092 } 3093 3094 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) { 3095 // Attempt to issue a wide load. 3096 LoadInst *LI = dyn_cast<LoadInst>(Instr); 3097 StoreInst *SI = dyn_cast<StoreInst>(Instr); 3098 3099 assert((LI || SI) && "Invalid Load/Store instruction"); 3100 3101 LoopVectorizationCostModel::InstWidening Decision = 3102 Cost->getWideningDecision(Instr, VF); 3103 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 3104 "CM decision should be taken at this point"); 3105 if (Decision == LoopVectorizationCostModel::CM_Interleave) 3106 return vectorizeInterleaveGroup(Instr); 3107 3108 Type *ScalarDataTy = getMemInstValueType(Instr); 3109 Type *DataTy = VectorType::get(ScalarDataTy, VF); 3110 Value *Ptr = getPointerOperand(Instr); 3111 unsigned Alignment = getMemInstAlignment(Instr); 3112 // An alignment of 0 means target abi alignment. We need to use the scalar's 3113 // target abi alignment in such a case. 3114 const DataLayout &DL = Instr->getModule()->getDataLayout(); 3115 if (!Alignment) 3116 Alignment = DL.getABITypeAlignment(ScalarDataTy); 3117 unsigned AddressSpace = getMemInstAddressSpace(Instr); 3118 3119 // Determine if the pointer operand of the access is either consecutive or 3120 // reverse consecutive. 3121 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 3122 bool Reverse = ConsecutiveStride < 0; 3123 bool CreateGatherScatter = 3124 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 3125 3126 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 3127 // gather/scatter. Otherwise Decision should have been to Scalarize. 3128 assert((ConsecutiveStride || CreateGatherScatter) && 3129 "The instruction should be scalarized"); 3130 3131 // Handle consecutive loads/stores. 3132 if (ConsecutiveStride) 3133 Ptr = getOrCreateScalarValue(Ptr, {0, 0}); 3134 3135 VectorParts Mask = createBlockInMask(Instr->getParent()); 3136 // Handle Stores: 3137 if (SI) { 3138 assert(!Legal->isUniform(SI->getPointerOperand()) && 3139 "We do not allow storing to uniform addresses"); 3140 setDebugLocFromInst(Builder, SI); 3141 3142 for (unsigned Part = 0; Part < UF; ++Part) { 3143 Instruction *NewSI = nullptr; 3144 Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part); 3145 if (CreateGatherScatter) { 3146 Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr; 3147 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 3148 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 3149 MaskPart); 3150 } else { 3151 // Calculate the pointer for the specific unroll-part. 3152 Value *PartPtr = 3153 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 3154 3155 if (Reverse) { 3156 // If we store to reverse consecutive memory locations, then we need 3157 // to reverse the order of elements in the stored value. 
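// E.g., for VF = 4, a stored value <a, b, c, d> becomes <d, c, b, a>
// via the reverse shuffle created by reverseVector (a sketch).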
3158 StoredVal = reverseVector(StoredVal); 3159 // We don't want to update the value in the map as it might be used in 3160 // another expression. So don't call resetVectorValue(StoredVal). 3161 3162 // If the address is consecutive but reversed, then the 3163 // wide store needs to start at the last vector element. 3164 PartPtr = 3165 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 3166 PartPtr = 3167 Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 3168 if (Mask[Part]) // The reverse of a null all-one mask is a null mask. 3169 Mask[Part] = reverseVector(Mask[Part]); 3170 } 3171 3172 Value *VecPtr = 3173 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 3174 3175 if (Legal->isMaskRequired(SI) && Mask[Part]) 3176 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 3177 Mask[Part]); 3178 else 3179 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 3180 } 3181 addMetadata(NewSI, SI); 3182 } 3183 return; 3184 } 3185 3186 // Handle loads. 3187 assert(LI && "Must have a load instruction"); 3188 setDebugLocFromInst(Builder, LI); 3189 for (unsigned Part = 0; Part < UF; ++Part) { 3190 Value *NewLI; 3191 if (CreateGatherScatter) { 3192 Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr; 3193 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 3194 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 3195 nullptr, "wide.masked.gather"); 3196 addMetadata(NewLI, LI); 3197 } else { 3198 // Calculate the pointer for the specific unroll-part. 3199 Value *PartPtr = 3200 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 3201 3202 if (Reverse) { 3203 // If the address is consecutive but reversed, then the 3204 // wide load needs to start at the last vector element. 3205 PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 3206 PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 3207 if (Mask[Part]) // The reverse of a null all-one mask is a null mask. 3208 Mask[Part] = reverseVector(Mask[Part]); 3209 } 3210 3211 Value *VecPtr = 3212 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 3213 if (Legal->isMaskRequired(LI) && Mask[Part]) 3214 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 3215 UndefValue::get(DataTy), 3216 "wide.masked.load"); 3217 else 3218 NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load"); 3219 3220 // Add metadata to the load, but setVectorValue to the reverse shuffle. 3221 addMetadata(NewLI, LI); 3222 if (Reverse) 3223 NewLI = reverseVector(NewLI); 3224 } 3225 VectorLoopValueMap.setVectorValue(Instr, Part, NewLI); 3226 } 3227 } 3228 3229 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 3230 const VPIteration &Instance, 3231 bool IfPredicateInstr) { 3232 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 3233 3234 setDebugLocFromInst(Builder, Instr); 3235 3236 // Does this instruction return a value ? 3237 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 3238 3239 Instruction *Cloned = Instr->clone(); 3240 if (!IsVoidRetTy) 3241 Cloned->setName(Instr->getName() + ".cloned"); 3242 3243 // Replace the operands of the cloned instructions with their scalar 3244 // equivalents in the new loop. 
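// E.g., cloning %add = add i32 %x, %y for Instance = {Part, Lane} rewires
// the clone to use the scalar values of %x and %y previously created for
// that part and lane (an illustrative sketch; loop-invariant operands are
// returned unchanged).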
3245 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
3246 auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance);
3247 Cloned->setOperand(op, NewOp);
3248 }
3249 addNewMetadata(Cloned, Instr);
3250 
3251 // Place the cloned scalar in the new loop.
3252 Builder.Insert(Cloned);
3253 
3254 // Add the cloned scalar to the scalar map entry.
3255 VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
3256 
3257 // If we just cloned a new assumption, add it to the assumption cache.
3258 if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
3259 if (II->getIntrinsicID() == Intrinsic::assume)
3260 AC->registerAssumption(II);
3261 
3262 // End if-block.
3263 if (IfPredicateInstr)
3264 PredicatedInstructions.push_back(Cloned);
3265 }
3266 
3267 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
3268 Value *End, Value *Step,
3269 Instruction *DL) {
3270 BasicBlock *Header = L->getHeader();
3271 BasicBlock *Latch = L->getLoopLatch();
3272 // As we're just creating this loop, it's possible no latch exists
3273 // yet. If so, use the header as this will be a single block loop.
3274 if (!Latch)
3275 Latch = Header;
3276 
3277 IRBuilder<> Builder(&*Header->getFirstInsertionPt());
3278 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
3279 setDebugLocFromInst(Builder, OldInst);
3280 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
3281 
3282 Builder.SetInsertPoint(Latch->getTerminator());
3283 setDebugLocFromInst(Builder, OldInst);
3284 
3285 // Create i+1 and fill the PHINode.
3286 Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
3287 Induction->addIncoming(Start, L->getLoopPreheader());
3288 Induction->addIncoming(Next, Latch);
3289 // Create the compare.
3290 Value *ICmp = Builder.CreateICmpEQ(Next, End);
3291 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
3292 
3293 // Now we have two terminators. Remove the old one from the block.
3294 Latch->getTerminator()->eraseFromParent();
3295 
3296 return Induction;
3297 }
3298 
3299 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3300 if (TripCount)
3301 return TripCount;
3302 
3303 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3304 // Find the loop boundaries.
3305 ScalarEvolution *SE = PSE.getSE();
3306 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3307 assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
3308 "Invalid loop count");
3309 
3310 Type *IdxTy = Legal->getWidestInductionType();
3311 
3312 // The exit count might have the type i64 while the phi is i32. This can
3313 // happen if we have an induction variable that is sign extended before the
3314 // compare. The only way we can get a backedge-taken count is if the
3315 // induction variable was signed, and so it will not overflow. In such a
3316 // case, truncation is legal.
3317 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
3318 IdxTy->getPrimitiveSizeInBits())
3319 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3320 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3321 
3322 // Get the total trip count from the count by adding 1.
3323 const SCEV *ExitCount = SE->getAddExpr(
3324 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3325 
3326 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3327 
3328 // Expand the trip count and place the new instructions in the preheader.
3329 // Notice that the pre-header does not change, only the loop body.
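// E.g., for a loop whose backedge-taken count is n - 1, the expander emits
// code in the preheader that computes the trip count n (a sketch; the
// actual expression is whatever SCEV derived for this loop above).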
3330 SCEVExpander Exp(*SE, DL, "induction");
3331 
3332 // Count holds the overall loop count (N).
3333 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3334 L->getLoopPreheader()->getTerminator());
3335 
3336 if (TripCount->getType()->isPointerTy())
3337 TripCount =
3338 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3339 L->getLoopPreheader()->getTerminator());
3340 
3341 return TripCount;
3342 }
3343 
3344 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3345 if (VectorTripCount)
3346 return VectorTripCount;
3347 
3348 Value *TC = getOrCreateTripCount(L);
3349 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3350 
3351 // Now we need to generate the expression for the part of the loop that the
3352 // vectorized body will execute. This is equal to N - (N % Step) if scalar
3353 // iterations are not required for correctness, or N - Step, otherwise. Step
3354 // is equal to the vectorization factor (number of SIMD elements) times the
3355 // unroll factor (number of SIMD instructions).
3356 Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
3357 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3358 
3359 // If there is a non-reversed interleaved group that may speculatively access
3360 // memory out-of-bounds, we need to ensure that there will be at least one
3361 // iteration of the scalar epilogue loop. Thus, if the step evenly divides
3362 // the trip count, we set the remainder to be equal to the step. If the step
3363 // does not evenly divide the trip count, no adjustment is necessary since
3364 // there will already be scalar iterations. Note that the minimum iterations
3365 // check ensures that N >= Step.
3366 if (VF > 1 && Legal->requiresScalarEpilogue()) {
3367 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3368 R = Builder.CreateSelect(IsZero, Step, R);
3369 }
3370 
3371 VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3372 
3373 return VectorTripCount;
3374 }
3375 
3376 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3377 const DataLayout &DL) {
3378 // Verify that V is a vector type with the same number of elements as DstVTy.
3379 unsigned VF = DstVTy->getNumElements();
3380 VectorType *SrcVecTy = cast<VectorType>(V->getType());
3381 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
3382 Type *SrcElemTy = SrcVecTy->getElementType();
3383 Type *DstElemTy = DstVTy->getElementType();
3384 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3385 "Vector elements must have same size");
3386 
3387 // Do a direct cast if element types are castable.
3388 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3389 return Builder.CreateBitOrPointerCast(V, DstVTy);
3390 }
3391 // V cannot be directly cast to the desired vector type. This may happen
3392 // when V is a floating point vector but DstVTy is a vector of pointers or
3393 // vice-versa. Handle this with a two-step bitcast through an intermediate
3394 // integer type, i.e., Ptr <-> Int <-> Float.
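// E.g., assuming 32-bit pointers, casting <4 x float> to <4 x i8*> goes
// through <4 x i32>: a bitcast of the floats to integers followed by an
// inttoptr, since a direct FP-to-pointer bitcast is not legal IR (sketch).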
3395 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3396 "Only one type should be a pointer type");
3397 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3398 "Only one type should be a floating point type");
3399 Type *IntTy =
3400 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3401 VectorType *VecIntTy = VectorType::get(IntTy, VF);
3402 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3403 return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
3404 }
3405 
3406 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3407 BasicBlock *Bypass) {
3408 Value *Count = getOrCreateTripCount(L);
3409 BasicBlock *BB = L->getLoopPreheader();
3410 IRBuilder<> Builder(BB->getTerminator());
3411 
3412 // Generate code to check if the loop's trip count is less than VF * UF, or
3413 // equal to it in case a scalar epilogue is required; this implies that the
3414 // vector trip count is zero. This check also covers the case where adding one
3415 // to the backedge-taken count overflowed leading to an incorrect trip count
3416 // of zero. In this case we will also jump to the scalar loop.
3417 auto P = Legal->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
3418 : ICmpInst::ICMP_ULT;
3419 Value *CheckMinIters = Builder.CreateICmp(
3420 P, Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");
3421 
3422 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3423 // Update dominator tree immediately if the generated block is a
3424 // LoopBypassBlock because SCEV expansions to generate loop bypass
3425 // checks may query it before the current function is finished.
3426 DT->addNewBlock(NewBB, BB);
3427 if (L->getParentLoop())
3428 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3429 ReplaceInstWithInst(BB->getTerminator(),
3430 BranchInst::Create(Bypass, NewBB, CheckMinIters));
3431 LoopBypassBlocks.push_back(BB);
3432 }
3433 
3434 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3435 BasicBlock *BB = L->getLoopPreheader();
3436 
3437 // Generate the code to check the SCEV assumptions that we made.
3438 // We want the new basic block to start at the first instruction in a
3439 // sequence of instructions that form a check.
3440 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
3441 "scev.check");
3442 Value *SCEVCheck =
3443 Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
3444 
3445 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
3446 if (C->isZero())
3447 return;
3448 
3449 // Create a new block containing the stride check.
3450 BB->setName("vector.scevcheck");
3451 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3452 // Update dominator tree immediately if the generated block is a
3453 // LoopBypassBlock because SCEV expansions to generate loop bypass
3454 // checks may query it before the current function is finished.
3455 DT->addNewBlock(NewBB, BB);
3456 if (L->getParentLoop())
3457 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3458 ReplaceInstWithInst(BB->getTerminator(),
3459 BranchInst::Create(Bypass, NewBB, SCEVCheck));
3460 LoopBypassBlocks.push_back(BB);
3461 AddedSafetyChecks = true;
3462 }
3463 
3464 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
3465 BasicBlock *BB = L->getLoopPreheader();
3466 
3467 // Generate the code that checks at runtime whether arrays overlap.
We put the
3468 // checks into a separate block to make the more common case of few elements
3469 // faster.
3470 Instruction *FirstCheckInst;
3471 Instruction *MemRuntimeCheck;
3472 std::tie(FirstCheckInst, MemRuntimeCheck) =
3473 Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
3474 if (!MemRuntimeCheck)
3475 return;
3476 
3477 // Create a new block containing the memory check.
3478 BB->setName("vector.memcheck");
3479 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3480 // Update dominator tree immediately if the generated block is a
3481 // LoopBypassBlock because SCEV expansions to generate loop bypass
3482 // checks may query it before the current function is finished.
3483 DT->addNewBlock(NewBB, BB);
3484 if (L->getParentLoop())
3485 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3486 ReplaceInstWithInst(BB->getTerminator(),
3487 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
3488 LoopBypassBlocks.push_back(BB);
3489 AddedSafetyChecks = true;
3490 
3491 // We currently don't use LoopVersioning for the actual loop cloning but we
3492 // still use it to add the noalias metadata.
3493 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
3494 PSE.getSE());
3495 LVer->prepareNoAliasMetadata();
3496 }
3497 
3498 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3499 /*
3500 In this function we generate a new loop. The new loop will contain
3501 the vectorized instructions while the old loop will continue to run the
3502 scalar remainder.
3503 
3504 [ ] <-- loop iteration number check.
3505 / |
3506 / v
3507 | [ ] <-- vector loop bypass (may consist of multiple blocks).
3508 | / |
3509 | / v
3510 || [ ] <-- vector pre header.
3511 |/ |
3512 | v
3513 | [ ] \
3514 | [ ]_| <-- vector loop.
3515 | |
3516 | v
3517 | -[ ] <--- middle-block.
3518 | / |
3519 | / v
3520 -|- >[ ] <--- new preheader.
3521 | |
3522 | v
3523 | [ ] \
3524 | [ ]_| <-- old scalar loop to handle remainder.
3525 \ |
3526 \ v
3527 >[ ] <-- exit block.
3528 ...
3529 */
3530 
3531 BasicBlock *OldBasicBlock = OrigLoop->getHeader();
3532 BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
3533 BasicBlock *ExitBlock = OrigLoop->getExitBlock();
3534 assert(VectorPH && "Invalid loop structure");
3535 assert(ExitBlock && "Must have an exit block");
3536 
3537 // Some loops have a single integer induction variable, while other loops
3538 // don't. One example is C++ iterators that often have multiple pointer
3539 // induction variables. In the code below we also support a case where we
3540 // don't have a single induction variable.
3541 //
3542 // We try as hard as possible to obtain an induction variable from the
3543 // original loop. However, if we don't find one that:
3544 // - is an integer
3545 // - counts from zero, stepping by one
3546 // - is the size of the widest induction variable type
3547 // then we create a new one.
3548 OldInduction = Legal->getPrimaryInduction();
3549 Type *IdxTy = Legal->getWidestInductionType();
3550 
3551 // Split the single block loop into the two loop structure described above.
3552 BasicBlock *VecBody =
3553 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
3554 BasicBlock *MiddleBlock =
3555 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
3556 BasicBlock *ScalarPH =
3557 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
3558 
3559 // Create and register the new vector loop.
3560 Loop *Lp = LI->AllocateLoop();
3561 Loop *ParentLoop = OrigLoop->getParentLoop();
3562 
3563 // Insert the new loop into the loop nest and register the new basic blocks
3564 // before calling any utilities such as SCEV that require valid LoopInfo.
3565 if (ParentLoop) {
3566 ParentLoop->addChildLoop(Lp);
3567 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
3568 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
3569 } else {
3570 LI->addTopLevelLoop(Lp);
3571 }
3572 Lp->addBasicBlockToLoop(VecBody, *LI);
3573 
3574 // Find the loop boundaries.
3575 Value *Count = getOrCreateTripCount(Lp);
3576 
3577 Value *StartIdx = ConstantInt::get(IdxTy, 0);
3578 
3579 // Now, compare the new count to zero. If it is zero skip the vector loop and
3580 // jump to the scalar loop. This check also covers the case where the
3581 // backedge-taken count is uint##_max: adding one to it will overflow leading
3582 // to an incorrect trip count of zero. In this (rare) case we will also jump
3583 // to the scalar loop.
3584 emitMinimumIterationCountCheck(Lp, ScalarPH);
3585 
3586 // Generate the code to check any assumptions that we've made for SCEV
3587 // expressions.
3588 emitSCEVChecks(Lp, ScalarPH);
3589 
3590 // Generate the code that checks at runtime whether arrays overlap. We put
3591 // the checks into a separate block to make the more common case of few
3592 // elements faster.
3593 emitMemRuntimeChecks(Lp, ScalarPH);
3594 
3595 // Generate the induction variable.
3596 // The loop step is equal to the vectorization factor (num of SIMD elements)
3597 // times the unroll factor (num of SIMD instructions).
3598 Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3599 Constant *Step = ConstantInt::get(IdxTy, VF * UF);
3600 Induction =
3601 createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3602 getDebugLocFromInstOrOperands(OldInduction));
3603 
3604 // We are going to resume the execution of the scalar loop.
3605 // Go over all of the induction variables that we found and fix the
3606 // PHIs that are left in the scalar version of the loop.
3607 // The starting values of PHI nodes depend on the counter of the last
3608 // iteration in the vectorized loop.
3609 // If we come from a bypass edge then we need to start from the original
3610 // start value.
3611 
3612 // This variable saves the new starting index for the scalar loop. It is used
3613 // to test if there are any tail iterations left once the vector loop has
3614 // completed.
3615 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
3616 for (auto &InductionEntry : *List) {
3617 PHINode *OrigPhi = InductionEntry.first;
3618 InductionDescriptor II = InductionEntry.second;
3619 
3620 // Create phi nodes to merge from the backedge-taken check block.
3621 PHINode *BCResumeVal = PHINode::Create(
3622 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
3623 Value *&EndValue = IVEndValues[OrigPhi];
3624 if (OrigPhi == OldInduction) {
3625 // We know what the end value is.
3626 EndValue = CountRoundDown; 3627 } else { 3628 IRBuilder<> B(Lp->getLoopPreheader()->getTerminator()); 3629 Type *StepType = II.getStep()->getType(); 3630 Instruction::CastOps CastOp = 3631 CastInst::getCastOpcode(CountRoundDown, true, StepType, true); 3632 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd"); 3633 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 3634 EndValue = II.transform(B, CRD, PSE.getSE(), DL); 3635 EndValue->setName("ind.end"); 3636 } 3637 3638 // The new PHI merges the original incoming value, in case of a bypass, 3639 // or the value at the end of the vectorized loop. 3640 BCResumeVal->addIncoming(EndValue, MiddleBlock); 3641 3642 // Fix the scalar body counter (PHI node). 3643 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH); 3644 3645 // The old induction's phi node in the scalar body needs the truncated 3646 // value. 3647 for (BasicBlock *BB : LoopBypassBlocks) 3648 BCResumeVal->addIncoming(II.getStartValue(), BB); 3649 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal); 3650 } 3651 3652 // Add a check in the middle block to see if we have completed 3653 // all of the iterations in the first vector loop. 3654 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3655 Value *CmpN = 3656 CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 3657 CountRoundDown, "cmp.n", MiddleBlock->getTerminator()); 3658 ReplaceInstWithInst(MiddleBlock->getTerminator(), 3659 BranchInst::Create(ExitBlock, ScalarPH, CmpN)); 3660 3661 // Get ready to start creating new instructions into the vectorized body. 3662 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt()); 3663 3664 // Save the state. 3665 LoopVectorPreHeader = Lp->getLoopPreheader(); 3666 LoopScalarPreHeader = ScalarPH; 3667 LoopMiddleBlock = MiddleBlock; 3668 LoopExitBlock = ExitBlock; 3669 LoopVectorBody = VecBody; 3670 LoopScalarBody = OldBasicBlock; 3671 3672 // Keep all loop hints from the original loop on the vector loop (we'll 3673 // replace the vectorizer-specific hints below). 3674 if (MDNode *LID = OrigLoop->getLoopID()) 3675 Lp->setLoopID(LID); 3676 3677 LoopVectorizeHints Hints(Lp, true, *ORE); 3678 Hints.setAlreadyVectorized(); 3679 3680 return LoopVectorPreHeader; 3681 } 3682 3683 // Fix up external users of the induction variable. At this point, we are 3684 // in LCSSA form, with all external PHIs that use the IV having one input value, 3685 // coming from the remainder loop. We need those PHIs to also have a correct 3686 // value for the IV when arriving directly from the middle block. 3687 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3688 const InductionDescriptor &II, 3689 Value *CountRoundDown, Value *EndValue, 3690 BasicBlock *MiddleBlock) { 3691 // There are two kinds of external IV usages - those that use the value 3692 // computed in the last iteration (the PHI) and those that use the penultimate 3693 // value (the value that feeds into the phi from the loop latch). 3694 // We allow both, but they, obviously, have different values. 3695 3696 assert(OrigLoop->getExitBlock() && "Expected a single exit block"); 3697 3698 DenseMap<Value *, Value *> MissingVals; 3699 3700 // An external user of the last iteration's value should see the value that 3701 // the remainder loop uses to initialize its own IV. 
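// E.g., in LCSSA form an exit phi such as %iv.lcssa = phi i32 [ %iv.next, %loop ]
// must also receive EndValue when control arrives directly from the middle
// block (an illustrative sketch; names hypothetical).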
3702 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3703 for (User *U : PostInc->users()) {
3704 Instruction *UI = cast<Instruction>(U);
3705 if (!OrigLoop->contains(UI)) {
3706 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3707 MissingVals[UI] = EndValue;
3708 }
3709 }
3710 
3711 // An external user of the penultimate value needs to see EndValue - Step.
3712 // The simplest way to get this is to recompute it from the constituent SCEVs,
3713 // that is Start + (Step * (CRD - 1)).
3714 for (User *U : OrigPhi->users()) {
3715 auto *UI = cast<Instruction>(U);
3716 if (!OrigLoop->contains(UI)) {
3717 const DataLayout &DL =
3718 OrigLoop->getHeader()->getModule()->getDataLayout();
3719 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3720 
3721 IRBuilder<> B(MiddleBlock->getTerminator());
3722 Value *CountMinusOne = B.CreateSub(
3723 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3724 Value *CMO =
3725 !II.getStep()->getType()->isIntegerTy()
3726 ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3727 II.getStep()->getType())
3728 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3729 CMO->setName("cast.cmo");
3730 Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
3731 Escape->setName("ind.escape");
3732 MissingVals[UI] = Escape;
3733 }
3734 }
3735 
3736 for (auto &I : MissingVals) {
3737 PHINode *PHI = cast<PHINode>(I.first);
3738 // One corner case we have to handle is two IVs "chasing" each other,
3739 // that is %IV2 = phi [...], [ %IV1, %latch ]
3740 // In this case, if IV1 has an external use, we need to avoid adding both
3741 // "last value of IV1" and "penultimate value of IV2". So, verify that we
3742 // don't already have an incoming value for the middle block.
3743 if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3744 PHI->addIncoming(I.second, MiddleBlock);
3745 }
3746 }
3747 
3748 namespace {
3749 
3750 struct CSEDenseMapInfo {
3751 static bool canHandle(const Instruction *I) {
3752 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3753 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3754 }
3755 
3756 static inline Instruction *getEmptyKey() {
3757 return DenseMapInfo<Instruction *>::getEmptyKey();
3758 }
3759 
3760 static inline Instruction *getTombstoneKey() {
3761 return DenseMapInfo<Instruction *>::getTombstoneKey();
3762 }
3763 
3764 static unsigned getHashValue(const Instruction *I) {
3765 assert(canHandle(I) && "Unknown instruction!");
3766 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3767 I->value_op_end()));
3768 }
3769 
3770 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3771 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3772 LHS == getTombstoneKey() || RHS == getTombstoneKey())
3773 return LHS == RHS;
3774 return LHS->isIdenticalTo(RHS);
3775 }
3776 };
3777 
3778 } // end anonymous namespace
3779 
3780 /// \brief Perform CSE of induction variable instructions.
3781 static void cse(BasicBlock *BB) {
3782 // Perform simple CSE.
3783 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3784 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3785 Instruction *In = &*I++;
3786 
3787 if (!CSEDenseMapInfo::canHandle(In))
3788 continue;
3789 
3790 // Check if we can replace this instruction with any of the
3791 // visited instructions.
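// E.g., two identical extractelement instructions generated for the same
// vector and lane hash to the same key here, so the second is replaced by
// the first and erased (a sketch of the common case).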
3792 if (Instruction *V = CSEMap.lookup(In)) { 3793 In->replaceAllUsesWith(V); 3794 In->eraseFromParent(); 3795 continue; 3796 } 3797 3798 CSEMap[In] = In; 3799 } 3800 } 3801 3802 /// \brief Estimate the overhead of scalarizing an instruction. This is a 3803 /// convenience wrapper for the type-based getScalarizationOverhead API. 3804 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF, 3805 const TargetTransformInfo &TTI) { 3806 if (VF == 1) 3807 return 0; 3808 3809 unsigned Cost = 0; 3810 Type *RetTy = ToVectorTy(I->getType(), VF); 3811 if (!RetTy->isVoidTy() && 3812 (!isa<LoadInst>(I) || 3813 !TTI.supportsEfficientVectorElementLoadStore())) 3814 Cost += TTI.getScalarizationOverhead(RetTy, true, false); 3815 3816 if (CallInst *CI = dyn_cast<CallInst>(I)) { 3817 SmallVector<const Value *, 4> Operands(CI->arg_operands()); 3818 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF); 3819 } 3820 else if (!isa<StoreInst>(I) || 3821 !TTI.supportsEfficientVectorElementLoadStore()) { 3822 SmallVector<const Value *, 4> Operands(I->operand_values()); 3823 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF); 3824 } 3825 3826 return Cost; 3827 } 3828 3829 // Estimate cost of a call instruction CI if it were vectorized with factor VF. 3830 // Return the cost of the instruction, including scalarization overhead if it's 3831 // needed. The flag NeedToScalarize shows if the call needs to be scalarized - 3832 // i.e. either vector version isn't available, or is too expensive. 3833 static unsigned getVectorCallCost(CallInst *CI, unsigned VF, 3834 const TargetTransformInfo &TTI, 3835 const TargetLibraryInfo *TLI, 3836 bool &NeedToScalarize) { 3837 Function *F = CI->getCalledFunction(); 3838 StringRef FnName = CI->getCalledFunction()->getName(); 3839 Type *ScalarRetTy = CI->getType(); 3840 SmallVector<Type *, 4> Tys, ScalarTys; 3841 for (auto &ArgOp : CI->arg_operands()) 3842 ScalarTys.push_back(ArgOp->getType()); 3843 3844 // Estimate cost of scalarized vector call. The source operands are assumed 3845 // to be vectors, so we need to extract individual elements from there, 3846 // execute VF scalar calls, and then gather the result into the vector return 3847 // value. 3848 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3849 if (VF == 1) 3850 return ScalarCallCost; 3851 3852 // Compute corresponding vector type for return value and arguments. 3853 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3854 for (Type *ScalarTy : ScalarTys) 3855 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3856 3857 // Compute costs of unpacking argument values for the scalar calls and 3858 // packing the return values to a vector. 3859 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI); 3860 3861 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3862 3863 // If we can't emit a vector call for this function, then the currently found 3864 // cost is the cost we need to return. 3865 NeedToScalarize = true; 3866 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3867 return Cost; 3868 3869 // If the corresponding vector cost is cheaper, return its cost. 3870 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3871 if (VectorCallCost < Cost) { 3872 NeedToScalarize = false; 3873 return VectorCallCost; 3874 } 3875 return Cost; 3876 } 3877 3878 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3879 // factor VF. 
Return the cost of the instruction, including scalarization 3880 // overhead if it's needed. 3881 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3882 const TargetTransformInfo &TTI, 3883 const TargetLibraryInfo *TLI) { 3884 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3885 assert(ID && "Expected intrinsic call!"); 3886 3887 FastMathFlags FMF; 3888 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3889 FMF = FPMO->getFastMathFlags(); 3890 3891 SmallVector<Value *, 4> Operands(CI->arg_operands()); 3892 return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF); 3893 } 3894 3895 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3896 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3897 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3898 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3899 } 3900 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3901 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3902 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3903 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3904 } 3905 3906 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3907 // For every instruction `I` in MinBWs, truncate the operands, create a 3908 // truncated version of `I` and reextend its result. InstCombine runs 3909 // later and will remove any ext/trunc pairs. 3910 SmallPtrSet<Value *, 4> Erased; 3911 for (const auto &KV : Cost->getMinimalBitwidths()) { 3912 // If the value wasn't vectorized, we must maintain the original scalar 3913 // type. The absence of the value from VectorLoopValueMap indicates that it 3914 // wasn't vectorized. 3915 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3916 continue; 3917 for (unsigned Part = 0; Part < UF; ++Part) { 3918 Value *I = getOrCreateVectorValue(KV.first, Part); 3919 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3920 continue; 3921 Type *OriginalTy = I->getType(); 3922 Type *ScalarTruncatedTy = 3923 IntegerType::get(OriginalTy->getContext(), KV.second); 3924 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3925 OriginalTy->getVectorNumElements()); 3926 if (TruncatedTy == OriginalTy) 3927 continue; 3928 3929 IRBuilder<> B(cast<Instruction>(I)); 3930 auto ShrinkOperand = [&](Value *V) -> Value * { 3931 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3932 if (ZI->getSrcTy() == TruncatedTy) 3933 return ZI->getOperand(0); 3934 return B.CreateZExtOrTrunc(V, TruncatedTy); 3935 }; 3936 3937 // The actual instruction modification depends on the instruction type, 3938 // unfortunately. 3939 Value *NewI = nullptr; 3940 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3941 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3942 ShrinkOperand(BO->getOperand(1))); 3943 3944 // Any wrapping introduced by shrinking this operation shouldn't be 3945 // considered undefined behavior. So, we can't unconditionally copy 3946 // arithmetic wrapping flags to NewI. 
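// E.g., an 'add nsw i32' shrunk to i16 may now legitimately wrap, so the
// nsw/nuw flags are dropped and only the remaining IR flags are copied to
// NewI below (a sketch).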
3947 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3948 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3949 NewI = 3950 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3951 ShrinkOperand(CI->getOperand(1))); 3952 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3953 NewI = B.CreateSelect(SI->getCondition(), 3954 ShrinkOperand(SI->getTrueValue()), 3955 ShrinkOperand(SI->getFalseValue())); 3956 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3957 switch (CI->getOpcode()) { 3958 default: 3959 llvm_unreachable("Unhandled cast!"); 3960 case Instruction::Trunc: 3961 NewI = ShrinkOperand(CI->getOperand(0)); 3962 break; 3963 case Instruction::SExt: 3964 NewI = B.CreateSExtOrTrunc( 3965 CI->getOperand(0), 3966 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3967 break; 3968 case Instruction::ZExt: 3969 NewI = B.CreateZExtOrTrunc( 3970 CI->getOperand(0), 3971 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3972 break; 3973 } 3974 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3975 auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements(); 3976 auto *O0 = B.CreateZExtOrTrunc( 3977 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3978 auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements(); 3979 auto *O1 = B.CreateZExtOrTrunc( 3980 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3981 3982 NewI = B.CreateShuffleVector(O0, O1, SI->getMask()); 3983 } else if (isa<LoadInst>(I)) { 3984 // Don't do anything with the operands, just extend the result. 3985 continue; 3986 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3987 auto Elements = IE->getOperand(0)->getType()->getVectorNumElements(); 3988 auto *O0 = B.CreateZExtOrTrunc( 3989 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3990 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3991 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3992 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3993 auto Elements = EE->getOperand(0)->getType()->getVectorNumElements(); 3994 auto *O0 = B.CreateZExtOrTrunc( 3995 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3996 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3997 } else { 3998 llvm_unreachable("Unhandled instruction type!"); 3999 } 4000 4001 // Lastly, extend the result. 4002 NewI->takeName(cast<Instruction>(I)); 4003 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 4004 I->replaceAllUsesWith(Res); 4005 cast<Instruction>(I)->eraseFromParent(); 4006 Erased.insert(I); 4007 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res); 4008 } 4009 } 4010 4011 // We'll have created a bunch of ZExts that are now parentless. Clean up. 4012 for (const auto &KV : Cost->getMinimalBitwidths()) { 4013 // If the value wasn't vectorized, we must maintain the original scalar 4014 // type. The absence of the value from VectorLoopValueMap indicates that it 4015 // wasn't vectorized. 
4016 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
4017 continue;
4018 for (unsigned Part = 0; Part < UF; ++Part) {
4019 Value *I = getOrCreateVectorValue(KV.first, Part);
4020 ZExtInst *Inst = dyn_cast<ZExtInst>(I);
4021 if (Inst && Inst->use_empty()) {
4022 Value *NewI = Inst->getOperand(0);
4023 Inst->eraseFromParent();
4024 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
4025 }
4026 }
4027 }
4028 }
4029 
4030 void InnerLoopVectorizer::fixVectorizedLoop() {
4031 // Insert truncates and extends for any truncated instructions as hints to
4032 // InstCombine.
4033 if (VF > 1)
4034 truncateToMinimalBitwidths();
4035 
4036 // At this point every instruction in the original loop is widened to a
4037 // vector form. Now we need to fix the recurrences in the loop. These PHI
4038 // nodes are currently empty because we did not want to introduce cycles.
4039 // This is the second stage of vectorizing recurrences.
4040 fixCrossIterationPHIs();
4041 
4042 // Update the dominator tree.
4043 //
4044 // FIXME: After creating the structure of the new loop, the dominator tree is
4045 // no longer up-to-date, and it remains that way until we update it
4046 // here. An out-of-date dominator tree is problematic for SCEV,
4047 // because SCEVExpander uses it to guide code generation. The
4048 // vectorizer uses SCEVExpanders in several places. Instead, we should
4049 // keep the dominator tree up-to-date as we go.
4050 updateAnalysis();
4051 
4052 // Fix-up external users of the induction variables.
4053 for (auto &Entry : *Legal->getInductionVars())
4054 fixupIVUsers(Entry.first, Entry.second,
4055 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4056 IVEndValues[Entry.first], LoopMiddleBlock);
4057 
4058 fixLCSSAPHIs();
4059 for (Instruction *PI : PredicatedInstructions)
4060 sinkScalarOperands(&*PI);
4061 
4062 // Remove redundant induction instructions.
4063 cse(LoopVectorBody);
4064 }
4065 
4066 void InnerLoopVectorizer::fixCrossIterationPHIs() {
4067 // In order to support recurrences we need to be able to vectorize Phi nodes.
4068 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4069 // stage #2: We now need to fix the recurrences by adding incoming edges to
4070 // the currently empty PHI nodes. At this point every instruction in the
4071 // original loop is widened to a vector form so we can use them to construct
4072 // the incoming edges.
4073 for (Instruction &I : *OrigLoop->getHeader()) {
4074 PHINode *Phi = dyn_cast<PHINode>(&I);
4075 if (!Phi)
4076 break;
4077 // Handle first-order recurrences and reductions that need to be fixed.
4078 if (Legal->isFirstOrderRecurrence(Phi))
4079 fixFirstOrderRecurrence(Phi);
4080 else if (Legal->isReductionVariable(Phi))
4081 fixReduction(Phi);
4082 }
4083 }
4084 
4085 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
4086 // This is the second phase of vectorizing first-order recurrences. An
4087 // overview of the transformation is described below. Suppose we have the
4088 // following loop.
4089 //
4090 // for (int i = 0; i < n; ++i)
4091 // b[i] = a[i] - a[i - 1];
4092 //
4093 // There is a first-order recurrence on "a". For this loop, the shorthand
4094 // scalar IR looks like:
4095 //
4096 // scalar.ph:
4097 // s_init = a[-1]
4098 // br scalar.body
4099 //
4100 // scalar.body:
4101 // i = phi [0, scalar.ph], [i+1, scalar.body]
4102 // s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4103 // s2 = a[i]
4104 // b[i] = s2 - s1
4105 // br cond, scalar.body, ...
4106 //
4107 // In this example, s1 is a recurrence because its value depends on the
4108 // previous iteration. In the first phase of vectorization, we created a
4109 // temporary value for s1. We now complete the vectorization and produce the
4110 // shorthand vector IR shown below (for VF = 4, UF = 1).
4111 //
4112 // vector.ph:
4113 // v_init = vector(..., ..., ..., a[-1])
4114 // br vector.body
4115 //
4116 // vector.body
4117 // i = phi [0, vector.ph], [i+4, vector.body]
4118 // v1 = phi [v_init, vector.ph], [v2, vector.body]
4119 // v2 = a[i, i+1, i+2, i+3];
4120 // v3 = vector(v1(3), v2(0, 1, 2))
4121 // b[i, i+1, i+2, i+3] = v2 - v3
4122 // br cond, vector.body, middle.block
4123 //
4124 // middle.block:
4125 // x = v2(3)
4126 // br scalar.ph
4127 //
4128 // scalar.ph:
4129 // s_init = phi [x, middle.block], [a[-1], otherwise]
4130 // br scalar.body
4131 //
4132 // After the vector loop completes execution, we extract the next value of
4133 // the recurrence (x) to use as the initial value in the scalar loop.
4134 
4135 // Get the original loop preheader and single loop latch.
4136 auto *Preheader = OrigLoop->getLoopPreheader();
4137 auto *Latch = OrigLoop->getLoopLatch();
4138 
4139 // Get the initial and previous values of the scalar recurrence.
4140 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4141 auto *Previous = Phi->getIncomingValueForBlock(Latch);
4142 
4143 // Create a vector from the initial value.
4144 auto *VectorInit = ScalarInit;
4145 if (VF > 1) {
4146 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4147 VectorInit = Builder.CreateInsertElement(
4148 UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
4149 Builder.getInt32(VF - 1), "vector.recur.init");
4150 }
4151 
4152 // We constructed a temporary phi node in the first phase of vectorization.
4153 // This phi node will eventually be deleted.
4154 Builder.SetInsertPoint(
4155 cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
4156 
4157 // Create a phi node for the new recurrence. The current value will either be
4158 // the initial value inserted into a vector or loop-varying vector value.
4159 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4160 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4161 
4162 // Get the vectorized previous value of the last part UF - 1. It appears last
4163 // among all unrolled iterations, due to the order of their construction.
4164 Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
4165 
4166 // Set the insertion point after the previous value if it is an instruction.
4167 // Note that the previous value may have been constant-folded so it is not
4168 // guaranteed to be an instruction in the vector loop. Also, if the previous
4169 // value is a phi node, we should insert after all the phi nodes to avoid
4170 // breaking basic block verification.
4171 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) ||
4172 isa<PHINode>(PreviousLastPart))
4173 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
4174 else
4175 Builder.SetInsertPoint(
4176 &*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart)));
4177 
4178 // We will construct a vector for the recurrence by combining the values for
4179 // the current and previous iterations. This is the required shuffle mask.
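// E.g., for VF = 4 the mask is <3, 4, 5, 6>: index 3 selects the last
// element of the first shuffle operand (the incoming vector), and indices
// 4-6 select the first three elements of the second operand.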
4180 SmallVector<Constant *, 8> ShuffleMask(VF);
4181 ShuffleMask[0] = Builder.getInt32(VF - 1);
4182 for (unsigned I = 1; I < VF; ++I)
4183 ShuffleMask[I] = Builder.getInt32(I + VF - 1);
4184 
4185 // The vector from which to take the initial value for the current iteration
4186 // (actual or unrolled). Initially, this is the vector phi node.
4187 Value *Incoming = VecPhi;
4188 
4189 // Shuffle the current and previous vector and update the vector parts.
4190 for (unsigned Part = 0; Part < UF; ++Part) {
4191 Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
4192 Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
4193 auto *Shuffle =
4194 VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart,
4195 ConstantVector::get(ShuffleMask))
4196 : Incoming;
4197 PhiPart->replaceAllUsesWith(Shuffle);
4198 cast<Instruction>(PhiPart)->eraseFromParent();
4199 VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
4200 Incoming = PreviousPart;
4201 }
4202 
4203 // Fix the latch value of the new recurrence in the vector loop.
4204 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4205 
4206 // Extract the last vector element in the middle block. This will be the
4207 // initial value for the recurrence when jumping to the scalar loop.
4208 auto *ExtractForScalar = Incoming;
4209 if (VF > 1) {
4210 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4211 ExtractForScalar = Builder.CreateExtractElement(
4212 ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
4213 }
4214 // Extract the second-to-last element in the middle block if the
4215 // Phi is used outside the loop. We need to extract the phi itself
4216 // and not the last element (the phi update in the current iteration). This
4217 // will be the value when jumping to the exit block from the LoopMiddleBlock,
4218 // when the scalar loop is not run at all.
4219 Value *ExtractForPhiUsedOutsideLoop = nullptr;
4220 if (VF > 1)
4221 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4222 Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
4223 // When the loop is unrolled without vectorizing, initialize
4224 // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
4225 // value of `Incoming`. This is analogous to the vectorized case above:
4226 // extracting the second-to-last element when VF > 1.
4227 else if (UF > 1)
4228 ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
4229 
4230 // Fix the initial value of the original recurrence in the scalar loop.
4231 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4232 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4233 for (auto *BB : predecessors(LoopScalarPreHeader)) {
4234 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4235 Start->addIncoming(Incoming, BB);
4236 }
4237 
4238 Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
4239 Phi->setName("scalar.recur");
4240 
4241 // Finally, fix users of the recurrence outside the loop. The users will need
4242 // either the last value of the scalar recurrence or the last value of the
4243 // vector recurrence we extracted in the middle block. Since the loop is in
4244 // LCSSA form, we just need to find the phi node for the original scalar
4245 // recurrence in the exit block, and then add an edge for the middle block.
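// E.g., an exit-block phi such as %s.lcssa = phi [ %scalar.recur, %for.body ]
// gains a second incoming value, the element extracted in the middle block
// (an illustrative sketch; names hypothetical).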
  for (auto &I : *LoopExitBlock) {
    auto *LCSSAPhi = dyn_cast<PHINode>(&I);
    if (!LCSSAPhi)
      break;
    if (LCSSAPhi->getIncomingValue(0) == Phi) {
      LCSSAPhi->addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
      break;
    }
  }
}

void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
  Constant *Zero = Builder.getInt32(0);

  // Get its reduction variable descriptor.
  assert(Legal->isReductionVariable(Phi) &&
         "Unable to find the reduction variable");
  RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];

  RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
  TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
  Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
  RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
      RdxDesc.getMinMaxRecurrenceKind();
  setDebugLocFromInst(Builder, ReductionStartValue);

  // We need to generate a reduction vector from the incoming scalar.
  // To do so, we need to generate the 'identity' vector and override
  // one of the elements with the incoming scalar reduction. We need
  // to do it in the vector-loop preheader.
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // This is the vector-clone of the value that leaves the loop.
  Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();

  // Find the reduction identity value: zero for addition, or, and xor; one
  // for multiplication; -1 for and.
  Value *Identity;
  Value *VectorStart;
  if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
      RK == RecurrenceDescriptor::RK_FloatMinMax) {
    // MinMax reductions have the start value as their identity.
    if (VF == 1) {
      VectorStart = Identity = ReductionStartValue;
    } else {
      VectorStart = Identity =
          Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
    }
  } else {
    // Handle other reduction kinds:
    Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
        RK, VecTy->getScalarType());
    if (VF == 1) {
      Identity = Iden;
      // This vector is the Identity vector where the first element is the
      // incoming scalar reduction.
      VectorStart = ReductionStartValue;
    } else {
      Identity = ConstantVector::getSplat(VF, Iden);

      // This vector is the Identity vector where the first element is the
      // incoming scalar reduction.
      VectorStart =
          Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
    }
  }

  // Fix the vector-loop phi.

  // Reductions do not have to start at zero. They can start with
  // any loop-invariant values.
  BasicBlock *Latch = OrigLoop->getLoopLatch();
  Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
    Value *Val = getOrCreateVectorValue(LoopVal, Part);
    // Make sure to add the reduction start value only to the
    // first unroll part.
    Value *StartVal = (Part == 0) ? VectorStart : Identity;
    cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
    cast<PHINode>(VecRdxPhi)
        ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
  }

  // Before each round, move the insertion point right between
  // the PHIs and the values we are going to write.
  // This allows us to write both PHINodes and the extractelement
  // instructions.
  Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());

  setDebugLocFromInst(Builder, LoopExitInst);

  // If the vector reduction can be performed in a smaller type, we truncate
  // then extend the loop exit value to enable InstCombine to evaluate the
  // entire expression in the smaller type.
  if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
    Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
    Builder.SetInsertPoint(
        LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
    VectorParts RdxParts(UF);
    for (unsigned Part = 0; Part < UF; ++Part) {
      RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
      Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
      Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
                                        : Builder.CreateZExt(Trunc, VecTy);
      for (Value::user_iterator UI = RdxParts[Part]->user_begin();
           UI != RdxParts[Part]->user_end();)
        if (*UI != Trunc) {
          (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
          RdxParts[Part] = Extnd;
        } else {
          ++UI;
        }
    }
    Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
    for (unsigned Part = 0; Part < UF; ++Part) {
      RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
      VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
    }
  }

  // Reduce all of the unrolled parts into a single vector.
  Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
  unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
  setDebugLocFromInst(Builder, ReducedPartRdx);
  for (unsigned Part = 1; Part < UF; ++Part) {
    Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
    if (Op != Instruction::ICmp && Op != Instruction::FCmp)
      // Floating-point operations had to be 'fast' to enable the reduction.
      ReducedPartRdx = addFastMathFlag(
          Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart,
                              ReducedPartRdx, "bin.rdx"));
    else
      ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp(
          Builder, MinMaxKind, ReducedPartRdx, RdxPart);
  }

  if (VF > 1) {
    bool NoNaN = Legal->hasFunNoNaNAttr();
    ReducedPartRdx =
        createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN);
    // If the reduction can be performed in a smaller type, we need to extend
    // the reduction to the wider type before we branch to the original loop.
    if (Phi->getType() != RdxDesc.getRecurrenceType())
      ReducedPartRdx =
          RdxDesc.isSigned()
              ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
              : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
  }

  // Create a phi node that merges control-flow from the backedge-taken check
  // block and the middle block.
  PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
                                        LoopScalarPreHeader->getTerminator());
  for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
    BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
  BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);

  // Now, we need to fix the users of the reduction variable
  // inside and outside of the scalar remainder loop.
  // We know that the loop is in LCSSA form. We need to update the
  // PHI nodes in the exit blocks.
  for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
                            LEE = LoopExitBlock->end();
       LEI != LEE; ++LEI) {
    PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
    if (!LCSSAPhi)
      break;

    // All PHI nodes need to have a single incoming edge, or two if
    // we already fixed them.
    assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI");

    // We found a reduction value exit-PHI. Update it with the
    // incoming bypass edge.
    if (LCSSAPhi->getIncomingValue(0) == LoopExitInst)
      LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
  } // end of the LCSSA phi scan.

  // Fix the scalar loop reduction variable with the incoming reduction sum
  // from the vector body and from the backedge value.
  int IncomingEdgeBlockIdx =
      Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
  assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
  // Pick the other block.
  int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
  Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
  Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
}

void InnerLoopVectorizer::fixLCSSAPHIs() {
  for (Instruction &LEI : *LoopExitBlock) {
    auto *LCSSAPhi = dyn_cast<PHINode>(&LEI);
    if (!LCSSAPhi)
      break;
    if (LCSSAPhi->getNumIncomingValues() == 1) {
      assert(OrigLoop->isLoopInvariant(LCSSAPhi->getIncomingValue(0)) &&
             "Incoming value isn't loop invariant");
      LCSSAPhi->addIncoming(LCSSAPhi->getIncomingValue(0), LoopMiddleBlock);
    }
  }
}

void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
  // The basic block and loop containing the predicated instruction.
  auto *PredBB = PredInst->getParent();
  auto *VectorLoop = LI->getLoopFor(PredBB);

  // Initialize a worklist with the operands of the predicated instruction.
  SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());

  // Holds instructions that we need to analyze again. An instruction may be
  // reanalyzed if we don't yet know whether we can sink it or not.
  SmallVector<Instruction *, 8> InstsToReanalyze;

  // Returns true if a given use occurs in the predicated block. Phi nodes use
  // their operands in their corresponding predecessor blocks.
  auto isBlockOfUsePredicated = [&](Use &U) -> bool {
    auto *I = cast<Instruction>(U.getUser());
    BasicBlock *BB = I->getParent();
    if (auto *Phi = dyn_cast<PHINode>(I))
      BB = Phi->getIncomingBlock(
          PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
    return BB == PredBB;
  };

  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when a pass
  // through the worklist doesn't sink a single instruction.
  bool Changed;
  do {
    // Add the instructions that need to be reanalyzed to the worklist, and
    // reset the changed indicator.
    Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
    InstsToReanalyze.clear();
    Changed = false;

    while (!Worklist.empty()) {
      auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());

      // We can't sink an instruction if it is a phi node, is already in the
      // predicated block, is not in the loop, or may have side effects.
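      // For example, a scalarized address computation used only by the
      // predicated instruction can be sunk into its block, whereas a phi
      // node or an instruction that may write to memory must stay put.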
      if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
          !VectorLoop->contains(I) || I->mayHaveSideEffects())
        continue;

      // It's legal to sink the instruction if all its uses occur in the
      // predicated block. Otherwise, there's nothing to do yet, and we may
      // need to reanalyze the instruction.
      if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
        InstsToReanalyze.push_back(I);
        continue;
      }

      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
      I->moveBefore(&*PredBB->getFirstInsertionPt());
      Worklist.insert(I->op_begin(), I->op_end());

      // The sinking may have enabled other instructions to be sunk, so we will
      // need to iterate.
      Changed = true;
    }
  } while (Changed);
}

void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
                                              unsigned VF) {
  PHINode *P = cast<PHINode>(PN);
  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #1: We create a new vector PHI node with no incoming edges. We'll use
  // this value when we vectorize all of the instructions that use the PHI.
  if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
    for (unsigned Part = 0; Part < UF; ++Part) {
      // This is phase one of vectorizing PHIs.
      Type *VecTy =
          (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
      Value *EntryPart = PHINode::Create(
          VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
      VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
    }
    return;
  }

  setDebugLocFromInst(Builder, P);

  // This PHINode must be an induction variable.
  // Make sure that we know about it.
  assert(Legal->getInductionVars()->count(P) && "Not an induction variable");

  InductionDescriptor II = Legal->getInductionVars()->lookup(P);
  const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // FIXME: The newly created binary instructions should contain nsw/nuw flags,
  // which can be found from the original scalar operations.
  switch (II.getKind()) {
  case InductionDescriptor::IK_NoInduction:
    llvm_unreachable("Unknown induction");
  case InductionDescriptor::IK_IntInduction:
  case InductionDescriptor::IK_FpInduction:
    llvm_unreachable("Integer/fp induction is handled elsewhere.");
  case InductionDescriptor::IK_PtrInduction: {
    // Handle the pointer induction variable case.
    assert(P->getType()->isPointerTy() && "Unexpected type.");
    // This is the normalized GEP that starts counting at zero.
    Value *PtrInd = Induction;
    PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
    // Determine the number of scalars we need to generate for each unroll
    // iteration. If the instruction is uniform, we only need to generate the
    // first lane. Otherwise, we generate all VF values.
    unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
    // These are the scalar results. Notice that we don't generate vector GEPs
    // because scalar GEPs result in better code.
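    // For example, assuming VF = 4, UF = 1, and a pointer induction %p with
    // a unit step, the loop below emits four scalar pointers named next.gep,
    // corresponding to %p + i, %p + i + 1, %p + i + 2, and %p + i + 3, rather
    // than a single GEP producing a <4 x ...> vector of pointers.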
    for (unsigned Part = 0; Part < UF; ++Part) {
      for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
        Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
        Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
        Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
        SclrGep->setName("next.gep");
        VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
      }
    }
    return;
  }
  }
}

/// A helper function for checking whether an integer division-related
/// instruction may divide by zero (in which case it must be predicated if
/// executed conditionally in the scalar code).
/// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing
/// the division, but can do so w/o predication.
static bool mayDivideByZero(Instruction &I) {
  assert((I.getOpcode() == Instruction::UDiv ||
          I.getOpcode() == Instruction::SDiv ||
          I.getOpcode() == Instruction::URem ||
          I.getOpcode() == Instruction::SRem) &&
         "Unexpected instruction");
  Value *Divisor = I.getOperand(1);
  auto *CInt = dyn_cast<ConstantInt>(Divisor);
  return !CInt || CInt->isZero();
}

void InnerLoopVectorizer::widenInstruction(Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Br:
  case Instruction::PHI:
    llvm_unreachable("This instruction is handled by a different recipe.");
  case Instruction::GetElementPtr: {
    // Construct a vector GEP by widening the operands of the scalar GEP as
    // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
    // results in a vector of pointers when at least one operand of the GEP
    // is vector-typed. Thus, to keep the representation compact, we only use
    // vector-typed operands for loop-varying values.
    auto *GEP = cast<GetElementPtrInst>(&I);

    if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) {
      // If we are vectorizing, but the GEP has only loop-invariant operands,
      // the GEP we build (by only using vector-typed operands for
      // loop-varying values) would be a scalar pointer. Thus, to ensure we
      // produce a vector of pointers, we need to either arbitrarily pick an
      // operand to broadcast, or broadcast a clone of the original GEP.
      // Here, we broadcast a clone of the original.
      //
      // TODO: If at some point we decide to scalarize instructions having
      //       loop-invariant operands, this special case will no longer be
      //       required. We would add the scalarization decision to
      //       collectLoopScalars() and teach getVectorValue() to broadcast
      //       the lane-zero scalar value.
      auto *Clone = Builder.Insert(GEP->clone());
      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
        VectorLoopValueMap.setVectorValue(&I, Part, EntryPart);
        addMetadata(EntryPart, GEP);
      }
    } else {
      // If the GEP has at least one loop-varying operand, we are sure to
      // produce a vector of pointers. But if we are only unrolling, we want
      // to produce a scalar GEP for each unroll part. Thus, the GEP we
      // produce with the code below will be scalar (if VF == 1) or vector
      // (otherwise). Note that for the unroll-only case, we still maintain
      // values in the vector mapping with initVector, as we do for other
      // instructions.
      for (unsigned Part = 0; Part < UF; ++Part) {
        // The pointer operand of the new GEP. If it's loop-invariant, we
        // won't broadcast it.
        auto *Ptr =
            OrigLoop->isLoopInvariant(GEP->getPointerOperand())
                ? GEP->getPointerOperand()
                : getOrCreateVectorValue(GEP->getPointerOperand(), Part);

        // Collect all the indices for the new GEP. If any index is
        // loop-invariant, we won't broadcast it.
        SmallVector<Value *, 4> Indices;
        for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) {
          if (OrigLoop->isLoopInvariant(U.get()))
            Indices.push_back(U.get());
          else
            Indices.push_back(getOrCreateVectorValue(U.get(), Part));
        }

        // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
        // but it should be a vector, otherwise.
        auto *NewGEP = GEP->isInBounds()
                           ? Builder.CreateInBoundsGEP(Ptr, Indices)
                           : Builder.CreateGEP(Ptr, Indices);
        assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
               "NewGEP is not a pointer vector");
        VectorLoopValueMap.setVectorValue(&I, Part, NewGEP);
        addMetadata(NewGEP, GEP);
      }
    }

    break;
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Just widen binops.
    auto *BinOp = cast<BinaryOperator>(&I);
    setDebugLocFromInst(Builder, BinOp);

    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *A = getOrCreateVectorValue(BinOp->getOperand(0), Part);
      Value *B = getOrCreateVectorValue(BinOp->getOperand(1), Part);
      Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B);

      if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
        VecOp->copyIRFlags(BinOp);

      // Use this vector value for all users of the original instruction.
      VectorLoopValueMap.setVectorValue(&I, Part, V);
      addMetadata(V, BinOp);
    }

    break;
  }
  case Instruction::Select: {
    // Widen selects.
    // If the selector is loop invariant, we can create a select
    // instruction with a scalar condition. Otherwise, use vector-select.
    auto *SE = PSE.getSE();
    bool InvariantCond =
        SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
    setDebugLocFromInst(Builder, &I);

    // The condition can be loop invariant but still defined inside the
    // loop. This means that we can't just use the original 'cond' value.
    // We have to take the 'vectorized' value and pick the first lane.
    // Instcombine will make this a no-op.

    auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0});

    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part);
      Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part);
      Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part);
      Value *Sel =
          Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1);
      VectorLoopValueMap.setVectorValue(&I, Part, Sel);
      addMetadata(Sel, &I);
    }

    break;
  }

  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Widen compares. Generate vector compares.
    bool FCmp = (I.getOpcode() == Instruction::FCmp);
    auto *Cmp = dyn_cast<CmpInst>(&I);
    setDebugLocFromInst(Builder, Cmp);
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part);
      Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part);
      Value *C = nullptr;
      if (FCmp) {
        // Propagate fast math flags.
        IRBuilder<>::FastMathFlagGuard FMFG(Builder);
        Builder.setFastMathFlags(Cmp->getFastMathFlags());
        C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
      } else {
        C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
      }
      VectorLoopValueMap.setVectorValue(&I, Part, C);
      addMetadata(C, &I);
    }

    break;
  }

  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    auto *CI = dyn_cast<CastInst>(&I);
    setDebugLocFromInst(Builder, CI);

    /// Vectorize casts.
    Type *DestTy =
        (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);

    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *A = getOrCreateVectorValue(CI->getOperand(0), Part);
      Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
      VectorLoopValueMap.setVectorValue(&I, Part, Cast);
      addMetadata(Cast, &I);
    }
    break;
  }

  case Instruction::Call: {
    // Ignore dbg intrinsics.
    if (isa<DbgInfoIntrinsic>(I))
      break;
    setDebugLocFromInst(Builder, &I);

    Module *M = I.getParent()->getParent()->getParent();
    auto *CI = cast<CallInst>(&I);

    StringRef FnName = CI->getCalledFunction()->getName();
    Function *F = CI->getCalledFunction();
    Type *RetTy = ToVectorTy(CI->getType(), VF);
    SmallVector<Type *, 4> Tys;
    for (Value *ArgOperand : CI->arg_operands())
      Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));

    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

    // The flag indicates whether we use an intrinsic or a plain call for the
    // vectorized version of the instruction.
    // Is it beneficial to perform the intrinsic call rather than the library
    // call?
    bool NeedToScalarize;
    unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
    bool UseVectorIntrinsic =
        ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
    assert((UseVectorIntrinsic || !NeedToScalarize) &&
           "Instruction should be scalarized elsewhere.");

    for (unsigned Part = 0; Part < UF; ++Part) {
      SmallVector<Value *, 4> Args;
      for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
        Value *Arg = CI->getArgOperand(i);
        // Some intrinsics have a scalar argument - don't replace it with a
        // vector.
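        // For example, the exponent operand of @llvm.powi is an i32 and must
        // stay scalar even when the other operands are widened.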
        if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i))
          Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part);
        Args.push_back(Arg);
      }

      Function *VectorF;
      if (UseVectorIntrinsic) {
        // Use vector version of the intrinsic.
        Type *TysForDecl[] = {CI->getType()};
        if (VF > 1)
          TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
        VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
      } else {
        // Use vector version of the library call.
        StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
        assert(!VFnName.empty() && "Vector function name is empty.");
        VectorF = M->getFunction(VFnName);
        if (!VectorF) {
          // Generate a declaration.
          FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
          VectorF =
              Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
          VectorF->copyAttributesFrom(F);
        }
      }
      assert(VectorF && "Can't create vector function.");

      SmallVector<OperandBundleDef, 1> OpBundles;
      CI->getOperandBundlesAsDefs(OpBundles);
      CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

      if (isa<FPMathOperator>(V))
        V->copyFastMathFlags(CI);

      VectorLoopValueMap.setVectorValue(&I, Part, V);
      addMetadata(V, &I);
    }

    break;
  }

  default:
    // This instruction is not vectorized by simple widening.
    DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
    llvm_unreachable("Unhandled instruction!");
  } // end of switch.
}

void InnerLoopVectorizer::updateAnalysis() {
  // Forget the original basic block.
  PSE.getSE()->forgetLoop(OrigLoop);

  // Update the dominator tree information.
  assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
         "Entry does not dominate exit.");

  DT->addNewBlock(LoopMiddleBlock,
                  LI->getLoopFor(LoopVectorBody)->getLoopLatch());
  DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
  DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
  DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
  DEBUG(DT->verifyDomTree());
}

/// \brief Check whether it is safe to if-convert this phi node.
///
/// Phi nodes with constant expressions that can trap are not safe to
/// if-convert.
static bool canIfConvertPHINodes(BasicBlock *BB) {
  for (Instruction &I : *BB) {
    auto *Phi = dyn_cast<PHINode>(&I);
    if (!Phi)
      return true;
    for (Value *V : Phi->incoming_values())
      if (auto *C = dyn_cast<Constant>(V))
        if (C->canTrap())
          return false;
  }
  return true;
}

bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
  if (!EnableIfConversion) {
    ORE->emit(createMissedAnalysis("IfConversionDisabled")
              << "if-conversion is disabled");
    return false;
  }

  assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");

  // A list of pointers that we can safely read and write to.
  SmallPtrSet<Value *, 8> SafePointers;

  // Collect safe addresses.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (blockNeedsPredication(BB))
      continue;

    for (Instruction &I : *BB)
      if (auto *Ptr = getPointerOperand(&I))
        SafePointers.insert(Ptr);
  }

  // Collect the blocks that need predication.
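  // For example, assuming the source loop below, the block that stores to
  // b[i] executes conditionally and therefore needs predication:
  //
  //   for (int i = 0; i < n; ++i)
  //     if (a[i] > 0)
  //       b[i] = a[i];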
  BasicBlock *Header = TheLoop->getHeader();
  for (BasicBlock *BB : TheLoop->blocks()) {
    // We don't support switch statements inside loops.
    if (!isa<BranchInst>(BB->getTerminator())) {
      ORE->emit(createMissedAnalysis("LoopContainsSwitch", BB->getTerminator())
                << "loop contains a switch statement");
      return false;
    }

    // We must be able to predicate all blocks that need to be predicated.
    if (blockNeedsPredication(BB)) {
      if (!blockCanBePredicated(BB, SafePointers)) {
        ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
                  << "control flow cannot be substituted for a select");
        return false;
      }
    } else if (BB != Header && !canIfConvertPHINodes(BB)) {
      ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
                << "control flow cannot be substituted for a select");
      return false;
    }
  }

  // We can if-convert this loop.
  return true;
}

bool LoopVectorizationLegality::canVectorize() {
  // Store the result and return it at the end instead of exiting early, in
  // case allowExtraAnalysis is used to report multiple reasons for not
  // vectorizing.
  bool Result = true;

  bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);

  // We must have a loop in canonical form. Loops with indirectbr in them
  // cannot be canonicalized.
  if (!TheLoop->getLoopPreheader()) {
    ORE->emit(createMissedAnalysis("CFGNotUnderstood")
              << "loop control flow is not understood by vectorizer");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // FIXME: The code is currently dead, since any loop that gets sent to
  // LoopVectorizationLegality is already an innermost loop.
  //
  // We can only vectorize innermost loops.
  if (!TheLoop->empty()) {
    ORE->emit(createMissedAnalysis("NotInnermostLoop")
              << "loop is not the innermost loop");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    ORE->emit(createMissedAnalysis("CFGNotUnderstood")
              << "loop control flow is not understood by vectorizer");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    ORE->emit(createMissedAnalysis("CFGNotUnderstood")
              << "loop control flow is not understood by vectorizer");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    ORE->emit(createMissedAnalysis("CFGNotUnderstood")
              << "loop control flow is not understood by vectorizer");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // Report the loop we found.
  DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
               << '\n');

  // Check if we can if-convert non-single-bb loops.
  unsigned NumBlocks = TheLoop->getNumBlocks();
  if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
    DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // Check if we can vectorize the instructions and CFG in this loop.
  if (!canVectorizeInstrs()) {
    DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // Go over each instruction and look at memory deps.
  if (!canVectorizeMemory()) {
    DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  DEBUG(dbgs() << "LV: We can vectorize this loop"
               << (LAI->getRuntimePointerChecking()->Need
                       ? " (with a runtime bound check)"
                       : "")
               << "!\n");

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved)
    InterleaveInfo.analyzeInterleaving(*getSymbolicStrides());

  unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
  if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
    SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;

  if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
    ORE->emit(createMissedAnalysis("TooManySCEVRunTimeChecks")
              << "Too many SCEV assumptions need to be made and checked "
              << "at runtime");
    DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // Okay! We've done all the tests. If any have failed, return false.
  // Otherwise we can vectorize, and at this point we don't have any other
  // memory analysis which may limit our maximum vectorization factor, so
  // just return true with no restrictions.
  return Result;
}

static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
  if (Ty->isPointerTy())
    return DL.getIntPtrType(Ty);

  // It is possible that chars or shorts overflow when we ask for the loop's
  // trip count; work around this by widening the type.
  if (Ty->getScalarSizeInBits() < 32)
    return Type::getInt32Ty(Ty->getContext());

  return Ty;
}

static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
  Ty0 = convertPointerToIntegerType(DL, Ty0);
  Ty1 = convertPointerToIntegerType(DL, Ty1);
  if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
    return Ty0;
  return Ty1;
}

/// \brief Check whether the instruction has outside-loop users and is not an
/// identified reduction variable.
static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
                               SmallPtrSetImpl<Value *> &AllowedExit) {
  // Reduction and Induction instructions are allowed to have exit users. All
  // other instructions must not have external users.
  if (!AllowedExit.count(Inst))
    // Check that all of the users of the loop are inside the BB.
    for (User *U : Inst->users()) {
      Instruction *UI = cast<Instruction>(U);
      // This user may be a reduction exit value.
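      // For example, an LCSSA phi in the exit block that reads the final
      // value of a sum would be such a user.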
      if (!TheLoop->contains(UI)) {
        DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
        return true;
      }
    }
  return false;
}

void LoopVectorizationLegality::addInductionPhi(
    PHINode *Phi, const InductionDescriptor &ID,
    SmallPtrSetImpl<Value *> &AllowedExit) {
  Inductions[Phi] = ID;
  Type *PhiTy = Phi->getType();
  const DataLayout &DL = Phi->getModule()->getDataLayout();

  // Get the widest type.
  if (!PhiTy->isFloatingPointTy()) {
    if (!WidestIndTy)
      WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
    else
      WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
  }

  // Int inductions are special because we only allow one IV.
  if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
      ID.getConstIntStepValue() && ID.getConstIntStepValue()->isOne() &&
      isa<Constant>(ID.getStartValue()) &&
      cast<Constant>(ID.getStartValue())->isNullValue()) {

    // Use the phi node with the widest type as induction. Use the last
    // one if there are multiple (no good reason for doing this other
    // than it is expedient). We've checked that it begins at zero and
    // steps by one, so this is a canonical induction variable.
    if (!PrimaryInduction || PhiTy == WidestIndTy)
      PrimaryInduction = Phi;
  }

  // Both the PHI node itself and the "post-increment" value feeding
  // back into the PHI node may have external users.
  // We can allow those uses, except if the SCEVs we have for them rely
  // on predicates that only hold within the loop, since allowing the exit
  // currently means re-using this SCEV outside the loop.
  if (PSE.getUnionPredicate().isAlwaysTrue()) {
    AllowedExit.insert(Phi);
    AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
  }

  DEBUG(dbgs() << "LV: Found an induction variable.\n");
}

bool LoopVectorizationLegality::canVectorizeInstrs() {
  BasicBlock *Header = TheLoop->getHeader();

  // Look for the attribute signaling the absence of NaNs.
  Function &F = *Header->getParent();
  HasFunNoNaNAttr =
      F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";

  // For each block in the loop.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the instructions in the block and look for hazards.
    for (Instruction &I : *BB) {
      if (auto *Phi = dyn_cast<PHINode>(&I)) {
        Type *PhiTy = Phi->getType();
        // Check that this PHI type is allowed.
        if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
            !PhiTy->isPointerTy()) {
          ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
                    << "loop control flow is not understood by vectorizer");
          DEBUG(dbgs() << "LV: Found a non-int non-pointer PHI.\n");
          return false;
        }

        // If this PHINode is not in the header block, then we know that we
        // can convert it to a select during if-conversion. No need to check
        // if the PHIs in this block are induction or reduction variables.
        if (BB != Header) {
          // Check that this instruction has no outside users or is an
          // identified reduction value with an outside user.
          if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
            continue;
          ORE->emit(createMissedAnalysis("NeitherInductionNorReduction", Phi)
                    << "value could not be identified as "
                       "an induction or reduction variable");
          return false;
        }

        // We only allow if-converted PHIs with exactly two incoming values.
        if (Phi->getNumIncomingValues() != 2) {
          ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
                    << "loop control flow is not understood by vectorizer");
          DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
          return false;
        }

        RecurrenceDescriptor RedDes;
        if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
          if (RedDes.hasUnsafeAlgebra())
            Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
          AllowedExit.insert(RedDes.getLoopExitInstr());
          Reductions[Phi] = RedDes;
          continue;
        }

        InductionDescriptor ID;
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
          addInductionPhi(Phi, ID, AllowedExit);
          if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr)
            Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst());
          continue;
        }

        if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop,
                                                         SinkAfter, DT)) {
          FirstOrderRecurrences.insert(Phi);
          continue;
        }

        // As a last resort, coerce the PHI to an AddRec expression
        // and re-try classifying it as an induction PHI.
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
          addInductionPhi(Phi, ID, AllowedExit);
          continue;
        }

        ORE->emit(createMissedAnalysis("NonReductionValueUsedOutsideLoop", Phi)
                  << "value that could not be identified as "
                     "reduction is used outside the loop");
        DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
        return false;
      } // end of PHI handling

      // We handle calls that:
      //   * Are debug info intrinsics.
      //   * Have a mapping to an IR intrinsic.
      //   * Have a vector version available.
      auto *CI = dyn_cast<CallInst>(&I);
      if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
          !isa<DbgInfoIntrinsic>(CI) &&
          !(CI->getCalledFunction() && TLI &&
            TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
        ORE->emit(createMissedAnalysis("CantVectorizeCall", CI)
                  << "call instruction cannot be vectorized");
        DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
        return false;
      }

      // Intrinsics such as powi, cttz and ctlz are legal to vectorize if the
      // second argument is the same (i.e., loop invariant).
      if (CI && hasVectorInstrinsicScalarOpd(
                    getVectorIntrinsicIDForCall(CI, TLI), 1)) {
        auto *SE = PSE.getSE();
        if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
          ORE->emit(createMissedAnalysis("CantVectorizeIntrinsic", CI)
                    << "intrinsic instruction cannot be vectorized");
          DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
          return false;
        }
      }

      // Check that the instruction return type is vectorizable.
      // Also, we can't vectorize extractelement instructions.
      if ((!VectorType::isValidElementType(I.getType()) &&
           !I.getType()->isVoidTy()) ||
          isa<ExtractElementInst>(I)) {
        ORE->emit(createMissedAnalysis("CantVectorizeInstructionReturnType", &I)
                  << "instruction return type cannot be vectorized");
        DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
        return false;
      }

      // Check that the stored type is vectorizable.
      if (auto *ST = dyn_cast<StoreInst>(&I)) {
        Type *T = ST->getValueOperand()->getType();
        if (!VectorType::isValidElementType(T)) {
          ORE->emit(createMissedAnalysis("CantVectorizeStore", ST)
                    << "store instruction cannot be vectorized");
          return false;
        }

        // FP instructions can allow unsafe algebra, thus vectorizable by
        // non-IEEE-754 compliant SIMD units.
        // This applies to floating-point math operations and calls, not memory
        // operations, shuffles, or casts, as they don't change precision or
        // semantics.
      } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
                 !I.isFast()) {
        DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
        Hints->setPotentiallyUnsafe();
      }

      // Reduction instructions are allowed to have exit users.
      // All other instructions must not have external users.
      if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
        ORE->emit(createMissedAnalysis("ValueUsedOutsideLoop", &I)
                  << "value cannot be used outside the loop");
        return false;
      }
    } // next instr.
  }

  if (!PrimaryInduction) {
    DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
    if (Inductions.empty()) {
      ORE->emit(createMissedAnalysis("NoInductionVariable")
                << "loop induction variable could not be identified");
      return false;
    }
  }

  // Now we know the widest induction type; check if our found induction
  // is the same size. If it's not, unset it here and InnerLoopVectorizer
  // will create another.
  if (PrimaryInduction && WidestIndTy != PrimaryInduction->getType())
    PrimaryInduction = nullptr;

  return true;
}

void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
  // We should not collect Scalars more than once per VF. Right now, this
  // function is called from collectUniformsAndScalars(), which already does
  // this check. Collecting Scalars for VF=1 does not make any sense.
  assert(VF >= 2 && !Scalars.count(VF) &&
         "This function should not be visited twice for the same VF");

  SmallSetVector<Instruction *, 8> Worklist;

  // These sets are used to seed the analysis with pointers used by memory
  // accesses that will remain scalar.
  SmallSetVector<Instruction *, 8> ScalarPtrs;
  SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;

  // A helper that returns true if the use of Ptr by MemAccess will be scalar.
  // The pointer operands of loads and stores will be scalar as long as the
  // memory access is not a gather or scatter operation. The value operand of a
  // store will remain scalar if the store is scalarized.
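  // For example, the pointer operand of a widened consecutive load is a
  // scalar use, since only the lane-zero address is computed, whereas the
  // pointer operand of a gather is not.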
  auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
    InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");
    if (auto *Store = dyn_cast<StoreInst>(MemAccess))
      if (Ptr == Store->getValueOperand())
        return WideningDecision == CM_Scalarize;
    assert(Ptr == getPointerOperand(MemAccess) &&
           "Ptr is neither a value nor a pointer operand");
    return WideningDecision != CM_GatherScatter;
  };

  // A helper that returns true if the given value is a bitcast or
  // getelementptr instruction contained in the loop.
  auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
    return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
            isa<GetElementPtrInst>(V)) &&
           !TheLoop->isLoopInvariant(V);
  };

  // A helper that evaluates a memory access's use of a pointer. If the use
  // will be a scalar use, and the pointer is only used by memory accesses, we
  // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
  // PossibleNonScalarPtrs.
  auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
    // We only care about bitcast and getelementptr instructions contained in
    // the loop.
    if (!isLoopVaryingBitCastOrGEP(Ptr))
      return;

    // If the pointer has already been identified as scalar (e.g., if it was
    // also identified as uniform), there's nothing to do.
    auto *I = cast<Instruction>(Ptr);
    if (Worklist.count(I))
      return;

    // If the use of the pointer will be a scalar use, and all users of the
    // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
    // place the pointer in PossibleNonScalarPtrs.
    if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
          return isa<LoadInst>(U) || isa<StoreInst>(U);
        }))
      ScalarPtrs.insert(I);
    else
      PossibleNonScalarPtrs.insert(I);
  };

  // We seed the scalars analysis with three classes of instructions: (1)
  // instructions marked uniform-after-vectorization, (2) bitcast and
  // getelementptr instructions used by memory accesses requiring a scalar use,
  // and (3) pointer induction variables and their update instructions (we
  // currently only scalarize these).
  //
  // (1) Add to the worklist all instructions that have been identified as
  // uniform-after-vectorization.
  Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());

  // (2) Add to the worklist all bitcast and getelementptr instructions used by
  // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
  // scatter operation. The value operand of a store will remain scalar if the
  // store is scalarized.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (auto *Load = dyn_cast<LoadInst>(&I)) {
        evaluatePtrUse(Load, Load->getPointerOperand());
      } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
        evaluatePtrUse(Store, Store->getPointerOperand());
        evaluatePtrUse(Store, Store->getValueOperand());
      }
    }
  for (auto *I : ScalarPtrs)
    if (!PossibleNonScalarPtrs.count(I)) {
      DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
      Worklist.insert(I);
    }

  // (3) Add to the worklist all pointer induction variables and their update
  // instructions.
  //
  // TODO: Once we are able to vectorize pointer induction variables we should
  // no longer insert them into the worklist here.
  auto *Latch = TheLoop->getLoopLatch();
  for (auto &Induction : *Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
    if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction)
      continue;
    Worklist.insert(Ind);
    Worklist.insert(IndUpdate);
    DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
    DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n");
  }

  // Insert the forced scalars.
  // FIXME: Currently widenPHIInstruction() often creates a dead vector
  // induction variable when the PHI user is scalarized.
  if (ForcedScalars.count(VF))
    for (auto *I : ForcedScalars.find(VF)->second)
      Worklist.insert(I);

  // Expand the worklist by looking through any bitcasts and getelementptr
  // instructions we've already identified as scalar. This is similar to the
  // expansion step in collectLoopUniforms(); however, here we're only
  // expanding to include additional bitcasts and getelementptr instructions.
  unsigned Idx = 0;
  while (Idx != Worklist.size()) {
    Instruction *Dst = Worklist[Idx++];
    if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
      continue;
    auto *Src = cast<Instruction>(Dst->getOperand(0));
    if (llvm::all_of(Src->users(), [&](User *U) -> bool {
          auto *J = cast<Instruction>(U);
          return !TheLoop->contains(J) || Worklist.count(J) ||
                 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
                  isScalarUse(J, Src));
        })) {
      Worklist.insert(Src);
      DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
    }
  }

  // An induction variable will remain scalar if all users of the induction
  // variable and induction variable update remain scalar.
  for (auto &Induction : *Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // We already considered pointer induction variables, so there's no reason
    // to look at their users again.
    //
    // TODO: Once we are able to vectorize pointer induction variables we
    // should no longer skip over them here.
    if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction)
      continue;

    // Determine if all users of the induction variable are scalar after
    // vectorization.
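    // For example, assuming the induction %i is used only by its own update
    // and by getelementptr instructions we have already marked scalar, %i
    // itself will remain scalar.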
    auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
    });
    if (!ScalarInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // scalar after vectorization.
    auto ScalarIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
        });
    if (!ScalarIndUpdate)
      continue;

    // The induction variable and its update instruction will remain scalar.
    Worklist.insert(Ind);
    Worklist.insert(IndUpdate);
    DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
    DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n");
  }

  Scalars[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationLegality::isScalarWithPredication(Instruction *I) {
  if (!blockNeedsPredication(I->getParent()))
    return false;
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Store:
    return !isMaskRequired(I);
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
    return mayDivideByZero(*I);
  }
  return false;
}

bool LoopVectorizationLegality::memoryInstructionCanBeWidened(Instruction *I,
                                                              unsigned VF) {
  // Get and ensure we have a valid memory instruction.
  LoadInst *LI = dyn_cast<LoadInst>(I);
  StoreInst *SI = dyn_cast<StoreInst>(I);
  assert((LI || SI) && "Invalid memory instruction");

  auto *Ptr = getPointerOperand(I);

  // In order to be widened, the pointer should be consecutive, first of all.
  if (!isConsecutivePtr(Ptr))
    return false;

  // If the instruction is a store located in a predicated block, it will be
  // scalarized.
  if (isScalarWithPredication(I))
    return false;

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  if (hasIrregularType(ScalarTy, DL, VF))
    return false;

  return true;
}

void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
  // We should not collect Uniforms more than once per VF. Right now,
  // this function is called from collectUniformsAndScalars(), which
  // already does this check. Collecting Uniforms for VF=1 does not make any
  // sense.

  assert(VF >= 2 && !Uniforms.count(VF) &&
         "This function should not be visited twice for the same VF");

  // Visit the list of Uniforms. If we find no uniform value, we will not
  // analyze it again; Uniforms.count(VF) will then return 1.
  Uniforms[VF].clear();

  // We now know that the loop is vectorizable!
  // Collect instructions inside the loop that will remain uniform after
  // vectorization.

  // Global values, parameters, and instructions outside of the current loop
  // are out of scope.
  auto isOutOfScope = [&](Value *V) -> bool {
    Instruction *I = dyn_cast<Instruction>(V);
    return (!I || !TheLoop->contains(I));
  };

  SetVector<Instruction *> Worklist;
  BasicBlock *Latch = TheLoop->getLoopLatch();

  // Start with the conditional branch. If the branch condition is an
  // instruction contained in the loop that is only used by the branch, it is
  // uniform.
  auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
  if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) {
    Worklist.insert(Cmp);
    DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
  }

  // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
  // are pointers that are treated like consecutive pointers during
  // vectorization. The pointer operands of interleaved accesses are an
  // example.
  SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;

  // Holds pointer operands of instructions that are possibly non-uniform.
  SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;

  auto isUniformDecision = [&](Instruction *I, unsigned VF) {
    InstWidening WideningDecision = getWideningDecision(I, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");

    return (WideningDecision == CM_Widen ||
            WideningDecision == CM_Interleave);
  };

  // Iterate over the instructions in the loop, and collect all
  // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
  // that a consecutive-like pointer operand will be scalarized, we collect it
  // in PossibleNonUniformPtrs instead. We use two sets here because a single
  // getelementptr instruction can be used by both vectorized and scalarized
  // memory instructions. For example, if a loop loads and stores from the same
  // location, but the store is conditional, the store will be scalarized, and
  // the getelementptr won't remain uniform.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      // If there's no pointer operand, there's nothing to do.
      auto *Ptr = dyn_cast_or_null<Instruction>(getPointerOperand(&I));
      if (!Ptr)
        continue;

      // True if all users of Ptr are memory accesses that have Ptr as their
      // pointer operand.
      auto UsersAreMemAccesses =
          llvm::all_of(Ptr->users(), [&](User *U) -> bool {
            return getPointerOperand(U) == Ptr;
          });

      // Ensure the memory instruction will not be scalarized or used by
      // gather/scatter, making its pointer operand non-uniform. If the pointer
      // operand is used by any instruction other than a memory access, we
      // conservatively assume the pointer operand may be non-uniform.
      if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
        PossibleNonUniformPtrs.insert(Ptr);

      // If the memory instruction will be vectorized and its pointer operand
      // is consecutive-like or interleaved, the pointer operand should
      // remain uniform.
      else
        ConsecutiveLikePtrs.insert(Ptr);
    }

  // Add to the Worklist all consecutive and consecutive-like pointers that
  // aren't also identified as possibly non-uniform.

  // Add to the Worklist all consecutive and consecutive-like pointers that
  // aren't also identified as possibly non-uniform.
  for (auto *V : ConsecutiveLikePtrs)
    if (!PossibleNonUniformPtrs.count(V)) {
      DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
      Worklist.insert(V);
    }

  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should be either already inside Worklist, or
  // out of scope. This ensures a uniform instruction will only be used
  // by uniform instructions or out-of-scope instructions.
  unsigned idx = 0;
  while (idx != Worklist.size()) {
    Instruction *I = Worklist[idx++];

    for (auto OV : I->operand_values()) {
      if (isOutOfScope(OV))
        continue;
      auto *OI = cast<Instruction>(OV);
      if (llvm::all_of(OI->users(), [&](User *U) -> bool {
            auto *J = cast<Instruction>(U);
            return !TheLoop->contains(J) || Worklist.count(J) ||
                   (OI == getPointerOperand(J) && isUniformDecision(J, VF));
          })) {
        Worklist.insert(OI);
        DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
      }
    }
  }

  // Returns true if Ptr is the pointer operand of a memory access instruction
  // I, and I is known to not require scalarization.
  auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
    return getPointerOperand(I) == Ptr && isUniformDecision(I, VF);
  };

  // For an instruction to be added into Worklist above, all its users inside
  // the loop should also be in Worklist. However, this condition cannot be
  // true for phi nodes that form a cyclic dependence. We must process phi
  // nodes separately. An induction variable will remain uniform if all users
  // of the induction variable and induction variable update remain uniform.
  // The code below handles both pointer and non-pointer induction variables.
  for (auto &Induction : *Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are uniform after
    // vectorization.
    auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, Ind);
    });
    if (!UniformInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // uniform after vectorization.
    auto UniformIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
                 isVectorizedMemAccessUse(I, IndUpdate);
        });
    if (!UniformIndUpdate)
      continue;

    // The induction variable and its update instruction will remain uniform.
    Worklist.insert(Ind);
    Worklist.insert(IndUpdate);
    DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n");
    DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n");
  }

  Uniforms[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationLegality::canVectorizeMemory() {
  LAI = &(*GetLAA)(*TheLoop);
  InterleaveInfo.setLAI(LAI);
  const OptimizationRemarkAnalysis *LAR = LAI->getReport();
  if (LAR) {
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(Hints->vectorizeAnalysisPassName(),
                                        "loop not vectorized: ", *LAR);
    });
  }
  if (!LAI->canVectorizeMemory())
    return false;

  if (LAI->hasStoreToLoopInvariantAddress()) {
    ORE->emit(createMissedAnalysis("CantVectorizeStoreToLoopInvariantAddress")
              << "write to a loop invariant address could not be vectorized");
    DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
    return false;
  }

  Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
  PSE.addPredicate(LAI->getPSE().getUnionPredicate());

  return true;
}

bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
  Value *In0 = const_cast<Value *>(V);
  PHINode *PN = dyn_cast_or_null<PHINode>(In0);
  if (!PN)
    return false;

  return Inductions.count(PN);
}

bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) {
  return FirstOrderRecurrences.count(Phi);
}

bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
  return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
}
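
// For reference, an illustrative first-order recurrence (not taken from the
// code above): a phi whose value in iteration i depends on a value computed
// in iteration i - 1, e.g.
//
//   t = init;
//   for (i = 0; i < n; ++i) {
//     b[i] = a[i] + t;
//     t = a[i];
//   }
//
// As noted in getInstructionCost below, such phis are vectorized using a
// shuffle that combines the previous iteration's vector with the current one.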

bool LoopVectorizationLegality::blockCanBePredicated(
    BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) {
  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  for (Instruction &I : *BB) {
    // Check that we don't have a constant expression that can trap as operand.
    for (Value *Operand : I.operands()) {
      if (auto *C = dyn_cast<Constant>(Operand))
        if (C->canTrap())
          return false;
    }
    // We might be able to hoist the load.
    if (I.mayReadFromMemory()) {
      auto *LI = dyn_cast<LoadInst>(&I);
      if (!LI)
        return false;
      if (!SafePtrs.count(LI->getPointerOperand())) {
        if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) ||
            isLegalMaskedGather(LI->getType())) {
          MaskedOp.insert(LI);
          continue;
        }
        // !llvm.mem.parallel_loop_access implies if-conversion safety.
        if (IsAnnotatedParallel)
          continue;
        return false;
      }
    }

    if (I.mayWriteToMemory()) {
      auto *SI = dyn_cast<StoreInst>(&I);
      // We only support predication of stores in basic blocks with one
      // predecessor.
      if (!SI)
        return false;

      // Build a masked store if it is legal for the target.
      if (isLegalMaskedStore(SI->getValueOperand()->getType(),
                             SI->getPointerOperand()) ||
          isLegalMaskedScatter(SI->getValueOperand()->getType())) {
        MaskedOp.insert(SI);
        continue;
      }

      bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0);
      bool isSinglePredecessor = SI->getParent()->getSinglePredecessor();

      if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr ||
          !isSinglePredecessor)
        return false;
    }
    if (I.mayThrow())
      return false;
  }

  return true;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {
  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // The interleaved access analysis needs the load/store instructions to be
  // maintained in "program order", so we visit the blocks in the loop in
  // reverse postorder (i.e., in a topological order). Such an ordering
  // ensures that any load/store that may be executed before a second
  // load/store will precede the second load/store in AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      auto *LI = dyn_cast<LoadInst>(&I);
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!LI && !SI)
        continue;

      Value *Ptr = getPointerOperand(&I);
      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) would be
      // overly conservative. For full groups, wrapping should be ok since if
      // we would wrap around the address space we would do a memory access at
      // nullptr even without the transformation. The wrapping checks are
      // therefore deferred until after we've formed the interleaved groups.
      int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
                                    /*Assume=*/true, /*ShouldCheckWrap=*/false);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());

      // An alignment of 0 means target ABI alignment.
      unsigned Align = getMemInstAlignment(&I);
      if (!Align)
        Align = DL.getABITypeAlignment(PtrTy->getElementType());

      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];  // (1)
//                                A[i] = b;  // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    const ValueToValueMap &Strides) {
  DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
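
  // As a concrete (illustrative) example of group formation, in a stride-2
  // loop such as:
  //
  //   x = A[2 * i];      // (1)
  //   y = A[2 * i + 1];  // (2)
  //
  // both loads have stride 2 and equal size, and their distance is one
  // element, so (1) is inserted into the group initialized for (2), forming
  // a single load group with factor 2 that can be emitted as one wide load
  // plus shuffles.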
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup *Group = nullptr;
    if (isStrided(DesB.Stride)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a; // (1)
      //                    | A[i-1] = b; // (2) |
      //                      A[i-3] = c; // (3)
      //                      A[i]   = d; // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup *StoreGroup = getInterleaveGroup(A);
          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory())
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory objects of A and B don't belong to the same
      // address space.
      if (getMemInstAddressSpace(A) != getMemInstAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // Ignore A if either A or B is in a predicated block. Although we
      // currently prevent group formation for predicated accesses, we may be
      // able to relax this limitation in the future once we handle more
      // complicated blocks.
      if (isPredicated(A->getParent()) || isPredicated(B->getParent()))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Align)) {
        DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                     << "    into the interleave group with" << *B << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (InterleaveGroup *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor()) {
      DEBUG(dbgs() << "LV: Invalidate candidate interleaved store group due "
                      "to gaps.\n");
      releaseGroup(Group);
    }

  // Remove interleaved groups with gaps (currently only loads) whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer doesn't
  // wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. Once we change to Assume=true we'll
  // only need at most one runtime check per interleaved group.
  for (InterleaveGroup *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If the first and last members of the group don't wrap, this
    // implies that all the pointers in the group don't wrap. So we check only
    // group member 0 (which is always guaranteed to exist) and group member
    // Factor - 1; if the latter doesn't exist we rely on peeling (if it is a
    // non-reversed access -- see Case 3).
    Value *FirstMemberPtr = getPointerOperand(Group->getMember(0));
    if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                      /*ShouldCheckWrap=*/true)) {
      DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "first group member potentially pointer-wrapping.\n");
      releaseGroup(Group);
      continue;
    }
    Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
    if (LastMember) {
      Value *LastMemberPtr = getPointerOperand(LastMember);
      if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                        /*ShouldCheckWrap=*/true)) {
        DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                        "last group member potentially pointer-wrapping.\n");
        releaseGroup(Group);
      }
    } else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                        "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }
}

Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) {
  if (!EnableCondStoresVectorization && Legal->getNumPredStores()) {
    ORE->emit(createMissedAnalysis("ConditionalStore")
              << "store that is conditionally executed prevents vectorization");
    DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
    return None;
  }

  if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this anyway, since the check is still
    // likely to be dynamically uniform if the target can skip it.
    DEBUG(dbgs() << "LV: Not inserting runtime ptr check for divergent target");

    ORE->emit(
        createMissedAnalysis("CantVersionLoopWithDivergentTarget")
        << "runtime pointer checks needed. Not enabled for divergent target");

    return None;
  }

  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  if (!OptForSize) // Remaining checks deal with the scalar loop when
                   // OptForSize.
    return computeFeasibleMaxVF(OptForSize, TC);

  if (Legal->getRuntimePointerChecking()->Need) {
    ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize")
              << "runtime pointer checks needed. Enable vectorization of this "
                 "loop with '#pragma clang loop vectorize(enable)' when "
                 "compiling with -Os/-Oz");
    DEBUG(dbgs()
          << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
    return None;
  }

  // If we optimize the program for size, avoid creating the tail loop.
  DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');

  // If we don't know the precise trip count, don't try to vectorize.
  if (TC < 2) {
    ORE->emit(
        createMissedAnalysis("UnknownLoopCountComplexCFG")
        << "unable to calculate the loop count due to complex control flow");
    DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
    return None;
  }

  unsigned MaxVF = computeFeasibleMaxVF(OptForSize, TC);

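  // As a worked example of the tail check below (numbers are illustrative):
  // with TC == 16 and MaxVF == 8, 16 % 8 == 0, so the vector loop executes
  // exactly twice and no scalar tail is needed. With TC == 17, 17 % 8 == 1,
  // so a scalar tail loop would be required, and under -Os/-Oz we give up
  // rather than grow the code.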
  if (TC % MaxVF != 0) {
    // If the trip count that we found modulo the vectorization factor is not
    // zero then we require a tail.
    // FIXME: look for a smaller MaxVF that does divide TC rather than give up.
    // FIXME: return None if loop requiresScalarEpilog(<MaxVF>), or look for a
    //        smaller MaxVF that does not require a scalar epilog.

    ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize")
              << "cannot optimize for size and vectorize at the "
                 "same time. Enable vectorization of this loop "
                 "with '#pragma clang loop vectorize(enable)' "
                 "when compiling with -Os/-Oz");
    DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
    return None;
  }

  return MaxVF;
}

unsigned
LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize,
                                                 unsigned ConstTripCount) {
  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  unsigned WidestRegister = TTI.getRegisterBitWidth(true);

  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed as MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
  unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();

  WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);

  unsigned MaxVectorSize = WidestRegister / WidestType;

  DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
               << WidestType << " bits.\n");
  DEBUG(dbgs() << "LV: The Widest register safe to use is: " << WidestRegister
               << " bits.\n");

  assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
                                " into one vector!");
  if (MaxVectorSize == 0) {
    DEBUG(dbgs() << "LV: The target has no vector registers.\n");
    MaxVectorSize = 1;
    return MaxVectorSize;
  } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
             isPowerOf2_32(ConstTripCount)) {
    // We need to clamp the VF to be the ConstTripCount. There is no point in
    // choosing a higher viable VF as done in the loop below.
    DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
                 << ConstTripCount << "\n");
    MaxVectorSize = ConstTripCount;
    return MaxVectorSize;
  }

  unsigned MaxVF = MaxVectorSize;
  if (MaximizeBandwidth && !OptForSize) {
    // Collect all viable vectorization factors larger than the default MaxVF
    // (i.e. MaxVectorSize).
    SmallVector<unsigned, 8> VFs;
    unsigned NewMaxVectorSize = WidestRegister / SmallestType;
    for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
      VFs.push_back(VS);

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than existing
    // ones.
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
    for (int i = RUs.size() - 1; i >= 0; --i) {
      if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
        MaxVF = VFs[i];
        break;
      }
    }
  }
  return MaxVF;
}
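
// A quick numerical sketch of computeFeasibleMaxVF (the target numbers are
// hypothetical): with 256-bit vector registers and a widest scalar type of
// i32, MaxVectorSize = 256 / 32 = 8. If the loop also touches i8 values,
// MaximizeBandwidth additionally considers VF = 16 and VF = 32
// (256 / 8 = 32) and picks the largest factor whose estimated register
// usage still fits the target.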

LoopVectorizationCostModel::VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
  float Cost = expectedCost(1).first;
#ifndef NDEBUG
  const float ScalarCost = Cost;
#endif /* NDEBUG */
  unsigned Width = 1;
  DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  // Ignore scalar width, because the user explicitly wants vectorization.
  if (ForceVectorization && MaxVF > 1) {
    Width = 2;
    Cost = expectedCost(Width).first / (float)Width;
  }

  for (unsigned i = 2; i <= MaxVF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
    VectorizationCostTy C = expectedCost(i);
    float VectorCost = C.first / (float)i;
    DEBUG(dbgs() << "LV: Vector loop of width " << i
                 << " costs: " << (int)VectorCost << ".\n");
    if (!C.second && !ForceVectorization) {
      DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }
    if (VectorCost < Cost) {
      Cost = VectorCost;
      Width = i;
    }
  }

  DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
        << "LV: Vectorization seems to be not beneficial, "
        << "but was forced by a user.\n");
  DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
  VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
  return Factor;
}
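
// For instance (costs made up for illustration): if the scalar loop costs 8
// and expectedCost(4).first is 20, the per-lane cost at VF = 4 is 20 / 4 = 5,
// which beats the scalar cost of 8, so VF = 4 is selected unless a wider
// factor achieves an even lower per-lane cost.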

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the loop.
    for (Instruction &I : *BB) {
      Type *T = I.getType();

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (auto *PN = dyn_cast<PHINode>(&I)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      // Ignore loaded pointer types and stored pointer types that are not
      // vectorizable.
      //
      // FIXME: The check here attempts to predict whether a load or store will
      //        be vectorized. We only know this for certain after a VF has
      //        been selected. Here, we assume that if an access can be
      //        vectorized, it will be. We should also look at extending this
      //        optimization to non-pointer types.
      //
      if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
          !Legal->isAccessInterleaved(&I) && !Legal->isLegalGatherOrScatter(&I))
        continue;

      MinWidth = std::min(MinWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
      MaxWidth = std::max(MaxWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
    }
  }

  return {MinWidth, MaxWidth};
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
                                                           unsigned VF,
                                                           unsigned LoopCost) {
  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop overhead.
  // There are many micro-architectural considerations that we can't predict
  // at this level. For example, frontend pressure (on decode or fetch) due to
  // code size, or the number and capabilities of the execution ports.
  //
  // We use the following heuristics to select the interleave count:
  // 1. If the code has reductions, then we interleave to break the cross
  //    iteration dependency.
  // 2. If the loop is really small, then we interleave to reduce the loop
  //    overhead.
  // 3. We don't interleave if we think that we will spill registers to memory
  //    due to the increased register pressure.

  // When we optimize for size, we don't interleave.
  if (OptForSize)
    return 1;

  // The maximum safe dependence distance was already used to limit the
  // vectorization factor, so don't interleave on top of it.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  // Do not interleave loops with a relatively small trip count.
  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
    return 1;

  unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
  DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
               << " registers\n");

  if (VF == 1) {
    if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumScalarRegs;
  } else {
    if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumVectorRegs;
  }

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants so assume that we have at least one
  // instruction that uses at least one register.
  R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
  R.NumInstructions = std::max(R.NumInstructions, 1U);

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want a power-of-two interleave count to simplify any
  // addressing operations or alignment considerations.
  unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
                              R.MaxLocalUsers);

  // Don't count the induction variable as interleaved.
  if (EnableIndVarRegisterHeur)
    IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
                       std::max(1U, (R.MaxLocalUsers - 1)));
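
  // Plugging illustrative numbers into the formula above: with
  // TargetNumRegisters == 16, LoopInvariantRegs == 2 and MaxLocalUsers == 3,
  // IC = PowerOf2Floor((16 - 2) / 3) = PowerOf2Floor(4) = 4, i.e. an
  // estimated four interleaved copies fit in the register file without
  // spilling.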

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);

  // Check if the user has overridden the max.
  if (VF == 1) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }

  // If we did not calculate the cost for VF (because the user selected the VF)
  // then we calculate the cost of VF here.
  if (LoopCost == 0)
    LoopCost = expectedCost(VF).first;

  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target allows.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else if (IC < 1)
    IC = 1;

  // Interleave if we vectorized this loop and there is a reduction that could
  // benefit from interleaving.
  if (VF > 1 && !Legal->getReductionVars()->empty()) {
    DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // Note that if we've already vectorized the loop we will have done the
  // runtime check and so interleaving won't require further checks.
  bool InterleavingRequiresRuntimePointerCheck =
      (VF == 1 && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead and
  // potentially expose ILP opportunities.
  DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the cost overhead is 1 and we use the cost model
    // to estimate the cost of the loop and interleave until the cost of the
    // loop overhead is about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by max interleave count)
    // are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit it, by default, to 2,
    // so the critical path only gets increased by one reduction operation.
    if (!Legal->getReductionVars()->empty() && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
    return SmallIC;
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  bool HasReductions = !Legal->getReductionVars()->empty();
  if (TTI.enableAggressiveInterleaving(HasReductions)) {
    DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}
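
// To make the small-loop heuristic above concrete (illustrative numbers):
// with SmallLoopCost == 20 and LoopCost == 4, SmallIC = min(IC,
// PowerOf2Floor(20 / 4)) = min(IC, 4), so a cheap loop body is interleaved
// up to four times to amortize its per-iteration branch overhead.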

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is a
  // very rough estimation. We scan the loop in a topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop users
  // starts an interval. We record every time that an in-loop value is used, so
  // we have a list of the first and last occurrences of each instruction.
  // Next, we transpose this data structure into a multi map that holds the
  // list of intervals that *end* at a specific location. This multi map allows
  // us to perform a linear search. We scan the instructions linearly and
  // record each time that a new interval starts, by placing it in a set.
  // If we find this value in the multi-map then we remove it from the set.
  // The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but are
  // used inside the loop. We need this number separately from the max-interval
  // usage number because when we unroll, loop-invariant values do not take
  // more registers.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;
  RU.NumInstructions = 0;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  using IntervalMap = DenseMap<Instruction *, unsigned>;

  // Maps an index to the instruction at that index.
  DenseMap<unsigned, Instruction *> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
  SmallSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  unsigned Index = 0;
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    RU.NumInstructions += BB->size();
    for (Instruction &I : *BB) {
      IdxToInstr[Index++] = &I;

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = Index;
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  using InstrList = SmallVector<Instruction *, 2>;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallSet<Instruction *, 8> OpenIntervals;

  // Get the size of the widest register.
  unsigned MaxSafeDepDist = -1U;
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
  unsigned WidestRegister =
      std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);

  DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
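
  // A small worked example of the interval scan (indices are hypothetical):
  // if a value is defined at index 3 and last used at index 7, it opens an
  // interval at index 3. When the scan reaches index 7, the value appears in
  // TransposeEnds[7] and is erased from OpenIntervals. The usage estimate at
  // each index is simply the number of intervals still open there.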

  // A lambda that gets the register usage for the given type and VF.
  auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
    if (Ty->isTokenTy())
      return 0U;
    unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
    return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
  };

  for (unsigned int i = 0; i < Index; ++i) {
    Instruction *I = IdxToInstr[i];

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I))
      continue;

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;

    // For each VF find the maximum usage of registers.
    for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
      if (VFs[j] == 1) {
        MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size());
        continue;
      }
      collectUniformsAndScalars(VFs[j]);
      // Count the number of live intervals.
      unsigned RegUsage = 0;
      for (auto Inst : OpenIntervals) {
        // Skip ignored values for VF > 1.
        if (VecValuesToIgnore.count(Inst) ||
            isScalarAfterVectorization(Inst, VFs[j]))
          continue;
        RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
      }
      MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
    }

    DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
                 << OpenIntervals.size() << '\n');

    // Add the current instruction to the list of open intervals.
    OpenIntervals.insert(I);
  }

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    unsigned Invariant = 0;
    if (VFs[i] == 1)
      Invariant = LoopInvariants.size();
    else {
      for (auto Inst : LoopInvariants)
        Invariant += GetRegUsage(Inst->getType(), VFs[i]);
    }

    DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
    DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
    DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');
    DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n');

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[i];
    RUs[i] = RU;
  }

  return RUs;
}
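
// GetRegUsage above is a deliberately coarse estimate. For example (with a
// hypothetical target): an i32 value at VF = 8 occupies 8 * 32 = 256 bits,
// which with 128-bit vector registers counts as 256 / 128 = 2 registers;
// anything that fits in less than one register still counts as 1.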

void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
  // If we aren't vectorizing the loop, or if we've already collected the
  // instructions to scalarize, there's nothing to do. Collection may already
  // have occurred if we have a user-selected VF and are now computing the
  // expected cost for interleaving.
  if (VF < 2 || InstsToScalarize.count(VF))
    return;

  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
  // not profitable to scalarize any instructions, the presence of VF in the
  // map will indicate that we've analyzed it already.
  ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];

  // Find all the instructions that are scalar with predication in the loop and
  // determine if it would be better to not if-convert the blocks they are in.
  // If so, we also record the instructions to scalarize.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!Legal->blockNeedsPredication(BB))
      continue;
    for (Instruction &I : *BB)
      if (Legal->isScalarWithPredication(&I)) {
        ScalarCostsTy ScalarCosts;
        if (computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
          ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());

        // Remember that BB will remain after vectorization.
        PredicatedBBsAfterVectorization.insert(BB);
      }
  }
}

int LoopVectorizationCostModel::computePredInstDiscount(
    Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
    unsigned VF) {
  assert(!isUniformAfterVectorization(PredInst, VF) &&
         "Instruction marked uniform-after-vectorization will be predicated");

  // Initialize the discount to zero, meaning that the scalar version and the
  // vector version cost the same.
  int Discount = 0;

  // Holds instructions to analyze. The instructions we visit are mapped in
  // ScalarCosts. Those instructions are the ones that would be scalarized if
  // we find that the scalar version costs less.
  SmallVector<Instruction *, 8> Worklist;

  // Returns true if the given instruction can be scalarized.
  auto canBeScalarized = [&](Instruction *I) -> bool {
    // We only attempt to scalarize instructions forming a single-use chain
    // from the original predicated block that would otherwise be vectorized.
    // Although not strictly necessary, we give up on instructions we know will
    // already be scalar to avoid traversing chains that are unlikely to be
    // beneficial.
    if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
        isScalarAfterVectorization(I, VF))
      return false;

    // If the instruction is scalar with predication, it will be analyzed
    // separately. We ignore it within the context of PredInst.
    if (Legal->isScalarWithPredication(I))
      return false;

    // If any of the instruction's operands are uniform after vectorization,
    // the instruction cannot be scalarized. This prevents, for example, a
    // masked load from being scalarized.
    //
    // We assume we will only emit a value for lane zero of an instruction
    // marked uniform after vectorization, rather than VF identical values.
    // Thus, if we scalarize an instruction that uses a uniform, we would
    // create uses of values corresponding to the lanes we aren't emitting code
    // for. This behavior can be changed by allowing getScalarValue to clone
    // the lane zero values for uniforms rather than asserting.
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get()))
        if (isUniformAfterVectorization(J, VF))
          return false;

    // Otherwise, we can scalarize the instruction.
    return true;
  };

  // Returns true if an operand that cannot be scalarized must be extracted
  // from a vector. We will account for this scalarization overhead below. Note
  // that the non-void predicated instructions are placed in their own blocks,
  // and their return values are inserted into vectors. Thus, an extract would
  // still be required.
  auto needsExtract = [&](Instruction *I) -> bool {
    return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF);
  };

  // Compute the expected cost discount from scalarizing the entire expression
  // feeding the predicated instruction. We currently only consider expressions
  // that are single-use instruction chains.
  Worklist.push_back(PredInst);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();

    // If we've already analyzed the instruction, there's nothing to do.
    if (ScalarCosts.count(I))
      continue;

    // Compute the cost of the vector instruction. Note that this cost already
    // includes the scalarization overhead of the predicated instruction.
    unsigned VectorCost = getInstructionCost(I, VF).first;

    // Compute the cost of the scalarized instruction. This cost is the cost of
    // the instruction as if it wasn't if-converted and instead remained in the
    // predicated block. We will scale this cost by block probability after
    // computing the scalarization overhead.
    unsigned ScalarCost = VF * getInstructionCost(I, 1).first;

    // Compute the scalarization overhead of needed insertelement instructions
    // and phi nodes.
    if (Legal->isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
      ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF),
                                                 true, false);
      ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI);
    }

    // Compute the scalarization overhead of needed extractelement
    // instructions. For each of the instruction's operands, if the operand can
    // be scalarized, add it to the worklist; otherwise, account for the
    // overhead.
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get())) {
        assert(VectorType::isValidElementType(J->getType()) &&
               "Instruction has non-scalar type");
        if (canBeScalarized(J))
          Worklist.push_back(J);
        else if (needsExtract(J))
          ScalarCost += TTI.getScalarizationOverhead(
              ToVectorTy(J->getType(), VF), false, true);
      }

    // Scale the total scalar cost by block probability.
    ScalarCost /= getReciprocalPredBlockProb();

    // Compute the discount. A non-negative discount means the vector version
    // of the instruction costs more, and scalarizing would be beneficial.
    Discount += VectorCost - ScalarCost;
    ScalarCosts[I] = ScalarCost;
  }

  return Discount;
}
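
// Putting illustrative numbers to the discount computation above: at VF = 4,
// suppose an instruction's vector cost is 12 and its scalar cost is
// 4 * 2 = 8 before scaling. With getReciprocalPredBlockProb() == 2 (the
// predicated block is assumed to execute half the time), the scaled scalar
// cost is 8 / 2 = 4, and the discount contribution is 12 - 4 = 8 in favor
// of scalarization.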

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::expectedCost(unsigned VF) {
  VectorizationCostTy Cost;

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    VectorizationCostTy BlockCost;

    // For each instruction in the old loop.
    for (Instruction &I : *BB) {
      // Skip dbg intrinsics.
      if (isa<DbgInfoIntrinsic>(I))
        continue;

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      VectorizationCostTy C = getInstructionCost(&I, VF);

      // Check if we should override the cost.
      if (ForceTargetInstructionCost.getNumOccurrences() > 0)
        C.first = ForceTargetInstructionCost;

      BlockCost.first += C.first;
      BlockCost.second |= C.second;
      DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF "
                   << VF << " For instruction: " << I << '\n');
    }

    // If we are vectorizing a predicated block, it will have been
    // if-converted. This means that the block's instructions (aside from
    // stores and instructions that may divide by zero) will now be
    // unconditionally executed. For the scalar case, we may not always execute
    // the predicated block. Thus, scale the block's cost by the probability of
    // executing it.
    if (VF == 1 && Legal->blockNeedsPredication(BB))
      BlockCost.first /= getReciprocalPredBlockProb();

    Cost.first += BlockCost.first;
    Cost.second |= BlockCost.second;
  }

  return Cost;
}

/// \brief Gets Address Access SCEV after verifying that the access pattern
/// is loop invariant except the induction variable dependence.
///
/// This SCEV can be sent to the Target in order to estimate the address
/// calculation cost.
static const SCEV *getAddressAccessSCEV(
    Value *Ptr,
    LoopVectorizationLegality *Legal,
    ScalarEvolution *SE,
    const Loop *TheLoop) {
  auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  if (!Gep)
    return nullptr;

  // We are looking for a gep with all loop invariant indices except for one
  // which should be an induction variable.
  unsigned NumOperands = Gep->getNumOperands();
  for (unsigned i = 1; i < NumOperands; ++i) {
    Value *Opd = Gep->getOperand(i);
    if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
        !Legal->isInductionVariable(Opd))
      return nullptr;
  }

  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
  return SE->getSCEV(Ptr);
}

static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
  return Legal->hasStride(I->getOperand(0)) ||
         Legal->hasStride(I->getOperand(1));
}

unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
                                                                 unsigned VF) {
  Type *ValTy = getMemInstValueType(I);
  auto SE = PSE.getSE();

  unsigned Alignment = getMemInstAlignment(I);
  unsigned AS = getMemInstAddressSpace(I);
  Value *Ptr = getPointerOperand(I);
  Type *PtrTy = ToVectorTy(Ptr->getType(), VF);

  // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
  const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, SE, TheLoop);

  // Get the cost of the scalar memory instruction and address computation.
  unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);

  Cost += VF *
          TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
                              AS, I);

  // Get the overhead of the extractelement and insertelement instructions
  // we might create due to scalarization.
  Cost += getScalarizationOverhead(I, VF, TTI);

  // If we have a predicated store, it may not be executed for each vector
  // lane. Scale the cost by the probability of executing the predicated
  // block.
  if (Legal->isScalarWithPredication(I))
    Cost /= getReciprocalPredBlockProb();

  return Cost;
}

unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
                                                             unsigned VF) {
  Type *ValTy = getMemInstValueType(I);
  Type *VectorTy = ToVectorTy(ValTy, VF);
  unsigned Alignment = getMemInstAlignment(I);
  Value *Ptr = getPointerOperand(I);
  unsigned AS = getMemInstAddressSpace(I);
  int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);

  assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
         "Stride should be 1 or -1 for consecutive memory access");
  unsigned Cost = 0;
  if (Legal->isMaskRequired(I))
    Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
  else
    Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I);

  bool Reverse = ConsecutiveStride < 0;
  if (Reverse)
    Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
  return Cost;
}

unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
                                                         unsigned VF) {
  LoadInst *LI = cast<LoadInst>(I);
  Type *ValTy = LI->getType();
  Type *VectorTy = ToVectorTy(ValTy, VF);
  unsigned Alignment = LI->getAlignment();
  unsigned AS = LI->getPointerAddressSpace();

  return TTI.getAddressComputationCost(ValTy) +
         TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) +
         TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
}

unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
                                                          unsigned VF) {
  Type *ValTy = getMemInstValueType(I);
  Type *VectorTy = ToVectorTy(ValTy, VF);
  unsigned Alignment = getMemInstAlignment(I);
  Value *Ptr = getPointerOperand(I);

  return TTI.getAddressComputationCost(VectorTy) +
         TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
                                    Legal->isMaskRequired(I), Alignment);
}

unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
                                                            unsigned VF) {
  Type *ValTy = getMemInstValueType(I);
  Type *VectorTy = ToVectorTy(ValTy, VF);
  unsigned AS = getMemInstAddressSpace(I);

  auto Group = Legal->getInterleavedAccessGroup(I);
  assert(Group && "Failed to get an interleaved access group.");

  unsigned InterleaveFactor = Group->getFactor();
  Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);

  // Holds the indices of existing members in an interleaved load group.
  // An interleaved store group doesn't need this as it doesn't allow gaps.
  SmallVector<unsigned, 4> Indices;
  if (isa<LoadInst>(I)) {
    for (unsigned i = 0; i < InterleaveFactor; i++)
      if (Group->getMember(i))
        Indices.push_back(i);
  }

  // Calculate the cost of the whole interleaved group.
  unsigned Cost = TTI.getInterleavedMemoryOpCost(I->getOpcode(), WideVecTy,
                                                 Group->getFactor(), Indices,
                                                 Group->getAlignment(), AS);

  if (Group->isReverse())
    Cost += Group->getNumMembers() *
            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
  return Cost;
}
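
// A rough illustration of how these costs compare (all numbers are
// hypothetical): for a full stride-2 load group at VF = 4, the interleave
// cost models one wide 8-element load plus shuffles, whereas scalarizing the
// same two loads costs 2 * VF scalar loads plus insert/extract overhead, so
// interleaving usually wins on targets with cheap shuffles.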

unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
                                                              unsigned VF) {
  // Calculate scalar cost only. Vectorization cost should be ready at this
  // moment.
  if (VF == 1) {
    Type *ValTy = getMemInstValueType(I);
    unsigned Alignment = getMemInstAlignment(I);
    unsigned AS = getMemInstAddressSpace(I);

    return TTI.getAddressComputationCost(ValTy) +
           TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I);
  }
  return getWideningCost(I, VF);
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
  // If we know that this instruction will remain uniform, check the cost of
  // the scalar version.
  if (isUniformAfterVectorization(I, VF))
    VF = 1;

  if (VF > 1 && isProfitableToScalarize(I, VF))
    return VectorizationCostTy(InstsToScalarize[VF][I], false);

  // Forced scalars do not have any scalarization overhead.
  if (VF > 1 && ForcedScalars.count(VF) &&
      ForcedScalars.find(VF)->second.count(I))
    return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false);

  Type *VectorTy;
  unsigned C = getInstructionCost(I, VF, VectorTy);

  bool TypeNotScalarized =
      VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF;
  return VectorizationCostTy(C, TypeNotScalarized);
}

void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
  if (VF == 1)
    return;
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the old loop.
    for (Instruction &I : *BB) {
      Value *Ptr = getPointerOperand(&I);
      if (!Ptr)
        continue;

      if (isa<LoadInst>(&I) && Legal->isUniform(Ptr)) {
        // Scalar load + broadcast
        unsigned Cost = getUniformMemOpCost(&I, VF);
        setWideningDecision(&I, VF, CM_Scalarize, Cost);
        continue;
      }

      // We assume that widening is the best solution when possible.
      if (Legal->memoryInstructionCanBeWidened(&I, VF)) {
        unsigned Cost = getConsecutiveMemOpCost(&I, VF);
        setWideningDecision(&I, VF, CM_Widen, Cost);
        continue;
      }

      // Choose between Interleaving, Gather/Scatter or Scalarization.
      unsigned InterleaveCost = std::numeric_limits<unsigned>::max();
      unsigned NumAccesses = 1;
      if (Legal->isAccessInterleaved(&I)) {
        auto Group = Legal->getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");

        // Make one decision for the whole group.
        if (getWideningDecision(&I, VF) != CM_Unknown)
          continue;

        NumAccesses = Group->getNumMembers();
        InterleaveCost = getInterleaveGroupCost(&I, VF);
      }

      unsigned GatherScatterCost =
          Legal->isLegalGatherOrScatter(&I)
              ? getGatherScatterCost(&I, VF) * NumAccesses
              : std::numeric_limits<unsigned>::max();

      unsigned ScalarizationCost =
          getMemInstScalarizationCost(&I, VF) * NumAccesses;

      // Choose the best option for the current VF, record the decision, and
      // use it during vectorization.
      unsigned Cost;
      InstWidening Decision;
      if (InterleaveCost <= GatherScatterCost &&
          InterleaveCost < ScalarizationCost) {
        Decision = CM_Interleave;
        Cost = InterleaveCost;
      } else if (GatherScatterCost < ScalarizationCost) {
        Decision = CM_GatherScatter;
        Cost = GatherScatterCost;
      } else {
        Decision = CM_Scalarize;
        Cost = ScalarizationCost;
      }
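
      // Example decision (illustrative costs): with InterleaveCost == 6,
      // GatherScatterCost == UINT_MAX (no target support), and
      // ScalarizationCost == 10, the access gets CM_Interleave with cost 6;
      // without an interleave group, scalarization would win here.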
      // the cost will actually be assigned to one instruction.
      if (auto Group = Legal->getInterleavedAccessGroup(&I))
        setWideningDecision(Group, VF, Decision, Cost);
      else
        setWideningDecision(&I, VF, Decision, Cost);
    }
  }

  // Make sure that any load of an address and any other address computation
  // remains scalar unless there is gather/scatter support. This avoids
  // inevitable extracts into address registers, and also has the benefit of
  // activating LSR more, since that pass can't optimize vectorized
  // addresses.
  if (TTI.prefersVectorizedAddressing())
    return;

  // Start with all scalar pointer uses.
  SmallPtrSet<Instruction *, 8> AddrDefs;
  for (BasicBlock *BB : TheLoop->blocks())
    for (Instruction &I : *BB) {
      Instruction *PtrDef =
          dyn_cast_or_null<Instruction>(getPointerOperand(&I));
      if (PtrDef && TheLoop->contains(PtrDef) &&
          getWideningDecision(&I, VF) != CM_GatherScatter)
        AddrDefs.insert(PtrDef);
    }

  // Add all instructions used to generate the addresses.
  SmallVector<Instruction *, 4> Worklist;
  for (auto *I : AddrDefs)
    Worklist.push_back(I);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (auto &Op : I->operands())
      if (auto *InstOp = dyn_cast<Instruction>(Op))
        if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
            AddrDefs.insert(InstOp).second)
          Worklist.push_back(InstOp);
  }

  for (auto *I : AddrDefs) {
    if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since this involves the task of finding out
      // if the loaded register is involved in an address computation, it is
      // instead changed here when we know this is the case.
      if (getWideningDecision(I, VF) == CM_Widen)
        // Scalarize a widened load of address.
        setWideningDecision(I, VF, CM_Scalarize,
                            (VF * getMemoryInstructionCost(I, 1)));
      else if (auto Group = Legal->getInterleavedAccessGroup(I)) {
        // Scalarize an interleave group of address loads.
        for (unsigned I = 0; I < Group->getFactor(); ++I) {
          if (Instruction *Member = Group->getMember(I))
            setWideningDecision(Member, VF, CM_Scalarize,
                                (VF * getMemoryInstructionCost(Member, 1)));
        }
      }
    } else
      // Make sure I gets scalarized and receives a cost estimate without
      // scalarization overhead.
      ForcedScalars[VF].insert(I);
  }
}

unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
                                                        unsigned VF,
                                                        Type *&VectorTy) {
  Type *RetTy = I->getType();
  if (canTruncateToMinimalBitwidth(I, VF))
    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
  auto SE = PSE.getSE();

  // TODO: We need to estimate the cost of intrinsic calls.
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    // We mark this instruction as zero-cost because the cost of GEPs in
    // vectorized code depends on whether the corresponding memory instruction
    // is scalarized or not. Therefore, we handle GEPs with the memory
    // instruction cost.
    return 0;
  case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
    bool ScalarPredicatedBB = false;
    BranchInst *BI = cast<BranchInst>(I);
    if (VF > 1 && BI->isConditional() &&
        (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
         PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
      ScalarPredicatedBB = true;

    if (ScalarPredicatedBB) {
      // Return cost for branches around scalarized and predicated blocks.
      Type *Vec_i1Ty =
          VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
      return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) +
              (TTI.getCFInstrCost(Instruction::Br) * VF));
    } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1)
      // The back-edge branch will remain, as will all scalar branches.
      return TTI.getCFInstrCost(Instruction::Br);
    else
      // This branch will be eliminated by if-conversion.
      return 0;
    // Note: We currently assume zero cost for an unconditional branch inside
    // a predicated block since it will become a fall-through, although we
    // may decide in the future to call TTI for all branches.
  }
  case Instruction::PHI: {
    auto *Phi = cast<PHINode>(I);

    // First-order recurrences are replaced by vector shuffles inside the loop.
    if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
      return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
                                VectorTy, VF - 1, VectorTy);

    // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
    // converted into select instructions. We require N - 1 selects per phi
    // node, where N is the number of incoming values.
    if (VF > 1 && Phi->getParent() != TheLoop->getHeader())
      return (Phi->getNumIncomingValues() - 1) *
             TTI.getCmpSelInstrCost(
                 Instruction::Select, ToVectorTy(Phi->getType(), VF),
                 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF));

    return TTI.getCFInstrCost(Instruction::PHI);
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    // If we have a predicated instruction, it may not be executed for each
    // vector lane. Get the scalarization cost and scale this amount by the
    // probability of executing the predicated block. If the instruction is not
    // predicated, we fall through to the next case.
    if (VF > 1 && Legal->isScalarWithPredication(I)) {
      unsigned Cost = 0;

      // These instructions have a non-void type, so account for the phi nodes
      // that we will create. This cost is likely to be zero. The phi node
      // cost, if any, should be scaled by the block probability because it
      // models a copy at the end of each predicated block.
      Cost += VF * TTI.getCFInstrCost(Instruction::PHI);

      // The cost of the non-predicated instruction.
      Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);

      // The cost of insertelement and extractelement instructions needed for
      // scalarization.
      Cost += getScalarizationOverhead(I, VF, TTI);

      // Scale the cost by the probability of executing the predicated blocks.
      // This assumes the predicated block for each vector lane is equally
      // likely.
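      // For example, assuming getReciprocalPredBlockProb() returns 2 (i.e.,
      // each predicated block is assumed to execute half the time), an
      // accumulated cost of 20 for the phis, the scalar copies and the
      // insert/extract overhead is reported as 20 / 2 = 10.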
      return Cost / getReciprocalPredBlockProb();
    }
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go
    // away.
    if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
      return 0;
    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this are shifts on x86.
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueProperties Op1VP =
        TargetTransformInfo::OP_None;
    TargetTransformInfo::OperandValueProperties Op2VP =
        TargetTransformInfo::OP_None;
    Value *Op2 = I->getOperand(1);

    // Check for a splat or for a non-uniform vector of constants.
    if (isa<ConstantInt>(Op2)) {
      ConstantInt *CInt = cast<ConstantInt>(Op2);
      if (CInt && CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;
      Op2VK = TargetTransformInfo::OK_UniformConstantValue;
    } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
      Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      Constant *SplatValue = cast<Constant>(Op2)->getSplatValue();
      if (SplatValue) {
        ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue);
        if (CInt && CInt->getValue().isPowerOf2())
          Op2VP = TargetTransformInfo::OP_PowerOf2;
        Op2VK = TargetTransformInfo::OK_UniformConstantValue;
      }
    } else if (Legal->isUniform(Op2)) {
      Op2VK = TargetTransformInfo::OK_UniformValue;
    }
    SmallVector<const Value *, 4> Operands(I->operand_values());
    unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
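    // For example, a shift by a uniform constant amount is reported to TTI
    // below with OK_UniformConstantValue (and OP_PowerOf2 where it applies),
    // which targets such as x86 can map to a cheaper immediate-shift cost
    // than a fully variable vector shift; the exact figures are
    // target-dependent.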
    return N * TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK,
                                          Op2VK, Op1VP, Op2VP, Operands);
  }
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
    bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
    Type *CondTy = SI->getCondition()->getType();
    if (!ScalarCond)
      CondTy = VectorType::get(CondTy, VF);

    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I);
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ValTy = I->getOperand(0)->getType();
    Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
    if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
      ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
    VectorTy = ToVectorTy(ValTy, VF);
    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I);
  }
  case Instruction::Store:
  case Instruction::Load: {
    unsigned Width = VF;
    if (Width > 1) {
      InstWidening Decision = getWideningDecision(I, Width);
      assert(Decision != CM_Unknown &&
             "CM decision should be taken at this point");
      if (Decision == CM_Scalarize)
        Width = 1;
    }
    VectorTy = ToVectorTy(getMemInstValueType(I), Width);
    return getMemoryInstructionCost(I, VF);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    // We optimize the truncation of induction variables having constant
    // integer steps. The cost of these truncations is the same as the scalar
    // operation.
    if (isOptimizableIVTruncate(I, VF)) {
      auto *Trunc = cast<TruncInst>(I);
      return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
                                  Trunc->getSrcTy(), Trunc);
    }

    Type *SrcScalarTy = I->getOperand(0)->getType();
    Type *SrcVecTy =
        VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
    if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
      //
      // Calculate the modified src and dest types.
      Type *MinVecTy = VectorTy;
      if (I->getOpcode() == Instruction::Trunc) {
        SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      } else if (I->getOpcode() == Instruction::ZExt ||
                 I->getOpcode() == Instruction::SExt) {
        SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      }
    }

    unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
    return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I);
  }
  case Instruction::Call: {
    bool NeedToScalarize;
    CallInst *CI = cast<CallInst>(I);
    unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
    if (getVectorIntrinsicIDForCall(CI, TLI))
      return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
    return CallCost;
  }
  default:
    // The cost of executing VF copies of the scalar instruction. This opcode
    // is unknown. Assume that it is the same as 'mul'.
    return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) +
           getScalarizationOverhead(I, VF, TTI);
  } // end of switch.
}

char LoopVectorize::ID = 0;

static const char lv_name[] = "Loop Vectorization";

INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)

namespace llvm {

Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
  return new LoopVectorize(NoUnrolling, AlwaysVectorize);
}

} // end namespace llvm

bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
  // Check if the pointer operand of a load or store instruction is
  // consecutive.
  if (auto *Ptr = getPointerOperand(Inst))
    return Legal->isConsecutivePtr(Ptr);
  return false;
}

void LoopVectorizationCostModel::collectValuesToIgnore() {
  // Ignore ephemeral values.
  CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);

  // Ignore type-promoting instructions we identified during reduction
  // detection.
  for (auto &Reduction : *Legal->getReductionVars()) {
    RecurrenceDescriptor &RedDes = Reduction.second;
    SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }
}

LoopVectorizationCostModel::VectorizationFactor
LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) {
  // Width 1 means no vectorization, cost 0 means uncomputed cost.
  const LoopVectorizationCostModel::VectorizationFactor NoVectorization = {1U,
                                                                           0U};
  Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize);
  if (!MaybeMaxVF.hasValue()) // Cases considered too costly to vectorize.
    return NoVectorization;

  if (UserVF) {
    DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
    assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
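    // For example, a loop annotated with '#pragma clang loop
    // vectorize_width(8)' arrives here with UserVF == 8, and a single VPlan
    // is built for that VF alone.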
    CM.selectUserVectorizationFactor(UserVF);
    buildVPlans(UserVF, UserVF);
    DEBUG(printPlans(dbgs()));
    return {UserVF, 0};
  }

  unsigned MaxVF = MaybeMaxVF.getValue();
  assert(MaxVF != 0 && "MaxVF is zero.");

  for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
    // Collect Uniform and Scalar instructions after vectorization with VF.
    CM.collectUniformsAndScalars(VF);

    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    if (VF > 1)
      CM.collectInstsToScalarize(VF);
  }

  buildVPlans(1, MaxVF);
  DEBUG(printPlans(dbgs()));
  if (MaxVF == 1)
    return NoVectorization;

  // Select the optimal vectorization factor.
  return CM.selectVectorizationFactor(MaxVF);
}

void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
  DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF << '\n');
  BestVF = VF;
  BestUF = UF;

  erase_if(VPlans, [VF](const std::unique_ptr<VPlan> &Plan) {
    return !Plan->hasVF(VF);
  });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
}

void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
                                           DominatorTree *DT) {
  // Perform the actual loop transformation.

  // 1. Create a new empty loop. Unlink the old loop and connect the new one.
  VPTransformState State{
      BestVF, BestUF, LI, DT, ILV.Builder, ILV.VectorLoopValueMap, &ILV};
  State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();

  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
  //
  //===------------------------------------------------===//

  // 2. Copy and widen instructions from the old loop into the new loop.
  assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
  VPlans.front()->execute(&State);

  // 3. Fix the vectorized code: take care of header phi's, live-outs,
  //    predication, updating analyses.
  ILV.fixVectorizedLoop();
}

void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
    SmallPtrSetImpl<Instruction *> &DeadInstructions) {
  BasicBlock *Latch = OrigLoop->getLoopLatch();

  // We create new control-flow for the vectorized loop, so the original
  // condition will be dead after vectorization if it's only used by the
  // branch.
  auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
  if (Cmp && Cmp->hasOneUse())
    DeadInstructions.insert(Cmp);

  // We create new "steps" for induction variable updates to which the original
  // induction variables map. An original update instruction will be dead if
  // all its users except the induction variable are dead.
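  // For example, in
  //   loop:
  //     %i = phi i64 [ 0, %ph ], [ %i.next, %loop ]
  //     ...
  //     %i.next = add nuw nsw i64 %i, 1
  //     %cond = icmp eq i64 %i.next, %n
  //     br i1 %cond, label %exit, label %loop
  // the update %i.next is used only by the induction phi and by the latch
  // compare collected above, so it becomes trivially dead once we emit our
  // own induction steps.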
7558 for (auto &Induction : *Legal->getInductionVars()) { 7559 PHINode *Ind = Induction.first; 7560 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 7561 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 7562 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 7563 })) 7564 DeadInstructions.insert(IndUpdate); 7565 } 7566 } 7567 7568 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 7569 7570 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 7571 7572 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 7573 Instruction::BinaryOps BinOp) { 7574 // When unrolling and the VF is 1, we only need to add a simple scalar. 7575 Type *Ty = Val->getType(); 7576 assert(!Ty->isVectorTy() && "Val must be a scalar"); 7577 7578 if (Ty->isFloatingPointTy()) { 7579 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 7580 7581 // Floating point operations had to be 'fast' to enable the unrolling. 7582 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 7583 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 7584 } 7585 Constant *C = ConstantInt::get(Ty, StartIdx); 7586 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 7587 } 7588 7589 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 7590 SmallVector<Metadata *, 4> MDs; 7591 // Reserve first location for self reference to the LoopID metadata node. 7592 MDs.push_back(nullptr); 7593 bool IsUnrollMetadata = false; 7594 MDNode *LoopID = L->getLoopID(); 7595 if (LoopID) { 7596 // First find existing loop unrolling disable metadata. 7597 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 7598 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 7599 if (MD) { 7600 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 7601 IsUnrollMetadata = 7602 S && S->getString().startswith("llvm.loop.unroll.disable"); 7603 } 7604 MDs.push_back(LoopID->getOperand(i)); 7605 } 7606 } 7607 7608 if (!IsUnrollMetadata) { 7609 // Add runtime unroll disable metadata. 7610 LLVMContext &Context = L->getHeader()->getContext(); 7611 SmallVector<Metadata *, 1> DisableOperands; 7612 DisableOperands.push_back( 7613 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 7614 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 7615 MDs.push_back(DisableNode); 7616 MDNode *NewLoopID = MDNode::get(Context, MDs); 7617 // Set operand 0 to refer to the loop id itself. 7618 NewLoopID->replaceOperandWith(0, NewLoopID); 7619 L->setLoopID(NewLoopID); 7620 } 7621 } 7622 7623 namespace { 7624 7625 /// VPWidenRecipe is a recipe for producing a copy of vector type for each 7626 /// Instruction in its ingredients independently, in order. This recipe covers 7627 /// most of the traditional vectorization cases where each ingredient transforms 7628 /// into a vectorized version of itself. 7629 class VPWidenRecipe : public VPRecipeBase { 7630 private: 7631 /// Hold the ingredients by pointing to their original BasicBlock location. 7632 BasicBlock::iterator Begin; 7633 BasicBlock::iterator End; 7634 7635 public: 7636 VPWidenRecipe(Instruction *I) : VPRecipeBase(VPWidenSC) { 7637 End = I->getIterator(); 7638 Begin = End++; 7639 } 7640 7641 ~VPWidenRecipe() override = default; 7642 7643 /// Method to support type inquiry through isa, cast, and dyn_cast. 
7644 static inline bool classof(const VPRecipeBase *V) { 7645 return V->getVPRecipeID() == VPRecipeBase::VPWidenSC; 7646 } 7647 7648 /// Produce widened copies of all Ingredients. 7649 void execute(VPTransformState &State) override { 7650 for (auto &Instr : make_range(Begin, End)) 7651 State.ILV->widenInstruction(Instr); 7652 } 7653 7654 /// Augment the recipe to include Instr, if it lies at its End. 7655 bool appendInstruction(Instruction *Instr) { 7656 if (End != Instr->getIterator()) 7657 return false; 7658 End++; 7659 return true; 7660 } 7661 7662 /// Print the recipe. 7663 void print(raw_ostream &O, const Twine &Indent) const override { 7664 O << " +\n" << Indent << "\"WIDEN\\l\""; 7665 for (auto &Instr : make_range(Begin, End)) 7666 O << " +\n" << Indent << "\" " << VPlanIngredient(&Instr) << "\\l\""; 7667 } 7668 }; 7669 7670 /// A recipe for handling phi nodes of integer and floating-point inductions, 7671 /// producing their vector and scalar values. 7672 class VPWidenIntOrFpInductionRecipe : public VPRecipeBase { 7673 private: 7674 PHINode *IV; 7675 TruncInst *Trunc; 7676 7677 public: 7678 VPWidenIntOrFpInductionRecipe(PHINode *IV, TruncInst *Trunc = nullptr) 7679 : VPRecipeBase(VPWidenIntOrFpInductionSC), IV(IV), Trunc(Trunc) {} 7680 ~VPWidenIntOrFpInductionRecipe() override = default; 7681 7682 /// Method to support type inquiry through isa, cast, and dyn_cast. 7683 static inline bool classof(const VPRecipeBase *V) { 7684 return V->getVPRecipeID() == VPRecipeBase::VPWidenIntOrFpInductionSC; 7685 } 7686 7687 /// Generate the vectorized and scalarized versions of the phi node as 7688 /// needed by their users. 7689 void execute(VPTransformState &State) override { 7690 assert(!State.Instance && "Int or FP induction being replicated."); 7691 State.ILV->widenIntOrFpInduction(IV, Trunc); 7692 } 7693 7694 /// Print the recipe. 7695 void print(raw_ostream &O, const Twine &Indent) const override { 7696 O << " +\n" << Indent << "\"WIDEN-INDUCTION"; 7697 if (Trunc) { 7698 O << "\\l\""; 7699 O << " +\n" << Indent << "\" " << VPlanIngredient(IV) << "\\l\""; 7700 O << " +\n" << Indent << "\" " << VPlanIngredient(Trunc) << "\\l\""; 7701 } else 7702 O << " " << VPlanIngredient(IV) << "\\l\""; 7703 } 7704 }; 7705 7706 /// A recipe for handling all phi nodes except for integer and FP inductions. 7707 class VPWidenPHIRecipe : public VPRecipeBase { 7708 private: 7709 PHINode *Phi; 7710 7711 public: 7712 VPWidenPHIRecipe(PHINode *Phi) : VPRecipeBase(VPWidenPHISC), Phi(Phi) {} 7713 ~VPWidenPHIRecipe() override = default; 7714 7715 /// Method to support type inquiry through isa, cast, and dyn_cast. 7716 static inline bool classof(const VPRecipeBase *V) { 7717 return V->getVPRecipeID() == VPRecipeBase::VPWidenPHISC; 7718 } 7719 7720 /// Generate the phi/select nodes. 7721 void execute(VPTransformState &State) override { 7722 State.ILV->widenPHIInstruction(Phi, State.UF, State.VF); 7723 } 7724 7725 /// Print the recipe. 7726 void print(raw_ostream &O, const Twine &Indent) const override { 7727 O << " +\n" << Indent << "\"WIDEN-PHI " << VPlanIngredient(Phi) << "\\l\""; 7728 } 7729 }; 7730 7731 /// A recipe for vectorizing a phi-node as a sequence of mask-based select 7732 /// instructions. 7733 class VPBlendRecipe : public VPRecipeBase { 7734 private: 7735 PHINode *Phi; 7736 7737 public: 7738 VPBlendRecipe(PHINode *Phi) : VPRecipeBase(VPBlendSC), Phi(Phi) {} 7739 7740 /// Method to support type inquiry through isa, cast, and dyn_cast. 
7741 static inline bool classof(const VPRecipeBase *V) { 7742 return V->getVPRecipeID() == VPRecipeBase::VPBlendSC; 7743 } 7744 7745 /// Generate the phi/select nodes. 7746 void execute(VPTransformState &State) override { 7747 State.ILV->setDebugLocFromInst(State.Builder, Phi); 7748 // We know that all PHIs in non-header blocks are converted into 7749 // selects, so we don't have to worry about the insertion order and we 7750 // can just use the builder. 7751 // At this point we generate the predication tree. There may be 7752 // duplications since this is a simple recursive scan, but future 7753 // optimizations will clean it up. 7754 7755 unsigned NumIncoming = Phi->getNumIncomingValues(); 7756 7757 // Generate a sequence of selects of the form: 7758 // SELECT(Mask3, In3, 7759 // SELECT(Mask2, In2, 7760 // ( ...))) 7761 InnerLoopVectorizer::VectorParts Entry(State.UF); 7762 for (unsigned In = 0; In < NumIncoming; In++) { 7763 InnerLoopVectorizer::VectorParts Cond = 7764 State.ILV->createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent()); 7765 7766 for (unsigned Part = 0; Part < State.UF; ++Part) { 7767 Value *In0 = 7768 State.ILV->getOrCreateVectorValue(Phi->getIncomingValue(In), Part); 7769 assert((Cond[Part] || NumIncoming == 1) && 7770 "Multiple predecessors with one predecessor having a full mask"); 7771 if (In == 0) 7772 Entry[Part] = In0; // Initialize with the first incoming value. 7773 else 7774 // Select between the current value and the previous incoming edge 7775 // based on the incoming mask. 7776 Entry[Part] = State.Builder.CreateSelect(Cond[Part], In0, Entry[Part], 7777 "predphi"); 7778 } 7779 } 7780 for (unsigned Part = 0; Part < State.UF; ++Part) 7781 State.ValueMap.setVectorValue(Phi, Part, Entry[Part]); 7782 } 7783 7784 /// Print the recipe. 7785 void print(raw_ostream &O, const Twine &Indent) const override { 7786 O << " +\n" << Indent << "\"BLEND "; 7787 Phi->printAsOperand(O, false); 7788 O << " ="; 7789 if (Phi->getNumIncomingValues() == 1) { 7790 // Not a User of any mask: not really blending, this is a 7791 // single-predecessor phi. 7792 O << " "; 7793 Phi->getIncomingValue(0)->printAsOperand(O, false); 7794 } else { 7795 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I < E; ++I) { 7796 O << " "; 7797 Phi->getIncomingValue(I)->printAsOperand(O, false); 7798 O << "/"; 7799 Phi->getIncomingBlock(I)->printAsOperand(O, false); 7800 } 7801 } 7802 O << "\\l\""; 7803 7804 } 7805 }; 7806 7807 /// VPInterleaveRecipe is a recipe for transforming an interleave group of load 7808 /// or stores into one wide load/store and shuffles. 7809 class VPInterleaveRecipe : public VPRecipeBase { 7810 private: 7811 const InterleaveGroup *IG; 7812 7813 public: 7814 VPInterleaveRecipe(const InterleaveGroup *IG) 7815 : VPRecipeBase(VPInterleaveSC), IG(IG) {} 7816 ~VPInterleaveRecipe() override = default; 7817 7818 /// Method to support type inquiry through isa, cast, and dyn_cast. 7819 static inline bool classof(const VPRecipeBase *V) { 7820 return V->getVPRecipeID() == VPRecipeBase::VPInterleaveSC; 7821 } 7822 7823 /// Generate the wide load or store, and shuffles. 7824 void execute(VPTransformState &State) override { 7825 assert(!State.Instance && "Interleave group being replicated."); 7826 State.ILV->vectorizeInterleaveGroup(IG->getInsertPos()); 7827 } 7828 7829 /// Print the recipe. 
  void print(raw_ostream &O, const Twine &Indent) const override;

  const InterleaveGroup *getInterleaveGroup() { return IG; }
};

/// VPReplicateRecipe replicates a given instruction producing multiple scalar
/// copies of the original scalar type, one per lane, instead of producing a
/// single copy of the widened type for all lanes. If the instruction is known
/// to be uniform, only one copy, for lane zero, will be generated.
class VPReplicateRecipe : public VPRecipeBase {
private:
  /// The instruction being replicated.
  Instruction *Ingredient;

  /// Indicator if only a single replica is needed, rather than one per lane.
  bool IsUniform;

  /// Indicator if the replicas are also predicated.
  bool IsPredicated;

  /// Indicator if the scalar values should also be packed into a vector.
  bool AlsoPack;

public:
  VPReplicateRecipe(Instruction *I, bool IsUniform, bool IsPredicated = false)
      : VPRecipeBase(VPReplicateSC), Ingredient(I), IsUniform(IsUniform),
        IsPredicated(IsPredicated) {
    // Retain the previous behavior of predicateInstructions(), where an
    // insert-element of a predicated instruction got hoisted into the
    // predicated basic block iff it was its only user. This is achieved by
    // having predicated instructions also pack their values into a vector by
    // default unless they have a replicated user which uses their scalar
    // value.
    AlsoPack = IsPredicated && !I->use_empty();
  }

  ~VPReplicateRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *V) {
    return V->getVPRecipeID() == VPRecipeBase::VPReplicateSC;
  }

  /// Generate replicas of the desired Ingredient. Replicas will be generated
  /// for all parts and lanes unless a specific part and lane are specified in
  /// the \p State.
  void execute(VPTransformState &State) override;

  void setAlsoPack(bool Pack) { AlsoPack = Pack; }

  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent) const override {
    O << " +\n"
      << Indent << "\"" << (IsUniform ? "CLONE " : "REPLICATE ")
      << VPlanIngredient(Ingredient);
    if (AlsoPack)
      O << " (S->V)";
    O << "\\l\"";
  }
};

/// A recipe for generating conditional branches on the bits of a mask.
class VPBranchOnMaskRecipe : public VPRecipeBase {
private:
  /// The input IR basic block used to obtain the mask providing the condition
  /// bits for the branch.
  BasicBlock *MaskedBasicBlock;

public:
  VPBranchOnMaskRecipe(BasicBlock *BB)
      : VPRecipeBase(VPBranchOnMaskSC), MaskedBasicBlock(BB) {}

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *V) {
    return V->getVPRecipeID() == VPRecipeBase::VPBranchOnMaskSC;
  }

  /// Generate the extraction of the appropriate bit from the block mask and
  /// the conditional branch.
  void execute(VPTransformState &State) override;

  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent) const override {
    O << " +\n"
      << Indent << "\"BRANCH-ON-MASK-OF " << MaskedBasicBlock->getName()
      << "\\l\"";
  }
};

/// VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when
/// control converges back from a Branch-on-Mask.
The phi nodes are needed in 7920 /// order to merge values that are set under such a branch and feed their uses. 7921 /// The phi nodes can be scalar or vector depending on the users of the value. 7922 /// This recipe works in concert with VPBranchOnMaskRecipe. 7923 class VPPredInstPHIRecipe : public VPRecipeBase { 7924 private: 7925 Instruction *PredInst; 7926 7927 public: 7928 /// Construct a VPPredInstPHIRecipe given \p PredInst whose value needs a phi 7929 /// nodes after merging back from a Branch-on-Mask. 7930 VPPredInstPHIRecipe(Instruction *PredInst) 7931 : VPRecipeBase(VPPredInstPHISC), PredInst(PredInst) {} 7932 ~VPPredInstPHIRecipe() override = default; 7933 7934 /// Method to support type inquiry through isa, cast, and dyn_cast. 7935 static inline bool classof(const VPRecipeBase *V) { 7936 return V->getVPRecipeID() == VPRecipeBase::VPPredInstPHISC; 7937 } 7938 7939 /// Generates phi nodes for live-outs as needed to retain SSA form. 7940 void execute(VPTransformState &State) override; 7941 7942 /// Print the recipe. 7943 void print(raw_ostream &O, const Twine &Indent) const override { 7944 O << " +\n" 7945 << Indent << "\"PHI-PREDICATED-INSTRUCTION " << VPlanIngredient(PredInst) 7946 << "\\l\""; 7947 } 7948 }; 7949 7950 /// A Recipe for widening load/store operations. 7951 class VPWidenMemoryInstructionRecipe : public VPRecipeBase { 7952 private: 7953 Instruction &Instr; 7954 7955 public: 7956 VPWidenMemoryInstructionRecipe(Instruction &Instr) 7957 : VPRecipeBase(VPWidenMemoryInstructionSC), Instr(Instr) {} 7958 7959 /// Method to support type inquiry through isa, cast, and dyn_cast. 7960 static inline bool classof(const VPRecipeBase *V) { 7961 return V->getVPRecipeID() == VPRecipeBase::VPWidenMemoryInstructionSC; 7962 } 7963 7964 /// Generate the wide load/store. 7965 void execute(VPTransformState &State) override { 7966 State.ILV->vectorizeMemoryInstruction(&Instr); 7967 } 7968 7969 /// Print the recipe. 7970 void print(raw_ostream &O, const Twine &Indent) const override { 7971 O << " +\n" << Indent << "\"WIDEN " << VPlanIngredient(&Instr); 7972 O << "\\l\""; 7973 } 7974 }; 7975 } // end anonymous namespace 7976 7977 bool LoopVectorizationPlanner::getDecisionAndClampRange( 7978 const std::function<bool(unsigned)> &Predicate, VFRange &Range) { 7979 assert(Range.End > Range.Start && "Trying to test an empty VF range."); 7980 bool PredicateAtRangeStart = Predicate(Range.Start); 7981 7982 for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2) 7983 if (Predicate(TmpVF) != PredicateAtRangeStart) { 7984 Range.End = TmpVF; 7985 break; 7986 } 7987 7988 return PredicateAtRangeStart; 7989 } 7990 7991 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 7992 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 7993 /// of VF's starting at a given VF and extending it as much as possible. Each 7994 /// vectorization decision can potentially shorten this sub-range during 7995 /// buildVPlan(). 7996 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) { 7997 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 7998 VFRange SubRange = {VF, MaxVF + 1}; 7999 VPlans.push_back(buildVPlan(SubRange)); 8000 VF = SubRange.End; 8001 } 8002 } 8003 8004 InnerLoopVectorizer::VectorParts 8005 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) { 8006 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8007 8008 // Look for cached value. 
8009 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8010 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8011 if (ECEntryIt != EdgeMaskCache.end()) 8012 return ECEntryIt->second; 8013 8014 VectorParts SrcMask = createBlockInMask(Src); 8015 8016 // The terminator has to be a branch inst! 8017 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8018 assert(BI && "Unexpected terminator found"); 8019 8020 if (!BI->isConditional()) 8021 return EdgeMaskCache[Edge] = SrcMask; 8022 8023 VectorParts EdgeMask(UF); 8024 for (unsigned Part = 0; Part < UF; ++Part) { 8025 auto *EdgeMaskPart = getOrCreateVectorValue(BI->getCondition(), Part); 8026 if (BI->getSuccessor(0) != Dst) 8027 EdgeMaskPart = Builder.CreateNot(EdgeMaskPart); 8028 8029 if (SrcMask[Part]) // Otherwise block in-mask is all-one, no need to AND. 8030 EdgeMaskPart = Builder.CreateAnd(EdgeMaskPart, SrcMask[Part]); 8031 8032 EdgeMask[Part] = EdgeMaskPart; 8033 } 8034 8035 return EdgeMaskCache[Edge] = EdgeMask; 8036 } 8037 8038 InnerLoopVectorizer::VectorParts 8039 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) { 8040 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8041 8042 // Look for cached value. 8043 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8044 if (BCEntryIt != BlockMaskCache.end()) 8045 return BCEntryIt->second; 8046 8047 // All-one mask is modelled as no-mask following the convention for masked 8048 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8049 VectorParts BlockMask(UF); 8050 for (unsigned Part = 0; Part < UF; ++Part) 8051 BlockMask[Part] = nullptr; 8052 8053 // Loop incoming mask is all-one. 8054 if (OrigLoop->getHeader() == BB) 8055 return BlockMaskCache[BB] = BlockMask; 8056 8057 // This is the block mask. We OR all incoming edges. 8058 for (auto *Predecessor : predecessors(BB)) { 8059 VectorParts EdgeMask = createEdgeMask(Predecessor, BB); 8060 if (!EdgeMask[0]) // Mask of predecessor is all-one so mask of block is too. 8061 return BlockMaskCache[BB] = EdgeMask; 8062 8063 if (!BlockMask[0]) { // BlockMask has its initialized nullptr value. 8064 BlockMask = EdgeMask; 8065 continue; 8066 } 8067 8068 for (unsigned Part = 0; Part < UF; ++Part) 8069 BlockMask[Part] = Builder.CreateOr(BlockMask[Part], EdgeMask[Part]); 8070 } 8071 8072 return BlockMaskCache[BB] = BlockMask; 8073 } 8074 8075 VPInterleaveRecipe * 8076 LoopVectorizationPlanner::tryToInterleaveMemory(Instruction *I, 8077 VFRange &Range) { 8078 const InterleaveGroup *IG = Legal->getInterleavedAccessGroup(I); 8079 if (!IG) 8080 return nullptr; 8081 8082 // Now check if IG is relevant for VF's in the given range. 8083 auto isIGMember = [&](Instruction *I) -> std::function<bool(unsigned)> { 8084 return [=](unsigned VF) -> bool { 8085 return (VF >= 2 && // Query is illegal for VF == 1 8086 CM.getWideningDecision(I, VF) == 8087 LoopVectorizationCostModel::CM_Interleave); 8088 }; 8089 }; 8090 if (!getDecisionAndClampRange(isIGMember(I), Range)) 8091 return nullptr; 8092 8093 // I is a member of an InterleaveGroup for VF's in the (possibly trimmed) 8094 // range. If it's the primary member of the IG construct a VPInterleaveRecipe. 8095 // Otherwise, it's an adjunct member of the IG, do not construct any Recipe. 
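  // For example, in
  //   for (i = 0; i < n; i++)
  //     sum += a[2*i] + a[2*i+1];
  // the loads of a[2*i] and a[2*i+1] form an interleave group of factor 2;
  // only the group's insert position gets a VPInterleaveRecipe, while the
  // other member is covered by that same recipe.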
8096 assert(I == IG->getInsertPos() && 8097 "Generating a recipe for an adjunct member of an interleave group"); 8098 8099 return new VPInterleaveRecipe(IG); 8100 } 8101 8102 VPWidenMemoryInstructionRecipe * 8103 LoopVectorizationPlanner::tryToWidenMemory(Instruction *I, VFRange &Range) { 8104 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 8105 return nullptr; 8106 8107 auto willWiden = [&](unsigned VF) -> bool { 8108 if (VF == 1) 8109 return false; 8110 if (CM.isScalarAfterVectorization(I, VF) || 8111 CM.isProfitableToScalarize(I, VF)) 8112 return false; 8113 LoopVectorizationCostModel::InstWidening Decision = 8114 CM.getWideningDecision(I, VF); 8115 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8116 "CM decision should be taken at this point."); 8117 assert(Decision != LoopVectorizationCostModel::CM_Interleave && 8118 "Interleave memory opportunity should be caught earlier."); 8119 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8120 }; 8121 8122 if (!getDecisionAndClampRange(willWiden, Range)) 8123 return nullptr; 8124 8125 return new VPWidenMemoryInstructionRecipe(*I); 8126 } 8127 8128 VPWidenIntOrFpInductionRecipe * 8129 LoopVectorizationPlanner::tryToOptimizeInduction(Instruction *I, 8130 VFRange &Range) { 8131 if (PHINode *Phi = dyn_cast<PHINode>(I)) { 8132 // Check if this is an integer or fp induction. If so, build the recipe that 8133 // produces its scalar and vector values. 8134 InductionDescriptor II = Legal->getInductionVars()->lookup(Phi); 8135 if (II.getKind() == InductionDescriptor::IK_IntInduction || 8136 II.getKind() == InductionDescriptor::IK_FpInduction) 8137 return new VPWidenIntOrFpInductionRecipe(Phi); 8138 8139 return nullptr; 8140 } 8141 8142 // Optimize the special case where the source is a constant integer 8143 // induction variable. Notice that we can only optimize the 'trunc' case 8144 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8145 // (c) other casts depend on pointer size. 8146 8147 // Determine whether \p K is a truncation based on an induction variable that 8148 // can be optimized. 
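  // For example, in
  //   %trunc = trunc i64 %iv to i32
  // where %iv is an induction with a constant integer step, the truncation
  // can be folded away by generating the vector induction directly in i32.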
  auto isOptimizableIVTruncate =
      [&](Instruction *K) -> std::function<bool(unsigned)> {
    return
        [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); };
  };

  if (isa<TruncInst>(I) &&
      getDecisionAndClampRange(isOptimizableIVTruncate(I), Range))
    return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
                                             cast<TruncInst>(I));
  return nullptr;
}

VPBlendRecipe *LoopVectorizationPlanner::tryToBlend(Instruction *I) {
  PHINode *Phi = dyn_cast<PHINode>(I);
  if (!Phi || Phi->getParent() == OrigLoop->getHeader())
    return nullptr;

  return new VPBlendRecipe(Phi);
}

bool LoopVectorizationPlanner::tryToWiden(Instruction *I, VPBasicBlock *VPBB,
                                          VFRange &Range) {
  if (Legal->isScalarWithPredication(I))
    return false;

  auto IsVectorizableOpcode = [](unsigned Opcode) {
    switch (Opcode) {
    case Instruction::Add:
    case Instruction::And:
    case Instruction::AShr:
    case Instruction::BitCast:
    case Instruction::Br:
    case Instruction::Call:
    case Instruction::FAdd:
    case Instruction::FCmp:
    case Instruction::FDiv:
    case Instruction::FMul:
    case Instruction::FPExt:
    case Instruction::FPToSI:
    case Instruction::FPToUI:
    case Instruction::FPTrunc:
    case Instruction::FRem:
    case Instruction::FSub:
    case Instruction::GetElementPtr:
    case Instruction::ICmp:
    case Instruction::IntToPtr:
    case Instruction::Load:
    case Instruction::LShr:
    case Instruction::Mul:
    case Instruction::Or:
    case Instruction::PHI:
    case Instruction::PtrToInt:
    case Instruction::SDiv:
    case Instruction::Select:
    case Instruction::SExt:
    case Instruction::Shl:
    case Instruction::SIToFP:
    case Instruction::SRem:
    case Instruction::Store:
    case Instruction::Sub:
    case Instruction::Trunc:
    case Instruction::UDiv:
    case Instruction::UIToFP:
    case Instruction::URem:
    case Instruction::Xor:
    case Instruction::ZExt:
      return true;
    }
    return false;
  };

  if (!IsVectorizableOpcode(I->getOpcode()))
    return false;

  if (CallInst *CI = dyn_cast<CallInst>(I)) {
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
               ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect))
      return false;
  }

  auto willWiden = [&](unsigned VF) -> bool {
    if (!isa<PHINode>(I) && (CM.isScalarAfterVectorization(I, VF) ||
                             CM.isProfitableToScalarize(I, VF)))
      return false;
    if (CallInst *CI = dyn_cast<CallInst>(I)) {
      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      // The following case may be scalarized depending on the VF.
      // The flag shows whether we use an intrinsic or a regular call for the
      // vectorized version of the instruction: is it beneficial to perform
      // the intrinsic call compared to the library call?
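      // For example (with purely illustrative costs), a call to sinf() that
      // maps to the llvm.sin intrinsic might have an intrinsic cost of 10 and
      // a vectorized call cost of 14; the cheaper intrinsic form is then
      // chosen below.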
8241 bool NeedToScalarize; 8242 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 8243 bool UseVectorIntrinsic = 8244 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 8245 return UseVectorIntrinsic || !NeedToScalarize; 8246 } 8247 if (isa<LoadInst>(I) || isa<StoreInst>(I)) { 8248 LoopVectorizationCostModel::InstWidening Decision = 8249 CM.getWideningDecision(I, VF); 8250 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8251 "CM decision should be taken at this point."); 8252 assert(Decision != LoopVectorizationCostModel::CM_Interleave && 8253 "Interleave memory opportunity should be caught earlier."); 8254 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8255 } 8256 return true; 8257 }; 8258 8259 if (!getDecisionAndClampRange(willWiden, Range)) 8260 return false; 8261 8262 // Success: widen this instruction. We optimize the common case where 8263 // consecutive instructions can be represented by a single recipe. 8264 if (!VPBB->empty()) { 8265 VPWidenRecipe *LastWidenRecipe = dyn_cast<VPWidenRecipe>(&VPBB->back()); 8266 if (LastWidenRecipe && LastWidenRecipe->appendInstruction(I)) 8267 return true; 8268 } 8269 8270 VPBB->appendRecipe(new VPWidenRecipe(I)); 8271 return true; 8272 } 8273 8274 VPBasicBlock *LoopVectorizationPlanner::handleReplication( 8275 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8276 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe) { 8277 bool IsUniform = getDecisionAndClampRange( 8278 [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); }, 8279 Range); 8280 8281 bool IsPredicated = Legal->isScalarWithPredication(I); 8282 auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated); 8283 8284 // Find if I uses a predicated instruction. If so, it will use its scalar 8285 // value. Avoid hoisting the insert-element which packs the scalar value into 8286 // a vector value, as that happens iff all users use the vector value. 8287 for (auto &Op : I->operands()) 8288 if (auto *PredInst = dyn_cast<Instruction>(Op)) 8289 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 8290 PredInst2Recipe[PredInst]->setAlsoPack(false); 8291 8292 // Finalize the recipe for Instr, first if it is not predicated. 8293 if (!IsPredicated) { 8294 DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8295 VPBB->appendRecipe(Recipe); 8296 return VPBB; 8297 } 8298 DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8299 assert(VPBB->getSuccessors().empty() && 8300 "VPBB has successors when handling predicated replication."); 8301 // Record predicated instructions for above packing optimizations. 8302 PredInst2Recipe[I] = Recipe; 8303 VPBlockBase *Region = VPBB->setOneSuccessor(createReplicateRegion(I, Recipe)); 8304 return cast<VPBasicBlock>(Region->setOneSuccessor(new VPBasicBlock())); 8305 } 8306 8307 VPRegionBlock * 8308 LoopVectorizationPlanner::createReplicateRegion(Instruction *Instr, 8309 VPRecipeBase *PredRecipe) { 8310 // Instructions marked for predication are replicated and placed under an 8311 // if-then construct to prevent side-effects. 8312 8313 // Build the triangular if-then region. 8314 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8315 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8316 auto *BOMRecipe = new VPBranchOnMaskRecipe(Instr->getParent()); 8317 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8318 auto *PHIRecipe = 8319 Instr->getType()->isVoidTy() ? 
nullptr : new VPPredInstPHIRecipe(Instr); 8320 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8321 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8322 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8323 8324 // Note: first set Entry as region entry and then connect successors starting 8325 // from it in order, to propagate the "parent" of each VPBasicBlock. 8326 Entry->setTwoSuccessors(Pred, Exit); 8327 Pred->setOneSuccessor(Exit); 8328 8329 return Region; 8330 } 8331 8332 std::unique_ptr<VPlan> LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 8333 DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 8334 DenseMap<Instruction *, Instruction *> SinkAfterInverse; 8335 8336 // Collect instructions from the original loop that will become trivially dead 8337 // in the vectorized loop. We don't need to vectorize these instructions. For 8338 // example, original induction update instructions can become dead because we 8339 // separately emit induction "steps" when generating code for the new loop. 8340 // Similarly, we create a new latch condition when setting up the structure 8341 // of the new loop, so the old one can become dead. 8342 SmallPtrSet<Instruction *, 4> DeadInstructions; 8343 collectTriviallyDeadInstructions(DeadInstructions); 8344 8345 // Hold a mapping from predicated instructions to their recipes, in order to 8346 // fix their AlsoPack behavior if a user is determined to replicate and use a 8347 // scalar instead of vector value. 8348 DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe; 8349 8350 // Create a dummy pre-entry VPBasicBlock to start building the VPlan. 8351 VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry"); 8352 auto Plan = llvm::make_unique<VPlan>(VPBB); 8353 8354 // Scan the body of the loop in a topological order to visit each basic block 8355 // after having visited its predecessor basic blocks. 8356 LoopBlocksDFS DFS(OrigLoop); 8357 DFS.perform(LI); 8358 8359 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 8360 // Relevant instructions from basic block BB will be grouped into VPRecipe 8361 // ingredients and fill a new VPBasicBlock. 8362 unsigned VPBBsForBB = 0; 8363 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName()); 8364 VPBB->setOneSuccessor(FirstVPBBForBB); 8365 VPBB = FirstVPBBForBB; 8366 8367 std::vector<Instruction *> Ingredients; 8368 8369 // Organize the ingredients to vectorize from current basic block in the 8370 // right order. 8371 for (Instruction &I : *BB) { 8372 Instruction *Instr = &I; 8373 8374 // First filter out irrelevant instructions, to ensure no recipes are 8375 // built for them. 8376 if (isa<BranchInst>(Instr) || isa<DbgInfoIntrinsic>(Instr) || 8377 DeadInstructions.count(Instr)) 8378 continue; 8379 8380 // I is a member of an InterleaveGroup for Range.Start. If it's an adjunct 8381 // member of the IG, do not construct any Recipe for it. 
8382 const InterleaveGroup *IG = Legal->getInterleavedAccessGroup(Instr); 8383 if (IG && Instr != IG->getInsertPos() && 8384 Range.Start >= 2 && // Query is illegal for VF == 1 8385 CM.getWideningDecision(Instr, Range.Start) == 8386 LoopVectorizationCostModel::CM_Interleave) { 8387 if (SinkAfterInverse.count(Instr)) 8388 Ingredients.push_back(SinkAfterInverse.find(Instr)->second); 8389 continue; 8390 } 8391 8392 // Move instructions to handle first-order recurrences, step 1: avoid 8393 // handling this instruction until after we've handled the instruction it 8394 // should follow. 8395 auto SAIt = SinkAfter.find(Instr); 8396 if (SAIt != SinkAfter.end()) { 8397 DEBUG(dbgs() << "Sinking" << *SAIt->first << " after" << *SAIt->second 8398 << " to vectorize a 1st order recurrence.\n"); 8399 SinkAfterInverse[SAIt->second] = Instr; 8400 continue; 8401 } 8402 8403 Ingredients.push_back(Instr); 8404 8405 // Move instructions to handle first-order recurrences, step 2: push the 8406 // instruction to be sunk at its insertion point. 8407 auto SAInvIt = SinkAfterInverse.find(Instr); 8408 if (SAInvIt != SinkAfterInverse.end()) 8409 Ingredients.push_back(SAInvIt->second); 8410 } 8411 8412 // Introduce each ingredient into VPlan. 8413 for (Instruction *Instr : Ingredients) { 8414 VPRecipeBase *Recipe = nullptr; 8415 8416 // Check if Instr should belong to an interleave memory recipe, or already 8417 // does. In the latter case Instr is irrelevant. 8418 if ((Recipe = tryToInterleaveMemory(Instr, Range))) { 8419 VPBB->appendRecipe(Recipe); 8420 continue; 8421 } 8422 8423 // Check if Instr is a memory operation that should be widened. 8424 if ((Recipe = tryToWidenMemory(Instr, Range))) { 8425 VPBB->appendRecipe(Recipe); 8426 continue; 8427 } 8428 8429 // Check if Instr should form some PHI recipe. 8430 if ((Recipe = tryToOptimizeInduction(Instr, Range))) { 8431 VPBB->appendRecipe(Recipe); 8432 continue; 8433 } 8434 if ((Recipe = tryToBlend(Instr))) { 8435 VPBB->appendRecipe(Recipe); 8436 continue; 8437 } 8438 if (PHINode *Phi = dyn_cast<PHINode>(Instr)) { 8439 VPBB->appendRecipe(new VPWidenPHIRecipe(Phi)); 8440 continue; 8441 } 8442 8443 // Check if Instr is to be widened by a general VPWidenRecipe, after 8444 // having first checked for specific widening recipes that deal with 8445 // Interleave Groups, Inductions and Phi nodes. 8446 if (tryToWiden(Instr, VPBB, Range)) 8447 continue; 8448 8449 // Otherwise, if all widening options failed, Instruction is to be 8450 // replicated. This may create a successor for VPBB. 8451 VPBasicBlock *NextVPBB = 8452 handleReplication(Instr, Range, VPBB, PredInst2Recipe); 8453 if (NextVPBB != VPBB) { 8454 VPBB = NextVPBB; 8455 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 8456 : ""); 8457 } 8458 } 8459 } 8460 8461 // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks 8462 // may also be empty, such as the last one VPBB, reflecting original 8463 // basic-blocks with no recipes. 
  VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
  assert(PreEntry->empty() && "Expecting empty pre-entry block.");
  VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
  PreEntry->disconnectSuccessor(Entry);
  delete PreEntry;

  std::string PlanName;
  raw_string_ostream RSO(PlanName);
  unsigned VF = Range.Start;
  Plan->addVF(VF);
  RSO << "Initial VPlan for VF={" << VF;
  for (VF *= 2; VF < Range.End; VF *= 2) {
    Plan->addVF(VF);
    RSO << "," << VF;
  }
  RSO << "},UF>=1";
  RSO.flush();
  Plan->setName(PlanName);

  return Plan;
}

void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const {
  O << " +\n"
    << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
  IG->getInsertPos()->printAsOperand(O, false);
  O << "\\l\"";
  for (unsigned i = 0; i < IG->getFactor(); ++i)
    if (Instruction *I = IG->getMember(i))
      O << " +\n"
        << Indent << "\" " << VPlanIngredient(I) << " " << i << "\\l\"";
}

void VPReplicateRecipe::execute(VPTransformState &State) {
  if (State.Instance) { // Generate a single instance.
    State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
    // Insert scalar instance, packing it into a vector.
    if (AlsoPack && State.VF > 1) {
      // If we're constructing lane 0, initialize to start from undef.
      if (State.Instance->Lane == 0) {
        Value *Undef =
            UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
        State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
      }
      State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
    }
    return;
  }

  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
  unsigned EndLane = IsUniform ? 1 : State.VF;
  for (unsigned Part = 0; Part < State.UF; ++Part)
    for (unsigned Lane = 0; Lane < EndLane; ++Lane)
      State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
}

void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Branch on Mask works only on single instance.");

  unsigned Part = State.Instance->Part;
  unsigned Lane = State.Instance->Lane;

  auto Cond = State.ILV->createBlockInMask(MaskedBasicBlock);

  Value *ConditionBit = Cond[Part];
  if (!ConditionBit) // Block in mask is all-one.
    ConditionBit = State.Builder.getTrue();
  else if (ConditionBit->getType()->isVectorTy())
    ConditionBit = State.Builder.CreateExtractElement(
        ConditionBit, State.Builder.getInt32(Lane));

  // Replace the temporary unreachable terminator with a new conditional
  // branch, whose two destinations will be set later when they are created.
8539 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 8540 assert(isa<UnreachableInst>(CurrentTerminator) && 8541 "Expected to replace unreachable terminator with conditional branch."); 8542 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 8543 CondBr->setSuccessor(0, nullptr); 8544 ReplaceInstWithInst(CurrentTerminator, CondBr); 8545 8546 DEBUG(dbgs() << "\nLV: vectorizing BranchOnMask recipe " 8547 << MaskedBasicBlock->getName()); 8548 } 8549 8550 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 8551 assert(State.Instance && "Predicated instruction PHI works per instance."); 8552 Instruction *ScalarPredInst = cast<Instruction>( 8553 State.ValueMap.getScalarValue(PredInst, *State.Instance)); 8554 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 8555 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 8556 assert(PredicatingBB && "Predicated block has no single predecessor."); 8557 8558 // By current pack/unpack logic we need to generate only a single phi node: if 8559 // a vector value for the predicated instruction exists at this point it means 8560 // the instruction has vector users only, and a phi for the vector value is 8561 // needed. In this case the recipe of the predicated instruction is marked to 8562 // also do that packing, thereby "hoisting" the insert-element sequence. 8563 // Otherwise, a phi node for the scalar value is needed. 8564 unsigned Part = State.Instance->Part; 8565 if (State.ValueMap.hasVectorValue(PredInst, Part)) { 8566 Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part); 8567 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 8568 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 8569 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 8570 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 8571 State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache. 8572 } else { 8573 Type *PredInstType = PredInst->getType(); 8574 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 8575 Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB); 8576 Phi->addIncoming(ScalarPredInst, PredicatedBB); 8577 State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi); 8578 } 8579 } 8580 8581 bool LoopVectorizePass::processLoop(Loop *L) { 8582 assert(L->empty() && "Only process inner loops."); 8583 8584 #ifndef NDEBUG 8585 const std::string DebugLocStr = getDebugLocString(L); 8586 #endif /* NDEBUG */ 8587 8588 DEBUG(dbgs() << "\nLV: Checking a loop in \"" 8589 << L->getHeader()->getParent()->getName() << "\" from " 8590 << DebugLocStr << "\n"); 8591 8592 LoopVectorizeHints Hints(L, DisableUnrolling, *ORE); 8593 8594 DEBUG(dbgs() << "LV: Loop hints:" 8595 << " force=" 8596 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 8597 ? "disabled" 8598 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 8599 ? "enabled" 8600 : "?")) 8601 << " width=" << Hints.getWidth() 8602 << " unroll=" << Hints.getInterleave() << "\n"); 8603 8604 // Function containing loop 8605 Function *F = L->getHeader()->getParent(); 8606 8607 // Looking at the diagnostic output is the only way to determine if a loop 8608 // was vectorized (other than looking at the IR or machine code), so it 8609 // is important to generate an optimization remark for each loop. Most of 8610 // these messages are generated as OptimizationRemarkAnalysis. 
  // Remarks generated as OptimizationRemark and OptimizationRemarkMissed are
  // less verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.

  if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
    DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements(*ORE);
  LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE,
                                &Requirements, &Hints);
  if (!LVL.canVectorize()) {
    DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Check the function attributes to find out if this function should be
  // optimized for size.
  bool OptForSize =
      Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();

  // Check the loop for a trip count threshold: vectorize loops with a tiny
  // trip count by optimizing for size, to minimize overheads.
  unsigned ExpectedTC = SE->getSmallConstantMaxTripCount(L);
  bool HasExpectedTC = (ExpectedTC > 0);

  if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) {
    auto EstimatedTC = getLoopEstimatedTripCount(L);
    if (EstimatedTC) {
      ExpectedTC = *EstimatedTC;
      HasExpectedTC = true;
    }
  }

  if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) {
    DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                 << "This loop is worth vectorizing only if no scalar "
                 << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      DEBUG(dbgs() << "\n");
      // Loops with a very small trip count are considered for vectorization
      // under OptForSize, thereby making sure the cost of their loop body is
      // dominant, free of runtime guards and scalar iteration overheads.
      OptForSize = true;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem right -- what if the loop is an integer
  // loop and the vector instructions selected are purely integer vector
  // instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
                    " attribute is used.\n");
    ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                   "NoImplicitFloat", L)
              << "loop not vectorized due to NoImplicitFloat attribute");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
    ORE->emit(
        createMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
        << "loop not vectorized due to unsafe FP support.");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Use the cost model.
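  // In outline, the decisions below are: the planner picks the best
  // vectorization factor (honoring a user-specified width, if any), and the
  // cost model then picks an interleave count for that width, which a
  // user-specified count later overrides. For example (a hypothetical
  // scenario), '#pragma clang loop vectorize_width(4) interleave_count(2)'
  // arrives here as UserVF == 4 and UserIC == 2 via LoopVectorizeHints.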
  LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM);

  // Get the user vectorization factor.
  unsigned UserVF = Hints.getWidth();

  // Plan how to best vectorize; return the best VF and its cost.
  LoopVectorizationCostModel::VectorizationFactor VF =
      LVP.plan(OptForSize, UserVF);

  // Select the interleave count.
  unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);

  // Get the user interleave count.
  unsigned UserIC = Hints.getInterleave();

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                    "requirements.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  if (VF.Width == 1) {
    DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly
    // disabled.
    DEBUG(dbgs()
          << "LV: Interleaving is beneficial but is explicitly disabled.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if the user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
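    // The remarks below are passed to ORE->emit() as closures so the remark
    // objects are only constructed when some consumer (e.g.
    // -Rpass-missed=loop-vectorize or -fsave-optimization-record) has
    // actually enabled them.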
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not legal to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleave count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is *legal* to vectorize the loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LVP.executePlan(LB, DT);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling of the scalar loop when there
    // are no runtime checks about strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      AddRuntimeUnrollDisableMetaData(L);

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleave count: " << NV("InterleaveCount", IC) << ")";
    });
  }

  // Mark the loop as already vectorized to avoid vectorizing it again.
  Hints.setAlreadyVectorized();

  DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;

  // Don't attempt vectorization if:
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
8863 // 8864 // The second condition is necessary because, even if the target has no 8865 // vector registers, loop vectorization may still enable scalar 8866 // interleaving. 8867 if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2) 8868 return false; 8869 8870 bool Changed = false; 8871 8872 // The vectorizer requires loops to be in simplified form. 8873 // Since simplification may add new inner loops, it has to run before the 8874 // legality and profitability checks. This means running the loop vectorizer 8875 // will simplify all loops, regardless of whether anything end up being 8876 // vectorized. 8877 for (auto &L : *LI) 8878 Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */); 8879 8880 // Build up a worklist of inner-loops to vectorize. This is necessary as 8881 // the act of vectorizing or partially unrolling a loop creates new loops 8882 // and can invalidate iterators across the loops. 8883 SmallVector<Loop *, 8> Worklist; 8884 8885 for (Loop *L : *LI) 8886 addAcyclicInnerLoop(*L, Worklist); 8887 8888 LoopsAnalyzed += Worklist.size(); 8889 8890 // Now walk the identified inner loops. 8891 while (!Worklist.empty()) { 8892 Loop *L = Worklist.pop_back_val(); 8893 8894 // For the inner loops we actually process, form LCSSA to simplify the 8895 // transform. 8896 Changed |= formLCSSARecursively(*L, *DT, LI, SE); 8897 8898 Changed |= processLoop(L); 8899 } 8900 8901 // Process each loop nest in the function. 8902 return Changed; 8903 } 8904 8905 PreservedAnalyses LoopVectorizePass::run(Function &F, 8906 FunctionAnalysisManager &AM) { 8907 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 8908 auto &LI = AM.getResult<LoopAnalysis>(F); 8909 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 8910 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 8911 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 8912 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 8913 auto &AA = AM.getResult<AAManager>(F); 8914 auto &AC = AM.getResult<AssumptionAnalysis>(F); 8915 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 8916 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 8917 8918 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 8919 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 8920 [&](Loop &L) -> const LoopAccessInfo & { 8921 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI}; 8922 return LAM.getResult<LoopAccessAnalysis>(L, AR); 8923 }; 8924 bool Changed = 8925 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE); 8926 if (!Changed) 8927 return PreservedAnalyses::all(); 8928 PreservedAnalyses PA; 8929 PA.preserve<LoopAnalysis>(); 8930 PA.preserve<DominatorTreeAnalysis>(); 8931 PA.preserve<BasicAA>(); 8932 PA.preserve<GlobalsAA>(); 8933 return PA; 8934 } 8935