//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
// Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
// Data for SIMD.
//
// Other ideas/concepts are from:
// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
// Vectorizing Compilers.
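//
// As an illustrative example (for exposition only), with a vectorization
// factor of four the vectorizer conceptually rewrites
//
//   for (int i = 0; i < n; ++i)
//     C[i] = A[i] + B[i];
//
// into a loop that performs one 'wide' iteration over four consecutive
// elements at a time, followed by a scalar epilogue loop that handles the
// remaining n % 4 iterations.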
54 // 55 //===----------------------------------------------------------------------===// 56 57 #include "llvm/Transforms/Vectorize/LoopVectorize.h" 58 #include "LoopVectorizationPlanner.h" 59 #include "llvm/ADT/APInt.h" 60 #include "llvm/ADT/ArrayRef.h" 61 #include "llvm/ADT/DenseMap.h" 62 #include "llvm/ADT/DenseMapInfo.h" 63 #include "llvm/ADT/Hashing.h" 64 #include "llvm/ADT/MapVector.h" 65 #include "llvm/ADT/None.h" 66 #include "llvm/ADT/Optional.h" 67 #include "llvm/ADT/STLExtras.h" 68 #include "llvm/ADT/SetVector.h" 69 #include "llvm/ADT/SmallPtrSet.h" 70 #include "llvm/ADT/SmallSet.h" 71 #include "llvm/ADT/SmallVector.h" 72 #include "llvm/ADT/Statistic.h" 73 #include "llvm/ADT/StringRef.h" 74 #include "llvm/ADT/Twine.h" 75 #include "llvm/ADT/iterator_range.h" 76 #include "llvm/Analysis/AssumptionCache.h" 77 #include "llvm/Analysis/BasicAliasAnalysis.h" 78 #include "llvm/Analysis/BlockFrequencyInfo.h" 79 #include "llvm/Analysis/CFG.h" 80 #include "llvm/Analysis/CodeMetrics.h" 81 #include "llvm/Analysis/DemandedBits.h" 82 #include "llvm/Analysis/GlobalsModRef.h" 83 #include "llvm/Analysis/LoopAccessAnalysis.h" 84 #include "llvm/Analysis/LoopAnalysisManager.h" 85 #include "llvm/Analysis/LoopInfo.h" 86 #include "llvm/Analysis/LoopIterator.h" 87 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 88 #include "llvm/Analysis/ScalarEvolution.h" 89 #include "llvm/Analysis/ScalarEvolutionExpander.h" 90 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 91 #include "llvm/Analysis/TargetLibraryInfo.h" 92 #include "llvm/Analysis/TargetTransformInfo.h" 93 #include "llvm/Analysis/VectorUtils.h" 94 #include "llvm/IR/Attributes.h" 95 #include "llvm/IR/BasicBlock.h" 96 #include "llvm/IR/CFG.h" 97 #include "llvm/IR/Constant.h" 98 #include "llvm/IR/Constants.h" 99 #include "llvm/IR/DataLayout.h" 100 #include "llvm/IR/DebugInfoMetadata.h" 101 #include "llvm/IR/DebugLoc.h" 102 #include "llvm/IR/DerivedTypes.h" 103 #include "llvm/IR/DiagnosticInfo.h" 104 #include "llvm/IR/Dominators.h" 105 #include "llvm/IR/Function.h" 106 #include "llvm/IR/IRBuilder.h" 107 #include "llvm/IR/InstrTypes.h" 108 #include "llvm/IR/Instruction.h" 109 #include "llvm/IR/Instructions.h" 110 #include "llvm/IR/IntrinsicInst.h" 111 #include "llvm/IR/Intrinsics.h" 112 #include "llvm/IR/LLVMContext.h" 113 #include "llvm/IR/Metadata.h" 114 #include "llvm/IR/Module.h" 115 #include "llvm/IR/Operator.h" 116 #include "llvm/IR/Type.h" 117 #include "llvm/IR/Use.h" 118 #include "llvm/IR/User.h" 119 #include "llvm/IR/Value.h" 120 #include "llvm/IR/ValueHandle.h" 121 #include "llvm/IR/Verifier.h" 122 #include "llvm/Pass.h" 123 #include "llvm/Support/Casting.h" 124 #include "llvm/Support/CommandLine.h" 125 #include "llvm/Support/Compiler.h" 126 #include "llvm/Support/Debug.h" 127 #include "llvm/Support/ErrorHandling.h" 128 #include "llvm/Support/MathExtras.h" 129 #include "llvm/Support/raw_ostream.h" 130 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 131 #include "llvm/Transforms/Utils/LoopSimplify.h" 132 #include "llvm/Transforms/Utils/LoopUtils.h" 133 #include "llvm/Transforms/Utils/LoopVersioning.h" 134 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 135 #include <algorithm> 136 #include <cassert> 137 #include <cstdint> 138 #include <cstdlib> 139 #include <functional> 140 #include <iterator> 141 #include <limits> 142 #include <memory> 143 #include <string> 144 #include <tuple> 145 #include <utility> 146 #include <vector> 147 148 using namespace llvm; 149 150 #define LV_NAME "loop-vectorize" 151 #define DEBUG_TYPE 
LV_NAME 152 153 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 154 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 155 156 /// Loops with a known constant trip count below this number are vectorized only 157 /// if no scalar iteration overheads are incurred. 158 static cl::opt<unsigned> TinyTripCountVectorThreshold( 159 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 160 cl::desc("Loops with a constant trip count that is smaller than this " 161 "value are vectorized only if no scalar iteration overheads " 162 "are incurred.")); 163 164 static cl::opt<bool> MaximizeBandwidth( 165 "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, 166 cl::desc("Maximize bandwidth when selecting vectorization factor which " 167 "will be determined by the smallest type in loop.")); 168 169 static cl::opt<bool> EnableInterleavedMemAccesses( 170 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, 171 cl::desc("Enable vectorization on interleaved memory accesses in a loop")); 172 173 /// Maximum factor for an interleaved memory access. 174 static cl::opt<unsigned> MaxInterleaveGroupFactor( 175 "max-interleave-group-factor", cl::Hidden, 176 cl::desc("Maximum factor for an interleaved access group (default = 8)"), 177 cl::init(8)); 178 179 /// We don't interleave loops with a known constant trip count below this 180 /// number. 181 static const unsigned TinyTripCountInterleaveThreshold = 128; 182 183 static cl::opt<unsigned> ForceTargetNumScalarRegs( 184 "force-target-num-scalar-regs", cl::init(0), cl::Hidden, 185 cl::desc("A flag that overrides the target's number of scalar registers.")); 186 187 static cl::opt<unsigned> ForceTargetNumVectorRegs( 188 "force-target-num-vector-regs", cl::init(0), cl::Hidden, 189 cl::desc("A flag that overrides the target's number of vector registers.")); 190 191 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor( 192 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden, 193 cl::desc("A flag that overrides the target's max interleave factor for " 194 "scalar loops.")); 195 196 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor( 197 "force-target-max-vector-interleave", cl::init(0), cl::Hidden, 198 cl::desc("A flag that overrides the target's max interleave factor for " 199 "vectorized loops.")); 200 201 static cl::opt<unsigned> ForceTargetInstructionCost( 202 "force-target-instruction-cost", cl::init(0), cl::Hidden, 203 cl::desc("A flag that overrides the target's expected cost for " 204 "an instruction to a single constant value. Mostly " 205 "useful for getting consistent testing.")); 206 207 static cl::opt<unsigned> SmallLoopCost( 208 "small-loop-cost", cl::init(20), cl::Hidden, 209 cl::desc( 210 "The cost of a loop that is considered 'small' by the interleaver.")); 211 212 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 213 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 214 cl::desc("Enable the use of the block frequency analysis to access PGO " 215 "heuristics minimizing code growth in cold regions and being more " 216 "aggressive in hot regions.")); 217 218 // Runtime interleave loops for load/store throughput. 219 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 220 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 221 cl::desc( 222 "Enable runtime interleaving until load/store ports are saturated")); 223 224 /// The number of stores in a loop that are allowed to need predication. 
225 static cl::opt<unsigned> NumberOfStoresToPredicate( 226 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 227 cl::desc("Max number of stores to be predicated behind an if.")); 228 229 static cl::opt<bool> EnableIndVarRegisterHeur( 230 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 231 cl::desc("Count the induction variable only once when interleaving")); 232 233 static cl::opt<bool> EnableCondStoresVectorization( 234 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 235 cl::desc("Enable if predication of stores during vectorization.")); 236 237 static cl::opt<unsigned> MaxNestedScalarReductionIC( 238 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 239 cl::desc("The maximum interleave count to use when interleaving a scalar " 240 "reduction in a nested loop.")); 241 242 static cl::opt<bool> EnableVPlanNativePath( 243 "enable-vplan-native-path", cl::init(false), cl::Hidden, 244 cl::desc("Enable VPlan-native vectorization path with " 245 "support for outer loop vectorization.")); 246 247 /// A helper function for converting Scalar types to vector types. 248 /// If the incoming type is void, we return void. If the VF is 1, we return 249 /// the scalar type. 250 static Type *ToVectorTy(Type *Scalar, unsigned VF) { 251 if (Scalar->isVoidTy() || VF == 1) 252 return Scalar; 253 return VectorType::get(Scalar, VF); 254 } 255 256 // FIXME: The following helper functions have multiple implementations 257 // in the project. They can be effectively organized in a common Load/Store 258 // utilities unit. 259 260 /// A helper function that returns the type of loaded or stored value. 261 static Type *getMemInstValueType(Value *I) { 262 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 263 "Expected Load or Store instruction"); 264 if (auto *LI = dyn_cast<LoadInst>(I)) 265 return LI->getType(); 266 return cast<StoreInst>(I)->getValueOperand()->getType(); 267 } 268 269 /// A helper function that returns the alignment of load or store instruction. 270 static unsigned getMemInstAlignment(Value *I) { 271 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 272 "Expected Load or Store instruction"); 273 if (auto *LI = dyn_cast<LoadInst>(I)) 274 return LI->getAlignment(); 275 return cast<StoreInst>(I)->getAlignment(); 276 } 277 278 /// A helper function that returns the address space of the pointer operand of 279 /// load or store instruction. 280 static unsigned getMemInstAddressSpace(Value *I) { 281 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 282 "Expected Load or Store instruction"); 283 if (auto *LI = dyn_cast<LoadInst>(I)) 284 return LI->getPointerAddressSpace(); 285 return cast<StoreInst>(I)->getPointerAddressSpace(); 286 } 287 288 /// A helper function that returns true if the given type is irregular. The 289 /// type is irregular if its allocated size doesn't equal the store size of an 290 /// element of the corresponding vector type at the given vectorization factor. 291 static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) { 292 // Determine if an array of VF elements of type Ty is "bitcast compatible" 293 // with a <VF x Ty> vector. 294 if (VF > 1) { 295 auto *VectorTy = VectorType::get(Ty, VF); 296 return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy); 297 } 298 299 // If the vectorization factor is one, we just check if an array of type Ty 300 // requires padding between elements. 
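  // For example (illustrative): x86_fp80 has an 80-bit store size that is
  // padded out to a larger allocation size, so arrays of it require padding
  // between elements and the type is reported as irregular.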
301 return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty); 302 } 303 304 /// A helper function that returns the reciprocal of the block probability of 305 /// predicated blocks. If we return X, we are assuming the predicated block 306 /// will execute once for every X iterations of the loop header. 307 /// 308 /// TODO: We should use actual block probability here, if available. Currently, 309 /// we always assume predicated blocks have a 50% chance of executing. 310 static unsigned getReciprocalPredBlockProb() { return 2; } 311 312 /// A helper function that adds a 'fast' flag to floating-point operations. 313 static Value *addFastMathFlag(Value *V) { 314 if (isa<FPMathOperator>(V)) { 315 FastMathFlags Flags; 316 Flags.setFast(); 317 cast<Instruction>(V)->setFastMathFlags(Flags); 318 } 319 return V; 320 } 321 322 /// A helper function that returns an integer or floating-point constant with 323 /// value C. 324 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) { 325 return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C) 326 : ConstantFP::get(Ty, C); 327 } 328 329 namespace llvm { 330 331 /// InnerLoopVectorizer vectorizes loops which contain only one basic 332 /// block to a specified vectorization factor (VF). 333 /// This class performs the widening of scalars into vectors, or multiple 334 /// scalars. This class also implements the following features: 335 /// * It inserts an epilogue loop for handling loops that don't have iteration 336 /// counts that are known to be a multiple of the vectorization factor. 337 /// * It handles the code generation for reduction variables. 338 /// * Scalarization (implementation using scalars) of un-vectorizable 339 /// instructions. 340 /// InnerLoopVectorizer does not perform any vectorization-legality 341 /// checks, and relies on the caller to check for the different legality 342 /// aspects. The InnerLoopVectorizer relies on the 343 /// LoopVectorizationLegality class to provide information about the induction 344 /// and reduction variables that were found to a given vectorization factor. 345 class InnerLoopVectorizer { 346 public: 347 InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, 348 LoopInfo *LI, DominatorTree *DT, 349 const TargetLibraryInfo *TLI, 350 const TargetTransformInfo *TTI, AssumptionCache *AC, 351 OptimizationRemarkEmitter *ORE, unsigned VecWidth, 352 unsigned UnrollFactor, LoopVectorizationLegality *LVL, 353 LoopVectorizationCostModel *CM) 354 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI), 355 AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor), 356 Builder(PSE.getSE()->getContext()), 357 VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {} 358 virtual ~InnerLoopVectorizer() = default; 359 360 /// Create a new empty loop. Unlink the old loop and connect the new one. 361 /// Return the pre-header block of the new loop. 362 BasicBlock *createVectorizedLoopSkeleton(); 363 364 /// Widen a single instruction within the innermost loop. 365 void widenInstruction(Instruction &I); 366 367 /// Fix the vectorized code, taking care of header phi's, live-outs, and more. 368 void fixVectorizedLoop(); 369 370 // Return true if any runtime check is added. 371 bool areSafetyChecksAdded() { return AddedSafetyChecks; } 372 373 /// A type for vectorized values in the new loop. Each value from the 374 /// original loop, when vectorized, is represented by UF vector values in the 375 /// new unrolled loop, where UF is the unroll factor. 
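  /// For example (illustrative), with UF = 2 and VF = 4, a single 32-bit add
  /// in the original loop is represented by two <4 x i32> values, one value
  /// for each unrolled part.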
376 using VectorParts = SmallVector<Value *, 2>; 377 378 /// Vectorize a single PHINode in a block. This method handles the induction 379 /// variable canonicalization. It supports both VF = 1 for unrolled loops and 380 /// arbitrary length vectors. 381 void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF); 382 383 /// A helper function to scalarize a single Instruction in the innermost loop. 384 /// Generates a sequence of scalar instances for each lane between \p MinLane 385 /// and \p MaxLane, times each part between \p MinPart and \p MaxPart, 386 /// inclusive.. 387 void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance, 388 bool IfPredicateInstr); 389 390 /// Widen an integer or floating-point induction variable \p IV. If \p Trunc 391 /// is provided, the integer induction variable will first be truncated to 392 /// the corresponding type. 393 void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr); 394 395 /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a 396 /// vector or scalar value on-demand if one is not yet available. When 397 /// vectorizing a loop, we visit the definition of an instruction before its 398 /// uses. When visiting the definition, we either vectorize or scalarize the 399 /// instruction, creating an entry for it in the corresponding map. (In some 400 /// cases, such as induction variables, we will create both vector and scalar 401 /// entries.) Then, as we encounter uses of the definition, we derive values 402 /// for each scalar or vector use unless such a value is already available. 403 /// For example, if we scalarize a definition and one of its uses is vector, 404 /// we build the required vector on-demand with an insertelement sequence 405 /// when visiting the use. Otherwise, if the use is scalar, we can use the 406 /// existing scalar definition. 407 /// 408 /// Return a value in the new loop corresponding to \p V from the original 409 /// loop at unroll index \p Part. If the value has already been vectorized, 410 /// the corresponding vector entry in VectorLoopValueMap is returned. If, 411 /// however, the value has a scalar entry in VectorLoopValueMap, we construct 412 /// a new vector value on-demand by inserting the scalar values into a vector 413 /// with an insertelement sequence. If the value has been neither vectorized 414 /// nor scalarized, it must be loop invariant, so we simply broadcast the 415 /// value into a vector. 416 Value *getOrCreateVectorValue(Value *V, unsigned Part); 417 418 /// Return a value in the new loop corresponding to \p V from the original 419 /// loop at unroll and vector indices \p Instance. If the value has been 420 /// vectorized but not scalarized, the necessary extractelement instruction 421 /// will be generated. 422 Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance); 423 424 /// Construct the vector value of a scalarized value \p V one lane at a time. 425 void packScalarIntoVectorValue(Value *V, const VPIteration &Instance); 426 427 /// Try to vectorize the interleaved access group that \p Instr belongs to. 428 void vectorizeInterleaveGroup(Instruction *Instr); 429 430 /// Vectorize Load and Store instructions, optionally masking the vector 431 /// operations if \p BlockInMask is non-null. 432 void vectorizeMemoryInstruction(Instruction *Instr, 433 VectorParts *BlockInMask = nullptr); 434 435 /// Set the debug location in the builder using the debug location in 436 /// the instruction. 
437 void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr); 438 439 protected: 440 friend class LoopVectorizationPlanner; 441 442 /// A small list of PHINodes. 443 using PhiVector = SmallVector<PHINode *, 4>; 444 445 /// A type for scalarized values in the new loop. Each value from the 446 /// original loop, when scalarized, is represented by UF x VF scalar values 447 /// in the new unrolled loop, where UF is the unroll factor and VF is the 448 /// vectorization factor. 449 using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>; 450 451 /// Set up the values of the IVs correctly when exiting the vector loop. 452 void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, 453 Value *CountRoundDown, Value *EndValue, 454 BasicBlock *MiddleBlock); 455 456 /// Create a new induction variable inside L. 457 PHINode *createInductionVariable(Loop *L, Value *Start, Value *End, 458 Value *Step, Instruction *DL); 459 460 /// Handle all cross-iteration phis in the header. 461 void fixCrossIterationPHIs(); 462 463 /// Fix a first-order recurrence. This is the second phase of vectorizing 464 /// this phi node. 465 void fixFirstOrderRecurrence(PHINode *Phi); 466 467 /// Fix a reduction cross-iteration phi. This is the second phase of 468 /// vectorizing this phi node. 469 void fixReduction(PHINode *Phi); 470 471 /// The Loop exit block may have single value PHI nodes with some 472 /// incoming value. While vectorizing we only handled real values 473 /// that were defined inside the loop and we should have one value for 474 /// each predecessor of its parent basic block. See PR14725. 475 void fixLCSSAPHIs(); 476 477 /// Iteratively sink the scalarized operands of a predicated instruction into 478 /// the block that was created for it. 479 void sinkScalarOperands(Instruction *PredInst); 480 481 /// Shrinks vector element sizes to the smallest bitwidth they can be legally 482 /// represented as. 483 void truncateToMinimalBitwidths(); 484 485 /// Insert the new loop to the loop hierarchy and pass manager 486 /// and update the analysis passes. 487 void updateAnalysis(); 488 489 /// Create a broadcast instruction. This method generates a broadcast 490 /// instruction (shuffle) for loop invariant values and for the induction 491 /// value. If this is the induction variable then we extend it to N, N+1, ... 492 /// this is needed because each iteration in the loop corresponds to a SIMD 493 /// element. 494 virtual Value *getBroadcastInstrs(Value *V); 495 496 /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...) 497 /// to each vector element of Val. The sequence starts at StartIndex. 498 /// \p Opcode is relevant for FP induction variable. 499 virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step, 500 Instruction::BinaryOps Opcode = 501 Instruction::BinaryOpsEnd); 502 503 /// Compute scalar induction steps. \p ScalarIV is the scalar induction 504 /// variable on which to base the steps, \p Step is the size of the step, and 505 /// \p EntryVal is the value from the original loop that maps to the steps. 506 /// Note that \p EntryVal doesn't have to be an induction variable - it 507 /// can also be a truncate instruction. 508 void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal, 509 const InductionDescriptor &ID); 510 511 /// Create a vector induction phi node based on an existing scalar one. \p 512 /// EntryVal is the value from the original loop that maps to the vector phi 513 /// node, and \p Step is the loop-invariant step. 
If \p EntryVal is a 514 /// truncate instruction, instead of widening the original IV, we widen a 515 /// version of the IV truncated to \p EntryVal's type. 516 void createVectorIntOrFpInductionPHI(const InductionDescriptor &II, 517 Value *Step, Instruction *EntryVal); 518 519 /// Returns true if an instruction \p I should be scalarized instead of 520 /// vectorized for the chosen vectorization factor. 521 bool shouldScalarizeInstruction(Instruction *I) const; 522 523 /// Returns true if we should generate a scalar version of \p IV. 524 bool needsScalarInduction(Instruction *IV) const; 525 526 /// If there is a cast involved in the induction variable \p ID, which should 527 /// be ignored in the vectorized loop body, this function records the 528 /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the 529 /// cast. We had already proved that the casted Phi is equal to the uncasted 530 /// Phi in the vectorized loop (under a runtime guard), and therefore 531 /// there is no need to vectorize the cast - the same value can be used in the 532 /// vector loop for both the Phi and the cast. 533 /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified, 534 /// Otherwise, \p VectorLoopValue is a widened/vectorized value. 535 /// 536 /// \p EntryVal is the value from the original loop that maps to the vector 537 /// phi node and is used to distinguish what is the IV currently being 538 /// processed - original one (if \p EntryVal is a phi corresponding to the 539 /// original IV) or the "newly-created" one based on the proof mentioned above 540 /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the 541 /// latter case \p EntryVal is a TruncInst and we must not record anything for 542 /// that IV, but it's error-prone to expect callers of this routine to care 543 /// about that, hence this explicit parameter. 544 void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID, 545 const Instruction *EntryVal, 546 Value *VectorLoopValue, 547 unsigned Part, 548 unsigned Lane = UINT_MAX); 549 550 /// Generate a shuffle sequence that will reverse the vector Vec. 551 virtual Value *reverseVector(Value *Vec); 552 553 /// Returns (and creates if needed) the original loop trip count. 554 Value *getOrCreateTripCount(Loop *NewLoop); 555 556 /// Returns (and creates if needed) the trip count of the widened loop. 557 Value *getOrCreateVectorTripCount(Loop *NewLoop); 558 559 /// Returns a bitcasted value to the requested vector type. 560 /// Also handles bitcasts of vector<float> <-> vector<pointer> types. 561 Value *createBitOrPointerCast(Value *V, VectorType *DstVTy, 562 const DataLayout &DL); 563 564 /// Emit a bypass check to see if the vector trip count is zero, including if 565 /// it overflows. 566 void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass); 567 568 /// Emit a bypass check to see if all of the SCEV assumptions we've 569 /// had to make are correct. 570 void emitSCEVChecks(Loop *L, BasicBlock *Bypass); 571 572 /// Emit bypass checks to check any memory assumptions we may have made. 573 void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass); 574 575 /// Add additional metadata to \p To that was not present on \p Orig. 576 /// 577 /// Currently this is used to add the noalias annotations based on the 578 /// inserted memchecks. Use this for instructions that are *cloned* into the 579 /// vector loop. 
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AliasAnalysis *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle block between the vector and the scalar loops.
  BasicBlock *LoopMiddleBlock;

  /// The exit block of the scalar loop.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE;
       ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B,
                                              const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst))
      B.SetCurrentDebugLocation(DIL->cloneWithDuplicationFactor(UF * VF));
    else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

/// The group of interleaved loads/stores that share the same stride and are
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];     // Member of index 0
///          b = A[i+1];   // Member of index 1
///          d = A[i+3];   // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;   // Member of index 0
///          A[i+1] = b;   // Member of index 1
///          A[i+2] = c;   // Member of index 2
///          A[i+3] = d;   // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is relative to the leader and it could
  /// be negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The distance between the largest and the smallest index must be less
      // than the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// Get the member with the given index \p Index.
  ///
  /// \returns nullptr if it contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

  /// Add metadata (e.g. alias info) from the instructions in this group to \p
  /// NewInst.
  ///
  /// FIXME: this function currently does not add noalias metadata a la
  /// addNewMetadata. To do that we need to compute the intersection of the
  /// noalias info from all members.
900 void addMetadata(Instruction *NewInst) const { 901 SmallVector<Value *, 4> VL; 902 std::transform(Members.begin(), Members.end(), std::back_inserter(VL), 903 [](std::pair<int, Instruction *> p) { return p.second; }); 904 propagateMetadata(NewInst, VL); 905 } 906 907 private: 908 unsigned Factor; // Interleave Factor. 909 bool Reverse; 910 unsigned Align; 911 DenseMap<int, Instruction *> Members; 912 int SmallestKey = 0; 913 int LargestKey = 0; 914 915 // To avoid breaking dependences, vectorized instructions of an interleave 916 // group should be inserted at either the first load or the last store in 917 // program order. 918 // 919 // E.g. %even = load i32 // Insert Position 920 // %add = add i32 %even // Use of %even 921 // %odd = load i32 922 // 923 // store i32 %even 924 // %odd = add i32 // Def of %odd 925 // store i32 %odd // Insert Position 926 Instruction *InsertPos; 927 }; 928 } // end namespace llvm 929 930 namespace { 931 932 /// Drive the analysis of interleaved memory accesses in the loop. 933 /// 934 /// Use this class to analyze interleaved accesses only when we can vectorize 935 /// a loop. Otherwise it's meaningless to do analysis as the vectorization 936 /// on interleaved accesses is unsafe. 937 /// 938 /// The analysis collects interleave groups and records the relationships 939 /// between the member and the group in a map. 940 class InterleavedAccessInfo { 941 public: 942 InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L, 943 DominatorTree *DT, LoopInfo *LI, 944 const LoopAccessInfo *LAI) 945 : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(LAI) {} 946 947 ~InterleavedAccessInfo() { 948 SmallSet<InterleaveGroup *, 4> DelSet; 949 // Avoid releasing a pointer twice. 950 for (auto &I : InterleaveGroupMap) 951 DelSet.insert(I.second); 952 for (auto *Ptr : DelSet) 953 delete Ptr; 954 } 955 956 /// Analyze the interleaved accesses and collect them in interleave 957 /// groups. Substitute symbolic strides using \p Strides. 958 void analyzeInterleaving(); 959 960 /// Check if \p Instr belongs to any interleave group. 961 bool isInterleaved(Instruction *Instr) const { 962 return InterleaveGroupMap.count(Instr); 963 } 964 965 /// Get the interleave group that \p Instr belongs to. 966 /// 967 /// \returns nullptr if doesn't have such group. 968 InterleaveGroup *getInterleaveGroup(Instruction *Instr) const { 969 if (InterleaveGroupMap.count(Instr)) 970 return InterleaveGroupMap.find(Instr)->second; 971 return nullptr; 972 } 973 974 /// Returns true if an interleaved group that may access memory 975 /// out-of-bounds requires a scalar epilogue iteration for correctness. 976 bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; } 977 978 private: 979 /// A wrapper around ScalarEvolution, used to add runtime SCEV checks. 980 /// Simplifies SCEV expressions in the context of existing SCEV assumptions. 981 /// The interleaved access analysis can also add new predicates (for example 982 /// by versioning strides of pointers). 983 PredicatedScalarEvolution &PSE; 984 985 Loop *TheLoop; 986 DominatorTree *DT; 987 LoopInfo *LI; 988 const LoopAccessInfo *LAI; 989 990 /// True if the loop may contain non-reversed interleaved groups with 991 /// out-of-bounds accesses. We ensure we don't speculatively access memory 992 /// out-of-bounds by executing at least one scalar epilogue iteration. 993 bool RequiresScalarEpilogue = false; 994 995 /// Holds the relationships between the members and the interleave group. 
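  /// For example (illustrative), for the factor-4 load group shown in the
  /// InterleaveGroup documentation above, the loads of A[i], A[i+1] and
  /// A[i+3] all map to the same InterleaveGroup object.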
996 DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap; 997 998 /// Holds dependences among the memory accesses in the loop. It maps a source 999 /// access to a set of dependent sink accesses. 1000 DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences; 1001 1002 /// The descriptor for a strided memory access. 1003 struct StrideDescriptor { 1004 StrideDescriptor() = default; 1005 StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size, 1006 unsigned Align) 1007 : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {} 1008 1009 // The access's stride. It is negative for a reverse access. 1010 int64_t Stride = 0; 1011 1012 // The scalar expression of this access. 1013 const SCEV *Scev = nullptr; 1014 1015 // The size of the memory object. 1016 uint64_t Size = 0; 1017 1018 // The alignment of this access. 1019 unsigned Align = 0; 1020 }; 1021 1022 /// A type for holding instructions and their stride descriptors. 1023 using StrideEntry = std::pair<Instruction *, StrideDescriptor>; 1024 1025 /// Create a new interleave group with the given instruction \p Instr, 1026 /// stride \p Stride and alignment \p Align. 1027 /// 1028 /// \returns the newly created interleave group. 1029 InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride, 1030 unsigned Align) { 1031 assert(!InterleaveGroupMap.count(Instr) && 1032 "Already in an interleaved access group"); 1033 InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align); 1034 return InterleaveGroupMap[Instr]; 1035 } 1036 1037 /// Release the group and remove all the relationships. 1038 void releaseGroup(InterleaveGroup *Group) { 1039 for (unsigned i = 0; i < Group->getFactor(); i++) 1040 if (Instruction *Member = Group->getMember(i)) 1041 InterleaveGroupMap.erase(Member); 1042 1043 delete Group; 1044 } 1045 1046 /// Collect all the accesses with a constant stride in program order. 1047 void collectConstStrideAccesses( 1048 MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo, 1049 const ValueToValueMap &Strides); 1050 1051 /// Returns true if \p Stride is allowed in an interleaved group. 1052 static bool isStrided(int Stride) { 1053 unsigned Factor = std::abs(Stride); 1054 return Factor >= 2 && Factor <= MaxInterleaveGroupFactor; 1055 } 1056 1057 /// Returns true if \p BB is a predicated block. 1058 bool isPredicated(BasicBlock *BB) const { 1059 return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT); 1060 } 1061 1062 /// Returns true if LoopAccessInfo can be used for dependence queries. 1063 bool areDependencesValid() const { 1064 return LAI && LAI->getDepChecker().getDependences(); 1065 } 1066 1067 /// Returns true if memory accesses \p A and \p B can be reordered, if 1068 /// necessary, when constructing interleaved groups. 1069 /// 1070 /// \p A must precede \p B in program order. We return false if reordering is 1071 /// not necessary or is prevented because \p A and \p B may be dependent. 1072 bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A, 1073 StrideEntry *B) const { 1074 // Code motion for interleaved accesses can potentially hoist strided loads 1075 // and sink strided stores. The code below checks the legality of the 1076 // following two conditions: 1077 // 1078 // 1. Potentially moving a strided load (B) before any store (A) that 1079 // precedes B, or 1080 // 1081 // 2. Potentially moving a strided store (A) after any load or store (B) 1082 // that A precedes. 
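    //
    // For example (illustrative), forming a single interleaved group for the
    // strided loads of A[] below requires hoisting the load of A[i + 1]
    // above the store to B[i], which is only safe if the two accesses cannot
    // be dependent:
    //
    //   for (i = 0; i < n; i += 2) {
    //     x = A[i];
    //     B[i] = y;
    //     z = A[i + 1];
    //   }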
1083 // 1084 // It's legal to reorder A and B if we know there isn't a dependence from A 1085 // to B. Note that this determination is conservative since some 1086 // dependences could potentially be reordered safely. 1087 1088 // A is potentially the source of a dependence. 1089 auto *Src = A->first; 1090 auto SrcDes = A->second; 1091 1092 // B is potentially the sink of a dependence. 1093 auto *Sink = B->first; 1094 auto SinkDes = B->second; 1095 1096 // Code motion for interleaved accesses can't violate WAR dependences. 1097 // Thus, reordering is legal if the source isn't a write. 1098 if (!Src->mayWriteToMemory()) 1099 return true; 1100 1101 // At least one of the accesses must be strided. 1102 if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride)) 1103 return true; 1104 1105 // If dependence information is not available from LoopAccessInfo, 1106 // conservatively assume the instructions can't be reordered. 1107 if (!areDependencesValid()) 1108 return false; 1109 1110 // If we know there is a dependence from source to sink, assume the 1111 // instructions can't be reordered. Otherwise, reordering is legal. 1112 return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink); 1113 } 1114 1115 /// Collect the dependences from LoopAccessInfo. 1116 /// 1117 /// We process the dependences once during the interleaved access analysis to 1118 /// enable constant-time dependence queries. 1119 void collectDependences() { 1120 if (!areDependencesValid()) 1121 return; 1122 auto *Deps = LAI->getDepChecker().getDependences(); 1123 for (auto Dep : *Deps) 1124 Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI)); 1125 } 1126 }; 1127 1128 } // end anonymous namespace 1129 1130 static void emitMissedWarning(Function *F, Loop *L, 1131 const LoopVectorizeHints &LH, 1132 OptimizationRemarkEmitter *ORE) { 1133 LH.emitRemarkWithHints(); 1134 1135 if (LH.getForce() == LoopVectorizeHints::FK_Enabled) { 1136 if (LH.getWidth() != 1) 1137 ORE->emit(DiagnosticInfoOptimizationFailure( 1138 DEBUG_TYPE, "FailedRequestedVectorization", 1139 L->getStartLoc(), L->getHeader()) 1140 << "loop not vectorized: " 1141 << "failed explicitly specified loop vectorization"); 1142 else if (LH.getInterleave() != 1) 1143 ORE->emit(DiagnosticInfoOptimizationFailure( 1144 DEBUG_TYPE, "FailedRequestedInterleaving", L->getStartLoc(), 1145 L->getHeader()) 1146 << "loop not interleaved: " 1147 << "failed explicitly specified loop interleaving"); 1148 } 1149 } 1150 1151 namespace llvm { 1152 1153 /// LoopVectorizationCostModel - estimates the expected speedups due to 1154 /// vectorization. 1155 /// In many cases vectorization is not profitable. This can happen because of 1156 /// a number of reasons. In this class we mainly attempt to predict the 1157 /// expected speedup/slowdowns due to the supported instruction set. We use the 1158 /// TargetTransformInfo to query the different backends for the cost of 1159 /// different operations. 
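/// For example (illustrative), if the scalar loop body is estimated at 10
/// cost units while the same body widened for VF = 4 is estimated at 24
/// units, the per-lane cost of 24 / 4 = 6 is cheaper than the scalar cost of
/// 10, so VF = 4 would be preferred over the scalar loop.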
1160 class LoopVectorizationCostModel { 1161 public: 1162 LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE, 1163 LoopInfo *LI, LoopVectorizationLegality *Legal, 1164 const TargetTransformInfo &TTI, 1165 const TargetLibraryInfo *TLI, DemandedBits *DB, 1166 AssumptionCache *AC, 1167 OptimizationRemarkEmitter *ORE, const Function *F, 1168 const LoopVectorizeHints *Hints, 1169 InterleavedAccessInfo &IAI) 1170 : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB), 1171 AC(AC), ORE(ORE), TheFunction(F), Hints(Hints), InterleaveInfo(IAI) {} 1172 1173 /// \return An upper bound for the vectorization factor, or None if 1174 /// vectorization should be avoided up front. 1175 Optional<unsigned> computeMaxVF(bool OptForSize); 1176 1177 /// \return The most profitable vectorization factor and the cost of that VF. 1178 /// This method checks every power of two up to MaxVF. If UserVF is not ZERO 1179 /// then this vectorization factor will be selected if vectorization is 1180 /// possible. 1181 VectorizationFactor selectVectorizationFactor(unsigned MaxVF); 1182 1183 /// Setup cost-based decisions for user vectorization factor. 1184 void selectUserVectorizationFactor(unsigned UserVF) { 1185 collectUniformsAndScalars(UserVF); 1186 collectInstsToScalarize(UserVF); 1187 } 1188 1189 /// \return The size (in bits) of the smallest and widest types in the code 1190 /// that needs to be vectorized. We ignore values that remain scalar such as 1191 /// 64 bit loop indices. 1192 std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); 1193 1194 /// \return The desired interleave count. 1195 /// If interleave count has been specified by metadata it will be returned. 1196 /// Otherwise, the interleave count is computed and returned. VF and LoopCost 1197 /// are the selected vectorization factor and the cost of the selected VF. 1198 unsigned selectInterleaveCount(bool OptForSize, unsigned VF, 1199 unsigned LoopCost); 1200 1201 /// Memory access instruction may be vectorized in more than one way. 1202 /// Form of instruction after vectorization depends on cost. 1203 /// This function takes cost-based decisions for Load/Store instructions 1204 /// and collects them in a map. This decisions map is used for building 1205 /// the lists of loop-uniform and loop-scalar instructions. 1206 /// The calculated cost is saved with widening decision in order to 1207 /// avoid redundant calculations. 1208 void setCostBasedWideningDecision(unsigned VF); 1209 1210 /// A struct that represents some properties of the register usage 1211 /// of a loop. 1212 struct RegisterUsage { 1213 /// Holds the number of loop invariant values that are used in the loop. 1214 unsigned LoopInvariantRegs; 1215 1216 /// Holds the maximum number of concurrent live intervals in the loop. 1217 unsigned MaxLocalUsers; 1218 }; 1219 1220 /// \return Returns information about the register usages of the loop for the 1221 /// given vectorization factors. 1222 SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs); 1223 1224 /// Collect values we want to ignore in the cost model. 1225 void collectValuesToIgnore(); 1226 1227 /// \returns The smallest bitwidth each instruction can be represented with. 1228 /// The vector equivalents of these instructions should be truncated to this 1229 /// type. 
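  /// For example (illustrative), a chain of arithmetic whose inputs are
  /// loaded as i8 and whose result is stored back as i8 may be narrowed to
  /// <VF x i8> vectors even though the scalar code promoted it to i32.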
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
    assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");
    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.count(I);
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;
    assert(Uniforms.count(VF) && "VF not yet analyzed for uniformity");
    auto UniformsPerVF = Uniforms.find(VF);
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;
    assert(Scalars.count(VF) && "Scalar values are not calculated for VF");
    auto ScalarsPerVF = Scalars.find(VF);
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
    return VF > 1 && MinBWs.count(I) && !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for a memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
                           unsigned Cost) {
    assert(VF >= 2 && "Expected VF >= 2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup *Grp, unsigned VF,
                           InstWidening W, unsigned Cost) {
    assert(VF >= 2 && "Expected VF >= 2");
    // Broadcast this decision to all instructions inside the group.
    // But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
1307 InstWidening getWideningDecision(Instruction *I, unsigned VF) { 1308 assert(VF >= 2 && "Expected VF >=2"); 1309 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF); 1310 auto Itr = WideningDecisions.find(InstOnVF); 1311 if (Itr == WideningDecisions.end()) 1312 return CM_Unknown; 1313 return Itr->second.first; 1314 } 1315 1316 /// Return the vectorization cost for the given instruction \p I and vector 1317 /// width \p VF. 1318 unsigned getWideningCost(Instruction *I, unsigned VF) { 1319 assert(VF >= 2 && "Expected VF >=2"); 1320 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF); 1321 assert(WideningDecisions.count(InstOnVF) && "The cost is not calculated"); 1322 return WideningDecisions[InstOnVF].second; 1323 } 1324 1325 /// Return True if instruction \p I is an optimizable truncate whose operand 1326 /// is an induction variable. Such a truncate will be removed by adding a new 1327 /// induction variable with the destination type. 1328 bool isOptimizableIVTruncate(Instruction *I, unsigned VF) { 1329 // If the instruction is not a truncate, return false. 1330 auto *Trunc = dyn_cast<TruncInst>(I); 1331 if (!Trunc) 1332 return false; 1333 1334 // Get the source and destination types of the truncate. 1335 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1336 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1337 1338 // If the truncate is free for the given types, return false. Replacing a 1339 // free truncate with an induction variable would add an induction variable 1340 // update instruction to each iteration of the loop. We exclude from this 1341 // check the primary induction variable since it will need an update 1342 // instruction regardless. 1343 Value *Op = Trunc->getOperand(0); 1344 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1345 return false; 1346 1347 // If the truncated value is not an induction variable, return false. 1348 return Legal->isInductionPhi(Op); 1349 } 1350 1351 /// Collects the instructions to scalarize for each predicated instruction in 1352 /// the loop. 1353 void collectInstsToScalarize(unsigned VF); 1354 1355 /// Collect Uniform and Scalar values for the given \p VF. 1356 /// The sets depend on CM decision for Load/Store instructions 1357 /// that may be vectorized as interleave, gather-scatter or scalarized. 1358 void collectUniformsAndScalars(unsigned VF) { 1359 // Do the analysis once. 1360 if (VF == 1 || Uniforms.count(VF)) 1361 return; 1362 setCostBasedWideningDecision(VF); 1363 collectLoopUniforms(VF); 1364 collectLoopScalars(VF); 1365 } 1366 1367 /// Returns true if the target machine supports masked store operation 1368 /// for the given \p DataType and kind of access to \p Ptr. 1369 bool isLegalMaskedStore(Type *DataType, Value *Ptr) { 1370 return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedStore(DataType); 1371 } 1372 1373 /// Returns true if the target machine supports masked load operation 1374 /// for the given \p DataType and kind of access to \p Ptr. 1375 bool isLegalMaskedLoad(Type *DataType, Value *Ptr) { 1376 return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedLoad(DataType); 1377 } 1378 1379 /// Returns true if the target machine supports masked scatter operation 1380 /// for the given \p DataType. 1381 bool isLegalMaskedScatter(Type *DataType) { 1382 return TTI.isLegalMaskedScatter(DataType); 1383 } 1384 1385 /// Returns true if the target machine supports masked gather operation 1386 /// for the given \p DataType. 
1387 bool isLegalMaskedGather(Type *DataType) { 1388 return TTI.isLegalMaskedGather(DataType); 1389 } 1390 1391 /// Returns true if the target machine can represent \p V as a masked gather 1392 /// or scatter operation. 1393 bool isLegalGatherOrScatter(Value *V) { 1394 bool LI = isa<LoadInst>(V); 1395 bool SI = isa<StoreInst>(V); 1396 if (!LI && !SI) 1397 return false; 1398 auto *Ty = getMemInstValueType(V); 1399 return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty)); 1400 } 1401 1402 /// Returns true if \p I is an instruction that will be scalarized with 1403 /// predication. Such instructions include conditional stores and 1404 /// instructions that may divide by zero. 1405 bool isScalarWithPredication(Instruction *I); 1406 1407 /// Returns true if \p I is a memory instruction with consecutive memory 1408 /// access that can be widened. 1409 bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1); 1410 1411 /// Check if \p Instr belongs to any interleaved access group. 1412 bool isAccessInterleaved(Instruction *Instr) { 1413 return InterleaveInfo.isInterleaved(Instr); 1414 } 1415 1416 /// Get the interleaved access group that \p Instr belongs to. 1417 const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) { 1418 return InterleaveInfo.getInterleaveGroup(Instr); 1419 } 1420 1421 /// Returns true if an interleaved group requires a scalar iteration 1422 /// to handle accesses with gaps. 1423 bool requiresScalarEpilogue() const { 1424 return InterleaveInfo.requiresScalarEpilogue(); 1425 } 1426 1427 private: 1428 unsigned NumPredStores = 0; 1429 1430 /// \return An upper bound for the vectorization factor, larger than zero. 1431 /// One is returned if vectorization should best be avoided due to cost. 1432 unsigned computeFeasibleMaxVF(bool OptForSize, unsigned ConstTripCount); 1433 1434 /// The vectorization cost is a combination of the cost itself and a boolean 1435 /// indicating whether any of the contributing operations will actually 1436 /// operate on 1437 /// vector values after type legalization in the backend. If this latter value 1438 /// is 1439 /// false, then all operations will be scalarized (i.e. no vectorization has 1440 /// actually taken place). 1441 using VectorizationCostTy = std::pair<unsigned, bool>; 1442 1443 /// Returns the expected execution cost. The unit of the cost does 1444 /// not matter because we use the 'cost' units to compare different 1445 /// vector widths. The cost that is returned is *not* normalized by 1446 /// the factor width. 1447 VectorizationCostTy expectedCost(unsigned VF); 1448 1449 /// Returns the execution time cost of an instruction for a given vector 1450 /// width. Vector width of one means scalar. 1451 VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF); 1452 1453 /// The cost-computation logic from getInstructionCost which provides 1454 /// the vector type as an output parameter. 1455 unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy); 1456 1457 /// Calculate vectorization cost of memory instruction \p I. 1458 unsigned getMemoryInstructionCost(Instruction *I, unsigned VF); 1459 1460 /// The cost computation for scalarized memory instruction. 1461 unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF); 1462 1463 /// The cost computation for interleaving group of memory instructions. 1464 unsigned getInterleaveGroupCost(Instruction *I, unsigned VF); 1465 1466 /// The cost computation for Gather/Scatter instruction. 
1467 unsigned getGatherScatterCost(Instruction *I, unsigned VF); 1468 1469 /// The cost computation for widening instruction \p I with consecutive 1470 /// memory access. 1471 unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF); 1472 1473 /// The cost calculation for Load instruction \p I with uniform pointer - 1474 /// scalar load + broadcast. 1475 unsigned getUniformMemOpCost(Instruction *I, unsigned VF); 1476 1477 /// Returns whether the instruction is a load or store and will be emitted 1478 /// as a vector operation. 1479 bool isConsecutiveLoadOrStore(Instruction *I); 1480 1481 /// Returns true if an artificially high cost for emulated masked memrefs 1482 /// should be used. 1483 bool useEmulatedMaskMemRefHack(Instruction *I); 1484 1485 /// Create an analysis remark that explains why vectorization failed 1486 /// 1487 /// \p RemarkName is the identifier for the remark. \return the remark object 1488 /// that can be streamed to. 1489 OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) { 1490 return createLVMissedAnalysis(Hints->vectorizeAnalysisPassName(), 1491 RemarkName, TheLoop); 1492 } 1493 1494 /// Map of scalar integer values to the smallest bitwidth they can be legally 1495 /// represented as. The vector equivalents of these values should be truncated 1496 /// to this type. 1497 MapVector<Instruction *, uint64_t> MinBWs; 1498 1499 /// A type representing the costs for instructions if they were to be 1500 /// scalarized rather than vectorized. The entries are Instruction-Cost 1501 /// pairs. 1502 using ScalarCostsTy = DenseMap<Instruction *, unsigned>; 1503 1504 /// A set containing all BasicBlocks that are known to be present after 1505 /// vectorization as a predicated block. 1506 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization; 1507 1508 /// A map holding scalar costs for different vectorization factors. The 1509 /// presence of a cost for an instruction in the mapping indicates that the 1510 /// instruction will be scalarized when vectorizing with the associated 1511 /// vectorization factor. The entries are VF-ScalarCostTy pairs. 1512 DenseMap<unsigned, ScalarCostsTy> InstsToScalarize; 1513 1514 /// Holds the instructions known to be uniform after vectorization. 1515 /// The data is collected per VF. 1516 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms; 1517 1518 /// Holds the instructions known to be scalar after vectorization. 1519 /// The data is collected per VF. 1520 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars; 1521 1522 /// Holds the instructions (address computations) that are forced to be 1523 /// scalarized. 1524 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1525 1526 /// Returns the expected difference in cost from scalarizing the expression 1527 /// feeding a predicated instruction \p PredInst. The instructions to 1528 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1529 /// non-negative return value implies the expression will be scalarized. 1530 /// Currently, only single-use chains are considered for scalarization. 1531 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1532 unsigned VF); 1533 1534 /// Collect the instructions that are uniform after vectorization. An 1535 /// instruction is uniform if we represent it with a single scalar value in 1536 /// the vectorized loop corresponding to each vector iteration.
Examples of 1537 /// uniform instructions include pointer operands of consecutive or 1538 /// interleaved memory accesses. Note that although uniformity implies an 1539 /// instruction will be scalar, the reverse is not true. In general, a 1540 /// scalarized instruction will be represented by VF scalar values in the 1541 /// vectorized loop, each corresponding to an iteration of the original 1542 /// scalar loop. 1543 void collectLoopUniforms(unsigned VF); 1544 1545 /// Collect the instructions that are scalar after vectorization. An 1546 /// instruction is scalar if it is known to be uniform or will be scalarized 1547 /// during vectorization. Non-uniform scalarized instructions will be 1548 /// represented by VF values in the vectorized loop, each corresponding to an 1549 /// iteration of the original scalar loop. 1550 void collectLoopScalars(unsigned VF); 1551 1552 /// Keeps cost model vectorization decision and cost for instructions. 1553 /// Right now it is used for memory instructions only. 1554 using DecisionList = DenseMap<std::pair<Instruction *, unsigned>, 1555 std::pair<InstWidening, unsigned>>; 1556 1557 DecisionList WideningDecisions; 1558 1559 public: 1560 /// The loop that we evaluate. 1561 Loop *TheLoop; 1562 1563 /// Predicated scalar evolution analysis. 1564 PredicatedScalarEvolution &PSE; 1565 1566 /// Loop Info analysis. 1567 LoopInfo *LI; 1568 1569 /// Vectorization legality. 1570 LoopVectorizationLegality *Legal; 1571 1572 /// Vector target information. 1573 const TargetTransformInfo &TTI; 1574 1575 /// Target Library Info. 1576 const TargetLibraryInfo *TLI; 1577 1578 /// Demanded bits analysis. 1579 DemandedBits *DB; 1580 1581 /// Assumption cache. 1582 AssumptionCache *AC; 1583 1584 /// Interface to emit optimization remarks. 1585 OptimizationRemarkEmitter *ORE; 1586 1587 const Function *TheFunction; 1588 1589 /// Loop Vectorize Hint. 1590 const LoopVectorizeHints *Hints; 1591 1592 /// The interleave access information contains groups of interleaved accesses 1593 /// with the same stride and close to each other. 1594 InterleavedAccessInfo &InterleaveInfo; 1595 1596 /// Values to ignore in the cost model. 1597 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1598 1599 /// Values to ignore in the cost model when VF > 1. 1600 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1601 }; 1602 1603 } // end namespace llvm 1604 1605 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 1606 // vectorization. The loop needs to be annotated with #pragma omp simd 1607 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 1608 // vector length information is not provided, vectorization is not considered 1609 // explicit. Interleave hints are not allowed either. These limitations will be 1610 // relaxed in the future. 1611 // Please, note that we are currently forced to abuse the pragma 'clang 1612 // vectorize' semantics. This pragma provides *auto-vectorization hints* 1613 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 1614 // provides *explicit vectorization hints* (LV can bypass legal checks and 1615 // assume that vectorization is legal). However, both hints are implemented 1616 // using the same metadata (llvm.loop.vectorize, processed by 1617 // LoopVectorizeHints). This will be fixed in the future when the native IR 1618 // representation for pragma 'omp simd' is introduced. 
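// As an illustration (a hypothetical user loop, not taken from this file or
// its tests), an outer loop that would currently qualify looks like:
//
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (int i = 0; i < N; ++i)        // annotated outer loop
//     for (int j = 0; j < M; ++j)
//       A[i][j] += B[i][j];
//
// The same loop without the vectorize_width clause, or with an additional
// interleave hint, is rejected by isExplicitVecOuterLoop below.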
1619 static bool isExplicitVecOuterLoop(Loop *OuterLp, 1620 OptimizationRemarkEmitter *ORE) { 1621 assert(!OuterLp->empty() && "This is not an outer loop"); 1622 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 1623 1624 // Only outer loops with an explicit vectorization hint are supported. 1625 // Unannotated outer loops are ignored. 1626 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 1627 return false; 1628 1629 Function *Fn = OuterLp->getHeader()->getParent(); 1630 if (!Hints.allowVectorization(Fn, OuterLp, false /*AlwaysVectorize*/)) { 1631 DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 1632 return false; 1633 } 1634 1635 if (!Hints.getWidth()) { 1636 DEBUG(dbgs() << "LV: Not vectorizing: No user vector width.\n"); 1637 emitMissedWarning(Fn, OuterLp, Hints, ORE); 1638 return false; 1639 } 1640 1641 if (Hints.getInterleave() > 1) { 1642 // TODO: Interleave support is future work. 1643 DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 1644 "outer loops.\n"); 1645 emitMissedWarning(Fn, OuterLp, Hints, ORE); 1646 return false; 1647 } 1648 1649 return true; 1650 } 1651 1652 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 1653 OptimizationRemarkEmitter *ORE, 1654 SmallVectorImpl<Loop *> &V) { 1655 // Collect inner loops and outer loops without irreducible control flow. For 1656 // now, only collect outer loops that have explicit vectorization hints. 1657 if (L.empty() || (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 1658 LoopBlocksRPO RPOT(&L); 1659 RPOT.perform(LI); 1660 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 1661 V.push_back(&L); 1662 // TODO: Collect inner loops inside marked outer loops in case 1663 // vectorization fails for the outer loop. Do not invoke 1664 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 1665 // already known to be reducible. We can use an inherited attribute for 1666 // that. 1667 return; 1668 } 1669 } 1670 for (Loop *InnerL : L) 1671 collectSupportedLoops(*InnerL, LI, ORE, V); 1672 } 1673 1674 namespace { 1675 1676 /// The LoopVectorize Pass. 1677 struct LoopVectorize : public FunctionPass { 1678 /// Pass identification, replacement for typeid 1679 static char ID; 1680 1681 LoopVectorizePass Impl; 1682 1683 explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true) 1684 : FunctionPass(ID) { 1685 Impl.DisableUnrolling = NoUnrolling; 1686 Impl.AlwaysVectorize = AlwaysVectorize; 1687 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1688 } 1689 1690 bool runOnFunction(Function &F) override { 1691 if (skipFunction(F)) 1692 return false; 1693 1694 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1695 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1696 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1697 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1698 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1699 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1700 auto *TLI = TLIP ? 
&TLIP->getTLI() : nullptr; 1701 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1702 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1703 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1704 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1705 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1706 1707 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1708 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1709 1710 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1711 GetLAA, *ORE); 1712 } 1713 1714 void getAnalysisUsage(AnalysisUsage &AU) const override { 1715 AU.addRequired<AssumptionCacheTracker>(); 1716 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1717 AU.addRequired<DominatorTreeWrapperPass>(); 1718 AU.addRequired<LoopInfoWrapperPass>(); 1719 AU.addRequired<ScalarEvolutionWrapperPass>(); 1720 AU.addRequired<TargetTransformInfoWrapperPass>(); 1721 AU.addRequired<AAResultsWrapperPass>(); 1722 AU.addRequired<LoopAccessLegacyAnalysis>(); 1723 AU.addRequired<DemandedBitsWrapperPass>(); 1724 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1725 AU.addPreserved<LoopInfoWrapperPass>(); 1726 AU.addPreserved<DominatorTreeWrapperPass>(); 1727 AU.addPreserved<BasicAAWrapperPass>(); 1728 AU.addPreserved<GlobalsAAWrapperPass>(); 1729 } 1730 }; 1731 1732 } // end anonymous namespace 1733 1734 //===----------------------------------------------------------------------===// 1735 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 1736 // LoopVectorizationCostModel and LoopVectorizationPlanner. 1737 //===----------------------------------------------------------------------===// 1738 1739 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 1740 // We need to place the broadcast of invariant variables outside the loop, 1741 // but only if it's proven safe to do so. Else, broadcast will be inside 1742 // vector loop body. 1743 Instruction *Instr = dyn_cast<Instruction>(V); 1744 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 1745 (!Instr || 1746 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 1747 // Place the code for broadcasting invariant variables in the new preheader. 1748 IRBuilder<>::InsertPointGuard Guard(Builder); 1749 if (SafeToHoist) 1750 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1751 1752 // Broadcast the scalar into all locations in the vector. 
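  // As a rough sketch (assuming VF = 4 and an i32 value %v; value names are
  // illustrative only), CreateVectorSplat below expands to an insertelement
  // into lane 0 followed by a zero-mask shufflevector:
  //   %ins   = insertelement <4 x i32> undef, i32 %v, i32 0
  //   %splat = shufflevector <4 x i32> %ins, <4 x i32> undef, <4 x i32> zeroinitializer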
1753 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 1754 1755 return Shuf; 1756 } 1757 1758 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 1759 const InductionDescriptor &II, Value *Step, Instruction *EntryVal) { 1760 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1761 "Expected either an induction phi-node or a truncate of it!"); 1762 Value *Start = II.getStartValue(); 1763 1764 // Construct the initial value of the vector IV in the vector loop preheader 1765 auto CurrIP = Builder.saveIP(); 1766 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1767 if (isa<TruncInst>(EntryVal)) { 1768 assert(Start->getType()->isIntegerTy() && 1769 "Truncation requires an integer type"); 1770 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 1771 Step = Builder.CreateTrunc(Step, TruncType); 1772 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 1773 } 1774 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 1775 Value *SteppedStart = 1776 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 1777 1778 // We create vector phi nodes for both integer and floating-point induction 1779 // variables. Here, we determine the kind of arithmetic we will perform. 1780 Instruction::BinaryOps AddOp; 1781 Instruction::BinaryOps MulOp; 1782 if (Step->getType()->isIntegerTy()) { 1783 AddOp = Instruction::Add; 1784 MulOp = Instruction::Mul; 1785 } else { 1786 AddOp = II.getInductionOpcode(); 1787 MulOp = Instruction::FMul; 1788 } 1789 1790 // Multiply the vectorization factor by the step using integer or 1791 // floating-point arithmetic as appropriate. 1792 Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF); 1793 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 1794 1795 // Create a vector splat to use in the induction update. 1796 // 1797 // FIXME: If the step is non-constant, we create the vector splat with 1798 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 1799 // handle a constant vector splat. 1800 Value *SplatVF = isa<Constant>(Mul) 1801 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 1802 : Builder.CreateVectorSplat(VF, Mul); 1803 Builder.restoreIP(CurrIP); 1804 1805 // We may need to add the step a number of times, depending on the unroll 1806 // factor. The last of those goes into the PHI. 1807 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 1808 &*LoopVectorBody->getFirstInsertionPt()); 1809 Instruction *LastInduction = VecInd; 1810 for (unsigned Part = 0; Part < UF; ++Part) { 1811 VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction); 1812 1813 if (isa<TruncInst>(EntryVal)) 1814 addMetadata(LastInduction, EntryVal); 1815 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part); 1816 1817 LastInduction = cast<Instruction>(addFastMathFlag( 1818 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"))); 1819 } 1820 1821 // Move the last step to the end of the latch block. This ensures consistent 1822 // placement of all induction updates. 
1823 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 1824 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 1825 auto *ICmp = cast<Instruction>(Br->getCondition()); 1826 LastInduction->moveBefore(ICmp); 1827 LastInduction->setName("vec.ind.next"); 1828 1829 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 1830 VecInd->addIncoming(LastInduction, LoopVectorLatch); 1831 } 1832 1833 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 1834 return Cost->isScalarAfterVectorization(I, VF) || 1835 Cost->isProfitableToScalarize(I, VF); 1836 } 1837 1838 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 1839 if (shouldScalarizeInstruction(IV)) 1840 return true; 1841 auto isScalarInst = [&](User *U) -> bool { 1842 auto *I = cast<Instruction>(U); 1843 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 1844 }; 1845 return llvm::any_of(IV->users(), isScalarInst); 1846 } 1847 1848 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 1849 const InductionDescriptor &ID, const Instruction *EntryVal, 1850 Value *VectorLoopVal, unsigned Part, unsigned Lane) { 1851 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1852 "Expected either an induction phi-node or a truncate of it!"); 1853 1854 // This induction variable is not the phi from the original loop but the 1855 // newly-created IV based on the proof that the casted Phi is equal to the 1856 // uncasted Phi in the vectorized loop (possibly under a runtime guard). It 1857 // reuses the same InductionDescriptor that the original IV uses, but we don't 1858 // have to do any recording in this case - that is done when the original IV is 1859 // processed. 1860 if (isa<TruncInst>(EntryVal)) 1861 return; 1862 1863 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 1864 if (Casts.empty()) 1865 return; 1866 // Only the first Cast instruction in the Casts vector is of interest. 1867 // The rest of the Casts (if they exist) have no uses outside the 1868 // induction update chain itself. 1869 Instruction *CastInst = *Casts.begin(); 1870 if (Lane < UINT_MAX) 1871 VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal); 1872 else 1873 VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal); 1874 } 1875 1876 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) { 1877 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 1878 "Primary induction variable must have an integer type"); 1879 1880 auto II = Legal->getInductionVars()->find(IV); 1881 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 1882 1883 auto ID = II->second; 1884 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 1885 1886 // The scalar value to broadcast. This will be derived from the canonical 1887 // induction variable. 1888 Value *ScalarIV = nullptr; 1889 1890 // The value from the original loop to which we are mapping the new induction 1891 // variable. 1892 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 1893 1894 // True if we have vectorized the induction variable. 1895 auto VectorizedIV = false; 1896 1897 // Determine if we want a scalar version of the induction variable. This is 1898 // true if the induction variable itself is not widened, or if it has at 1899 // least one user in the loop that is not widened. 1900 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal); 1901 1902 // Generate code for the induction step.
Note that induction steps are 1903 // required to be loop-invariant 1904 assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) && 1905 "Induction step should be loop invariant"); 1906 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 1907 Value *Step = nullptr; 1908 if (PSE.getSE()->isSCEVable(IV->getType())) { 1909 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 1910 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 1911 LoopVectorPreHeader->getTerminator()); 1912 } else { 1913 Step = cast<SCEVUnknown>(ID.getStep())->getValue(); 1914 } 1915 1916 // Try to create a new independent vector induction variable. If we can't 1917 // create the phi node, we will splat the scalar induction variable in each 1918 // loop iteration. 1919 if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) { 1920 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 1921 VectorizedIV = true; 1922 } 1923 1924 // If we haven't yet vectorized the induction variable, or if we will create 1925 // a scalar one, we need to define the scalar induction variable and step 1926 // values. If we were given a truncation type, truncate the canonical 1927 // induction variable and step. Otherwise, derive these values from the 1928 // induction descriptor. 1929 if (!VectorizedIV || NeedsScalarIV) { 1930 ScalarIV = Induction; 1931 if (IV != OldInduction) { 1932 ScalarIV = IV->getType()->isIntegerTy() 1933 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 1934 : Builder.CreateCast(Instruction::SIToFP, Induction, 1935 IV->getType()); 1936 ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL); 1937 ScalarIV->setName("offset.idx"); 1938 } 1939 if (Trunc) { 1940 auto *TruncType = cast<IntegerType>(Trunc->getType()); 1941 assert(Step->getType()->isIntegerTy() && 1942 "Truncation requires an integer step"); 1943 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 1944 Step = Builder.CreateTrunc(Step, TruncType); 1945 } 1946 } 1947 1948 // If we haven't yet vectorized the induction variable, splat the scalar 1949 // induction variable, and build the necessary step vectors. 1950 // TODO: Don't do it unless the vectorized IV is really required. 1951 if (!VectorizedIV) { 1952 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 1953 for (unsigned Part = 0; Part < UF; ++Part) { 1954 Value *EntryPart = 1955 getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode()); 1956 VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart); 1957 if (Trunc) 1958 addMetadata(EntryPart, Trunc); 1959 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part); 1960 } 1961 } 1962 1963 // If an induction variable is only used for counting loop iterations or 1964 // calculating addresses, it doesn't need to be widened. Create scalar steps 1965 // that can be used by instructions we will later scalarize. Note that the 1966 // addition of the scalar steps will not increase the number of instructions 1967 // in the loop in the common case prior to InstCombine. We will be trading 1968 // one vector extract for each scalar step. 1969 if (NeedsScalarIV) 1970 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 1971 } 1972 1973 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 1974 Instruction::BinaryOps BinOp) { 1975 // Create and check the types. 
1976 assert(Val->getType()->isVectorTy() && "Must be a vector"); 1977 int VLen = Val->getType()->getVectorNumElements(); 1978 1979 Type *STy = Val->getType()->getScalarType(); 1980 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 1981 "Induction Step must be an integer or FP"); 1982 assert(Step->getType() == STy && "Step has wrong type"); 1983 1984 SmallVector<Constant *, 8> Indices; 1985 1986 if (STy->isIntegerTy()) { 1987 // Create a vector of consecutive numbers from zero to VF. 1988 for (int i = 0; i < VLen; ++i) 1989 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 1990 1991 // Add the consecutive indices to the vector value. 1992 Constant *Cv = ConstantVector::get(Indices); 1993 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 1994 Step = Builder.CreateVectorSplat(VLen, Step); 1995 assert(Step->getType() == Val->getType() && "Invalid step vec"); 1996 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 1997 // which can be found from the original scalar operations. 1998 Step = Builder.CreateMul(Cv, Step); 1999 return Builder.CreateAdd(Val, Step, "induction"); 2000 } 2001 2002 // Floating point induction. 2003 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2004 "Binary Opcode should be specified for FP induction"); 2005 // Create a vector of consecutive numbers from zero to VF. 2006 for (int i = 0; i < VLen; ++i) 2007 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 2008 2009 // Add the consecutive indices to the vector value. 2010 Constant *Cv = ConstantVector::get(Indices); 2011 2012 Step = Builder.CreateVectorSplat(VLen, Step); 2013 2014 // Floating point operations had to be 'fast' to enable the induction. 2015 FastMathFlags Flags; 2016 Flags.setFast(); 2017 2018 Value *MulOp = Builder.CreateFMul(Cv, Step); 2019 if (isa<Instruction>(MulOp)) 2020 // Have to check, MulOp may be a constant 2021 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 2022 2023 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2024 if (isa<Instruction>(BOp)) 2025 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2026 return BOp; 2027 } 2028 2029 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2030 Instruction *EntryVal, 2031 const InductionDescriptor &ID) { 2032 // We shouldn't have to build scalar steps if we aren't vectorizing. 2033 assert(VF > 1 && "VF should be greater than one"); 2034 2035 // Get the value type and ensure it and the step have the same integer type. 2036 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2037 assert(ScalarIVTy == Step->getType() && 2038 "Val and Step should have the same type"); 2039 2040 // We build scalar steps for both integer and floating-point induction 2041 // variables. Here, we determine the kind of arithmetic we will perform. 2042 Instruction::BinaryOps AddOp; 2043 Instruction::BinaryOps MulOp; 2044 if (ScalarIVTy->isIntegerTy()) { 2045 AddOp = Instruction::Add; 2046 MulOp = Instruction::Mul; 2047 } else { 2048 AddOp = ID.getInductionOpcode(); 2049 MulOp = Instruction::FMul; 2050 } 2051 2052 // Determine the number of scalars we need to generate for each unroll 2053 // iteration. If EntryVal is uniform, we only need to generate the first 2054 // lane. Otherwise, we generate all VF values. 2055 unsigned Lanes = 2056 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1 2057 : VF; 2058 // Compute the scalar steps and save the results in VectorLoopValueMap. 
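  // For example (a sketch assuming VF = 4, UF = 2, an integer IV, and a
  // non-uniform EntryVal), the scalar values produced are:
  //   Part 0: ScalarIV + 0*Step, ScalarIV + 1*Step, ScalarIV + 2*Step, ScalarIV + 3*Step
  //   Part 1: ScalarIV + 4*Step, ScalarIV + 5*Step, ScalarIV + 6*Step, ScalarIV + 7*Step
  // For a uniform EntryVal only lane 0 of each part is generated.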
2059 for (unsigned Part = 0; Part < UF; ++Part) { 2060 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2061 auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane); 2062 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 2063 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 2064 VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add); 2065 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane); 2066 } 2067 } 2068 } 2069 2070 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) { 2071 assert(V != Induction && "The new induction variable should not be used."); 2072 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2073 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2074 2075 // If we have a stride that is replaced by one, do it here. 2076 if (Legal->hasStride(V)) 2077 V = ConstantInt::get(V->getType(), 1); 2078 2079 // If we have a vector mapped to this value, return it. 2080 if (VectorLoopValueMap.hasVectorValue(V, Part)) 2081 return VectorLoopValueMap.getVectorValue(V, Part); 2082 2083 // If the value has not been vectorized, check if it has been scalarized 2084 // instead. If it has been scalarized, and we actually need the value in 2085 // vector form, we will construct the vector values on demand. 2086 if (VectorLoopValueMap.hasAnyScalarValue(V)) { 2087 Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0}); 2088 2089 // If we've scalarized a value, that value should be an instruction. 2090 auto *I = cast<Instruction>(V); 2091 2092 // If we aren't vectorizing, we can just copy the scalar map values over to 2093 // the vector map. 2094 if (VF == 1) { 2095 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue); 2096 return ScalarValue; 2097 } 2098 2099 // Get the last scalar instruction we generated for V and Part. If the value 2100 // is known to be uniform after vectorization, this corresponds to lane zero 2101 // of the Part unroll iteration. Otherwise, the last instruction is the one 2102 // we created for the last vector lane of the Part unroll iteration. 2103 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1; 2104 auto *LastInst = cast<Instruction>( 2105 VectorLoopValueMap.getScalarValue(V, {Part, LastLane})); 2106 2107 // Set the insert point after the last scalarized instruction. This ensures 2108 // the insertelement sequence will directly follow the scalar definitions. 2109 auto OldIP = Builder.saveIP(); 2110 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 2111 Builder.SetInsertPoint(&*NewIP); 2112 2113 // However, if we are vectorizing, we need to construct the vector values. 2114 // If the value is known to be uniform after vectorization, we can just 2115 // broadcast the scalar value corresponding to lane zero for each unroll 2116 // iteration. Otherwise, we construct the vector values using insertelement 2117 // instructions. Since the resulting vectors are stored in 2118 // VectorLoopValueMap, we will only generate the insertelements once. 2119 Value *VectorValue = nullptr; 2120 if (Cost->isUniformAfterVectorization(I, VF)) { 2121 VectorValue = getBroadcastInstrs(ScalarValue); 2122 VectorLoopValueMap.setVectorValue(V, Part, VectorValue); 2123 } else { 2124 // Initialize packing with insertelements to start from undef. 
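      // A sketch of the packing (assuming VF = 4; value names are
      // illustrative): starting from undef, one insertelement is created per
      // lane by packScalarIntoVectorValue, e.g.
      //   %v0 = insertelement <4 x Ty> undef, Ty %s0, i32 0
      //   %v1 = insertelement <4 x Ty> %v0, Ty %s1, i32 1
      //   ... up to lane VF - 1, which becomes the vector value for this part.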
2125 Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF)); 2126 VectorLoopValueMap.setVectorValue(V, Part, Undef); 2127 for (unsigned Lane = 0; Lane < VF; ++Lane) 2128 packScalarIntoVectorValue(V, {Part, Lane}); 2129 VectorValue = VectorLoopValueMap.getVectorValue(V, Part); 2130 } 2131 Builder.restoreIP(OldIP); 2132 return VectorValue; 2133 } 2134 2135 // If this scalar is unknown, assume that it is a constant or that it is 2136 // loop invariant. Broadcast V and save the value for future uses. 2137 Value *B = getBroadcastInstrs(V); 2138 VectorLoopValueMap.setVectorValue(V, Part, B); 2139 return B; 2140 } 2141 2142 Value * 2143 InnerLoopVectorizer::getOrCreateScalarValue(Value *V, 2144 const VPIteration &Instance) { 2145 // If the value is not an instruction contained in the loop, it should 2146 // already be scalar. 2147 if (OrigLoop->isLoopInvariant(V)) 2148 return V; 2149 2150 assert(Instance.Lane > 0 2151 ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF) 2152 : true && "Uniform values only have lane zero"); 2153 2154 // If the value from the original loop has not been vectorized, it is 2155 // represented by UF x VF scalar values in the new loop. Return the requested 2156 // scalar value. 2157 if (VectorLoopValueMap.hasScalarValue(V, Instance)) 2158 return VectorLoopValueMap.getScalarValue(V, Instance); 2159 2160 // If the value has not been scalarized, get its entry in VectorLoopValueMap 2161 // for the given unroll part. If this entry is not a vector type (i.e., the 2162 // vectorization factor is one), there is no need to generate an 2163 // extractelement instruction. 2164 auto *U = getOrCreateVectorValue(V, Instance.Part); 2165 if (!U->getType()->isVectorTy()) { 2166 assert(VF == 1 && "Value not scalarized has non-vector type"); 2167 return U; 2168 } 2169 2170 // Otherwise, the value from the original loop has been vectorized and is 2171 // represented by UF vector values. Extract and return the requested scalar 2172 // value from the appropriate vector lane. 2173 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane)); 2174 } 2175 2176 void InnerLoopVectorizer::packScalarIntoVectorValue( 2177 Value *V, const VPIteration &Instance) { 2178 assert(V != Induction && "The new induction variable should not be used."); 2179 assert(!V->getType()->isVectorTy() && "Can't pack a vector"); 2180 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2181 2182 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance); 2183 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part); 2184 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst, 2185 Builder.getInt32(Instance.Lane)); 2186 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue); 2187 } 2188 2189 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2190 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2191 SmallVector<Constant *, 8> ShuffleMask; 2192 for (unsigned i = 0; i < VF; ++i) 2193 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 2194 2195 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 2196 ConstantVector::get(ShuffleMask), 2197 "reverse"); 2198 } 2199 2200 // Try to vectorize the interleave group that \p Instr belongs to. 2201 // 2202 // E.g. Translate following interleaved load group (factor = 3): 2203 // for (i = 0; i < N; i+=3) { 2204 // R = Pic[i]; // Member of index 0 2205 // G = Pic[i+1]; // Member of index 1 2206 // B = Pic[i+2]; // Member of index 2 2207 // ... 
// do something to R, G, B 2208 // } 2209 // To: 2210 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2211 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 2212 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 2213 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 2214 // 2215 // Or translate the following interleaved store group (factor = 3): 2216 // for (i = 0; i < N; i+=3) { 2217 // ... do something to R, G, B 2218 // Pic[i] = R; // Member of index 0 2219 // Pic[i+1] = G; // Member of index 1 2220 // Pic[i+2] = B; // Member of index 2 2221 // } 2222 // To: 2223 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2224 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 2225 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2226 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2227 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2228 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) { 2229 const InterleaveGroup *Group = Cost->getInterleavedAccessGroup(Instr); 2230 assert(Group && "Fail to get an interleaved access group."); 2231 2232 // Skip if the current instruction is not the insert position. 2233 if (Instr != Group->getInsertPos()) 2234 return; 2235 2236 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2237 Value *Ptr = getLoadStorePointerOperand(Instr); 2238 2239 // Prepare for the vector type of the interleaved load/store. 2240 Type *ScalarTy = getMemInstValueType(Instr); 2241 unsigned InterleaveFactor = Group->getFactor(); 2242 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF); 2243 Type *PtrTy = VecTy->getPointerTo(getMemInstAddressSpace(Instr)); 2244 2245 // Prepare for the new pointers. 2246 setDebugLocFromInst(Builder, Ptr); 2247 SmallVector<Value *, 2> NewPtrs; 2248 unsigned Index = Group->getIndex(Instr); 2249 2250 // If the group is reverse, adjust the index to refer to the last vector lane 2251 // instead of the first. We adjust the index from the first vector lane, 2252 // rather than directly getting the pointer for lane VF - 1, because the 2253 // pointer operand of the interleaved access is supposed to be uniform. For 2254 // uniform instructions, we're only required to generate a value for the 2255 // first vector lane in each unroll iteration. 2256 if (Group->isReverse()) 2257 Index += (VF - 1) * Group->getFactor(); 2258 2259 bool InBounds = false; 2260 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2261 InBounds = gep->isInBounds(); 2262 2263 for (unsigned Part = 0; Part < UF; Part++) { 2264 Value *NewPtr = getOrCreateScalarValue(Ptr, {Part, 0}); 2265 2266 // Note that the current instruction could be at any index in the group. We 2267 // need to adjust the address to that of the member at index 0. 2268 // 2269 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2270 // b = A[i]; // Member of index 0 2271 // The current pointer points to A[i+1]; adjust it to A[i]. 2272 // 2273 // E.g. A[i+1] = a; // Member of index 1 2274 // A[i] = b; // Member of index 0 2275 // A[i+2] = c; // Member of index 2 (Current instruction) 2276 // The current pointer points to A[i+2]; adjust it to A[i]. 2277 NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index)); 2278 if (InBounds) 2279 cast<GetElementPtrInst>(NewPtr)->setIsInBounds(true); 2280 2281 // Cast to the vector pointer type.
2282 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy)); 2283 } 2284 2285 setDebugLocFromInst(Builder, Instr); 2286 Value *UndefVec = UndefValue::get(VecTy); 2287 2288 // Vectorize the interleaved load group. 2289 if (isa<LoadInst>(Instr)) { 2290 // For each unroll part, create a wide load for the group. 2291 SmallVector<Value *, 2> NewLoads; 2292 for (unsigned Part = 0; Part < UF; Part++) { 2293 auto *NewLoad = Builder.CreateAlignedLoad( 2294 NewPtrs[Part], Group->getAlignment(), "wide.vec"); 2295 Group->addMetadata(NewLoad); 2296 NewLoads.push_back(NewLoad); 2297 } 2298 2299 // For each member in the group, shuffle out the appropriate data from the 2300 // wide loads. 2301 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2302 Instruction *Member = Group->getMember(I); 2303 2304 // Skip the gaps in the group. 2305 if (!Member) 2306 continue; 2307 2308 Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF); 2309 for (unsigned Part = 0; Part < UF; Part++) { 2310 Value *StridedVec = Builder.CreateShuffleVector( 2311 NewLoads[Part], UndefVec, StrideMask, "strided.vec"); 2312 2313 // If this member has different type, cast the result type. 2314 if (Member->getType() != ScalarTy) { 2315 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2316 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2317 } 2318 2319 if (Group->isReverse()) 2320 StridedVec = reverseVector(StridedVec); 2321 2322 VectorLoopValueMap.setVectorValue(Member, Part, StridedVec); 2323 } 2324 } 2325 return; 2326 } 2327 2328 // The sub vector type for current instruction. 2329 VectorType *SubVT = VectorType::get(ScalarTy, VF); 2330 2331 // Vectorize the interleaved store group. 2332 for (unsigned Part = 0; Part < UF; Part++) { 2333 // Collect the stored vector from each member. 2334 SmallVector<Value *, 4> StoredVecs; 2335 for (unsigned i = 0; i < InterleaveFactor; i++) { 2336 // Interleaved store group doesn't allow a gap, so each index has a member 2337 Instruction *Member = Group->getMember(i); 2338 assert(Member && "Fail to get a member from an interleaved store group"); 2339 2340 Value *StoredVec = getOrCreateVectorValue( 2341 cast<StoreInst>(Member)->getValueOperand(), Part); 2342 if (Group->isReverse()) 2343 StoredVec = reverseVector(StoredVec); 2344 2345 // If this member has different type, cast it to a unified type. 2346 2347 if (StoredVec->getType() != SubVT) 2348 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2349 2350 StoredVecs.push_back(StoredVec); 2351 } 2352 2353 // Concatenate all vectors into a wide vector. 2354 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2355 2356 // Interleave the elements in the wide vector. 2357 Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor); 2358 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask, 2359 "interleaved.vec"); 2360 2361 Instruction *NewStoreInstr = 2362 Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment()); 2363 2364 Group->addMetadata(NewStoreInstr); 2365 } 2366 } 2367 2368 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr, 2369 VectorParts *BlockInMask) { 2370 // Attempt to issue a wide load. 
2371 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2372 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2373 2374 assert((LI || SI) && "Invalid Load/Store instruction"); 2375 2376 LoopVectorizationCostModel::InstWidening Decision = 2377 Cost->getWideningDecision(Instr, VF); 2378 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 2379 "CM decision should be taken at this point"); 2380 if (Decision == LoopVectorizationCostModel::CM_Interleave) 2381 return vectorizeInterleaveGroup(Instr); 2382 2383 Type *ScalarDataTy = getMemInstValueType(Instr); 2384 Type *DataTy = VectorType::get(ScalarDataTy, VF); 2385 Value *Ptr = getLoadStorePointerOperand(Instr); 2386 unsigned Alignment = getMemInstAlignment(Instr); 2387 // An alignment of 0 means target abi alignment. We need to use the scalar's 2388 // target abi alignment in such a case. 2389 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2390 if (!Alignment) 2391 Alignment = DL.getABITypeAlignment(ScalarDataTy); 2392 unsigned AddressSpace = getMemInstAddressSpace(Instr); 2393 2394 // Determine if the pointer operand of the access is either consecutive or 2395 // reverse consecutive. 2396 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2397 bool ConsecutiveStride = 2398 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2399 bool CreateGatherScatter = 2400 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2401 2402 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2403 // gather/scatter. Otherwise Decision should have been to Scalarize. 2404 assert((ConsecutiveStride || CreateGatherScatter) && 2405 "The instruction should be scalarized"); 2406 2407 // Handle consecutive loads/stores. 2408 if (ConsecutiveStride) 2409 Ptr = getOrCreateScalarValue(Ptr, {0, 0}); 2410 2411 VectorParts Mask; 2412 bool isMaskRequired = BlockInMask; 2413 if (isMaskRequired) 2414 Mask = *BlockInMask; 2415 2416 bool InBounds = false; 2417 if (auto *gep = dyn_cast<GetElementPtrInst>( 2418 getLoadStorePointerOperand(Instr)->stripPointerCasts())) 2419 InBounds = gep->isInBounds(); 2420 2421 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2422 // Calculate the pointer for the specific unroll-part. 2423 GetElementPtrInst *PartPtr = nullptr; 2424 2425 if (Reverse) { 2426 // If the address is consecutive but reversed, then the 2427 // wide store needs to start at the last vector element. 2428 PartPtr = cast<GetElementPtrInst>( 2429 Builder.CreateGEP(Ptr, Builder.getInt32(-Part * VF))); 2430 PartPtr->setIsInBounds(InBounds); 2431 PartPtr = cast<GetElementPtrInst>( 2432 Builder.CreateGEP(PartPtr, Builder.getInt32(1 - VF))); 2433 PartPtr->setIsInBounds(InBounds); 2434 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2435 Mask[Part] = reverseVector(Mask[Part]); 2436 } else { 2437 PartPtr = cast<GetElementPtrInst>( 2438 Builder.CreateGEP(Ptr, Builder.getInt32(Part * VF))); 2439 PartPtr->setIsInBounds(InBounds); 2440 } 2441 2442 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2443 }; 2444 2445 // Handle Stores: 2446 if (SI) { 2447 setDebugLocFromInst(Builder, SI); 2448 2449 for (unsigned Part = 0; Part < UF; ++Part) { 2450 Instruction *NewSI = nullptr; 2451 Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part); 2452 if (CreateGatherScatter) { 2453 Value *MaskPart = isMaskRequired ? 
Mask[Part] : nullptr; 2454 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 2455 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2456 MaskPart); 2457 } else { 2458 if (Reverse) { 2459 // If we store to reverse consecutive memory locations, then we need 2460 // to reverse the order of elements in the stored value. 2461 StoredVal = reverseVector(StoredVal); 2462 // We don't want to update the value in the map as it might be used in 2463 // another expression. So don't call resetVectorValue(StoredVal). 2464 } 2465 auto *VecPtr = CreateVecPtr(Part, Ptr); 2466 if (isMaskRequired) 2467 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2468 Mask[Part]); 2469 else 2470 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2471 } 2472 addMetadata(NewSI, SI); 2473 } 2474 return; 2475 } 2476 2477 // Handle loads. 2478 assert(LI && "Must have a load instruction"); 2479 setDebugLocFromInst(Builder, LI); 2480 for (unsigned Part = 0; Part < UF; ++Part) { 2481 Value *NewLI; 2482 if (CreateGatherScatter) { 2483 Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr; 2484 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 2485 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 2486 nullptr, "wide.masked.gather"); 2487 addMetadata(NewLI, LI); 2488 } else { 2489 auto *VecPtr = CreateVecPtr(Part, Ptr); 2490 if (isMaskRequired) 2491 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 2492 UndefValue::get(DataTy), 2493 "wide.masked.load"); 2494 else 2495 NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load"); 2496 2497 // Add metadata to the load, but setVectorValue to the reverse shuffle. 2498 addMetadata(NewLI, LI); 2499 if (Reverse) 2500 NewLI = reverseVector(NewLI); 2501 } 2502 VectorLoopValueMap.setVectorValue(Instr, Part, NewLI); 2503 } 2504 } 2505 2506 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2507 const VPIteration &Instance, 2508 bool IfPredicateInstr) { 2509 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2510 2511 setDebugLocFromInst(Builder, Instr); 2512 2513 // Does this instruction return a value ? 2514 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2515 2516 Instruction *Cloned = Instr->clone(); 2517 if (!IsVoidRetTy) 2518 Cloned->setName(Instr->getName() + ".cloned"); 2519 2520 // Replace the operands of the cloned instructions with their scalar 2521 // equivalents in the new loop. 2522 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 2523 auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance); 2524 Cloned->setOperand(op, NewOp); 2525 } 2526 addNewMetadata(Cloned, Instr); 2527 2528 // Place the cloned scalar in the new loop. 2529 Builder.Insert(Cloned); 2530 2531 // Add the cloned scalar to the scalar map entry. 2532 VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned); 2533 2534 // If we just cloned a new assumption, add it the assumption cache. 2535 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 2536 if (II->getIntrinsicID() == Intrinsic::assume) 2537 AC->registerAssumption(II); 2538 2539 // End if-block. 2540 if (IfPredicateInstr) 2541 PredicatedInstructions.push_back(Cloned); 2542 } 2543 2544 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 2545 Value *End, Value *Step, 2546 Instruction *DL) { 2547 BasicBlock *Header = L->getHeader(); 2548 BasicBlock *Latch = L->getLoopLatch(); 2549 // As we're just creating this loop, it's possible no latch exists 2550 // yet. 
If so, use the header as this will be a single block loop. 2551 if (!Latch) 2552 Latch = Header; 2553 2554 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 2555 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 2556 setDebugLocFromInst(Builder, OldInst); 2557 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 2558 2559 Builder.SetInsertPoint(Latch->getTerminator()); 2560 setDebugLocFromInst(Builder, OldInst); 2561 2562 // Create i+1 and fill the PHINode. 2563 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 2564 Induction->addIncoming(Start, L->getLoopPreheader()); 2565 Induction->addIncoming(Next, Latch); 2566 // Create the compare. 2567 Value *ICmp = Builder.CreateICmpEQ(Next, End); 2568 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 2569 2570 // Now we have two terminators. Remove the old one from the block. 2571 Latch->getTerminator()->eraseFromParent(); 2572 2573 return Induction; 2574 } 2575 2576 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 2577 if (TripCount) 2578 return TripCount; 2579 2580 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2581 // Find the loop boundaries. 2582 ScalarEvolution *SE = PSE.getSE(); 2583 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2584 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 2585 "Invalid loop count"); 2586 2587 Type *IdxTy = Legal->getWidestInductionType(); 2588 2589 // The exit count might have the type of i64 while the phi is i32. This can 2590 // happen if we have an induction variable that is sign extended before the 2591 // compare. The only way that we get a backedge taken count is that the 2592 // induction variable was signed and as such will not overflow. In such a case 2593 // truncation is legal. 2594 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() > 2595 IdxTy->getPrimitiveSizeInBits()) 2596 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2597 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2598 2599 // Get the total trip count from the count by adding 1. 2600 const SCEV *ExitCount = SE->getAddExpr( 2601 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2602 2603 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2604 2605 // Expand the trip count and place the new instructions in the preheader. 2606 // Notice that the pre-header does not change, only the loop body. 2607 SCEVExpander Exp(*SE, DL, "induction"); 2608 2609 // Count holds the overall loop count (N). 2610 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2611 L->getLoopPreheader()->getTerminator()); 2612 2613 if (TripCount->getType()->isPointerTy()) 2614 TripCount = 2615 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2616 L->getLoopPreheader()->getTerminator()); 2617 2618 return TripCount; 2619 } 2620 2621 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 2622 if (VectorTripCount) 2623 return VectorTripCount; 2624 2625 Value *TC = getOrCreateTripCount(L); 2626 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2627 2628 // Now we need to generate the expression for the part of the loop that the 2629 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2630 // iterations are not required for correctness, or N - Step, otherwise. Step 2631 // is equal to the vectorization factor (number of SIMD elements) times the 2632 // unroll factor (number of SIMD instructions). 
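  // For example (a sketch): with VF = 4 and UF = 2, Step is 8. A trip count
  // of N = 21 gives N % Step = 5, so the vector loop covers 16 iterations and
  // 5 remain for the scalar loop. If a scalar epilogue is required and
  // N % Step is 0, the remainder is bumped up to Step below so that at least
  // one scalar iteration always runs.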
2633 Constant *Step = ConstantInt::get(TC->getType(), VF * UF); 2634 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2635 2636 // If there is a non-reversed interleaved group that may speculatively access 2637 // memory out-of-bounds, we need to ensure that there will be at least one 2638 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 2639 // the trip count, we set the remainder to be equal to the step. If the step 2640 // does not evenly divide the trip count, no adjustment is necessary since 2641 // there will already be scalar iterations. Note that the minimum iterations 2642 // check ensures that N >= Step. 2643 if (VF > 1 && Cost->requiresScalarEpilogue()) { 2644 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2645 R = Builder.CreateSelect(IsZero, Step, R); 2646 } 2647 2648 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2649 2650 return VectorTripCount; 2651 } 2652 2653 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2654 const DataLayout &DL) { 2655 // Verify that V is a vector type with same number of elements as DstVTy. 2656 unsigned VF = DstVTy->getNumElements(); 2657 VectorType *SrcVecTy = cast<VectorType>(V->getType()); 2658 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2659 Type *SrcElemTy = SrcVecTy->getElementType(); 2660 Type *DstElemTy = DstVTy->getElementType(); 2661 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2662 "Vector elements must have same size"); 2663 2664 // Do a direct cast if element types are castable. 2665 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2666 return Builder.CreateBitOrPointerCast(V, DstVTy); 2667 } 2668 // V cannot be directly casted to desired vector type. 2669 // May happen when V is a floating point vector but DstVTy is a vector of 2670 // pointers or vice-versa. Handle this using a two-step bitcast using an 2671 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 2672 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2673 "Only one type should be a pointer type"); 2674 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2675 "Only one type should be a floating point type"); 2676 Type *IntTy = 2677 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2678 VectorType *VecIntTy = VectorType::get(IntTy, VF); 2679 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2680 return Builder.CreateBitOrPointerCast(CastVal, DstVTy); 2681 } 2682 2683 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 2684 BasicBlock *Bypass) { 2685 Value *Count = getOrCreateTripCount(L); 2686 BasicBlock *BB = L->getLoopPreheader(); 2687 IRBuilder<> Builder(BB->getTerminator()); 2688 2689 // Generate code to check if the loop's trip count is less than VF * UF, or 2690 // equal to it in case a scalar epilogue is required; this implies that the 2691 // vector trip count is zero. This check also covers the case where adding one 2692 // to the backedge-taken count overflowed leading to an incorrect trip count 2693 // of zero. In this case we will also jump to the scalar loop. 2694 auto P = Cost->requiresScalarEpilogue() ? 
ICmpInst::ICMP_ULE 2695 : ICmpInst::ICMP_ULT; 2696 Value *CheckMinIters = Builder.CreateICmp( 2697 P, Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check"); 2698 2699 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2700 // Update dominator tree immediately if the generated block is a 2701 // LoopBypassBlock because SCEV expansions to generate loop bypass 2702 // checks may query it before the current function is finished. 2703 DT->addNewBlock(NewBB, BB); 2704 if (L->getParentLoop()) 2705 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2706 ReplaceInstWithInst(BB->getTerminator(), 2707 BranchInst::Create(Bypass, NewBB, CheckMinIters)); 2708 LoopBypassBlocks.push_back(BB); 2709 } 2710 2711 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 2712 BasicBlock *BB = L->getLoopPreheader(); 2713 2714 // Generate the code to check that the SCEV assumptions that we made. 2715 // We want the new basic block to start at the first instruction in a 2716 // sequence of instructions that form a check. 2717 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(), 2718 "scev.check"); 2719 Value *SCEVCheck = 2720 Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator()); 2721 2722 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck)) 2723 if (C->isZero()) 2724 return; 2725 2726 // Create a new block containing the stride check. 2727 BB->setName("vector.scevcheck"); 2728 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2729 // Update dominator tree immediately if the generated block is a 2730 // LoopBypassBlock because SCEV expansions to generate loop bypass 2731 // checks may query it before the current function is finished. 2732 DT->addNewBlock(NewBB, BB); 2733 if (L->getParentLoop()) 2734 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2735 ReplaceInstWithInst(BB->getTerminator(), 2736 BranchInst::Create(Bypass, NewBB, SCEVCheck)); 2737 LoopBypassBlocks.push_back(BB); 2738 AddedSafetyChecks = true; 2739 } 2740 2741 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) { 2742 BasicBlock *BB = L->getLoopPreheader(); 2743 2744 // Generate the code that checks in runtime if arrays overlap. We put the 2745 // checks into a separate block to make the more common case of few elements 2746 // faster. 2747 Instruction *FirstCheckInst; 2748 Instruction *MemRuntimeCheck; 2749 std::tie(FirstCheckInst, MemRuntimeCheck) = 2750 Legal->getLAI()->addRuntimeChecks(BB->getTerminator()); 2751 if (!MemRuntimeCheck) 2752 return; 2753 2754 // Create a new block containing the memory check. 2755 BB->setName("vector.memcheck"); 2756 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2757 // Update dominator tree immediately if the generated block is a 2758 // LoopBypassBlock because SCEV expansions to generate loop bypass 2759 // checks may query it before the current function is finished. 2760 DT->addNewBlock(NewBB, BB); 2761 if (L->getParentLoop()) 2762 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2763 ReplaceInstWithInst(BB->getTerminator(), 2764 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck)); 2765 LoopBypassBlocks.push_back(BB); 2766 AddedSafetyChecks = true; 2767 2768 // We currently don't use LoopVersioning for the actual loop cloning but we 2769 // still use it to add the noalias metadata. 
2770 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 2771 PSE.getSE()); 2772 LVer->prepareNoAliasMetadata(); 2773 } 2774 2775 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 2776 /* 2777 In this function we generate a new loop. The new loop will contain 2778 the vectorized instructions while the old loop will continue to run the 2779 scalar remainder. 2780 2781 [ ] <-- loop iteration number check. 2782 / | 2783 / v 2784 | [ ] <-- vector loop bypass (may consist of multiple blocks). 2785 | / | 2786 | / v 2787 || [ ] <-- vector pre header. 2788 |/ | 2789 | v 2790 | [ ] \ 2791 | [ ]_| <-- vector loop. 2792 | | 2793 | v 2794 | -[ ] <--- middle-block. 2795 | / | 2796 | / v 2797 -|- >[ ] <--- new preheader. 2798 | | 2799 | v 2800 | [ ] \ 2801 | [ ]_| <-- old scalar loop to handle remainder. 2802 \ | 2803 \ v 2804 >[ ] <-- exit block. 2805 ... 2806 */ 2807 2808 BasicBlock *OldBasicBlock = OrigLoop->getHeader(); 2809 BasicBlock *VectorPH = OrigLoop->getLoopPreheader(); 2810 BasicBlock *ExitBlock = OrigLoop->getExitBlock(); 2811 assert(VectorPH && "Invalid loop structure"); 2812 assert(ExitBlock && "Must have an exit block"); 2813 2814 // Some loops have a single integer induction variable, while other loops 2815 // don't. One example is c++ iterators that often have multiple pointer 2816 // induction variables. In the code below we also support a case where we 2817 // don't have a single induction variable. 2818 // 2819 // We try to obtain an induction variable from the original loop as hard 2820 // as possible. However if we don't find one that: 2821 // - is an integer 2822 // - counts from zero, stepping by one 2823 // - is the size of the widest induction variable type 2824 // then we create a new one. 2825 OldInduction = Legal->getPrimaryInduction(); 2826 Type *IdxTy = Legal->getWidestInductionType(); 2827 2828 // Split the single block loop into the two loop structure described above. 2829 BasicBlock *VecBody = 2830 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body"); 2831 BasicBlock *MiddleBlock = 2832 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block"); 2833 BasicBlock *ScalarPH = 2834 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph"); 2835 2836 // Create and register the new vector loop. 2837 Loop *Lp = LI->AllocateLoop(); 2838 Loop *ParentLoop = OrigLoop->getParentLoop(); 2839 2840 // Insert the new loop into the loop nest and register the new basic blocks 2841 // before calling any utilities such as SCEV that require valid LoopInfo. 2842 if (ParentLoop) { 2843 ParentLoop->addChildLoop(Lp); 2844 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI); 2845 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI); 2846 } else { 2847 LI->addTopLevelLoop(Lp); 2848 } 2849 Lp->addBasicBlockToLoop(VecBody, *LI); 2850 2851 // Find the loop boundaries. 2852 Value *Count = getOrCreateTripCount(Lp); 2853 2854 Value *StartIdx = ConstantInt::get(IdxTy, 0); 2855 2856 // Now, compare the new count to zero. If it is zero skip the vector loop and 2857 // jump to the scalar loop. This check also covers the case where the 2858 // backedge-taken count is uint##_max: adding one to it will overflow leading 2859 // to an incorrect trip count of zero. In this (rare) case we will also jump 2860 // to the scalar loop. 2861 emitMinimumIterationCountCheck(Lp, ScalarPH); 2862 2863 // Generate the code to check any assumptions that we've made for SCEV 2864 // expressions. 
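// For example (illustrative only), if the trip count could only be computed
// by assuming that a loop-variant stride %stride is exactly one, the
// predicate is materialized here as something like
//   %ident.check = icmp ne i64 %stride, 1
// and the loop branches to the scalar version when the assumption does not
// hold at run time.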
2865 emitSCEVChecks(Lp, ScalarPH); 2866 2867 // Generate the code that checks in runtime if arrays overlap. We put the 2868 // checks into a separate block to make the more common case of few elements 2869 // faster. 2870 emitMemRuntimeChecks(Lp, ScalarPH); 2871 2872 // Generate the induction variable. 2873 // The loop step is equal to the vectorization factor (num of SIMD elements) 2874 // times the unroll factor (num of SIMD instructions). 2875 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 2876 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 2877 Induction = 2878 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 2879 getDebugLocFromInstOrOperands(OldInduction)); 2880 2881 // We are going to resume the execution of the scalar loop. 2882 // Go over all of the induction variables that we found and fix the 2883 // PHIs that are left in the scalar version of the loop. 2884 // The starting values of PHI nodes depend on the counter of the last 2885 // iteration in the vectorized loop. 2886 // If we come from a bypass edge then we need to start from the original 2887 // start value. 2888 2889 // This variable saves the new starting index for the scalar loop. It is used 2890 // to test if there are any tail iterations left once the vector loop has 2891 // completed. 2892 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars(); 2893 for (auto &InductionEntry : *List) { 2894 PHINode *OrigPhi = InductionEntry.first; 2895 InductionDescriptor II = InductionEntry.second; 2896 2897 // Create phi nodes to merge from the backedge-taken check block. 2898 PHINode *BCResumeVal = PHINode::Create( 2899 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator()); 2900 Value *&EndValue = IVEndValues[OrigPhi]; 2901 if (OrigPhi == OldInduction) { 2902 // We know what the end value is. 2903 EndValue = CountRoundDown; 2904 } else { 2905 IRBuilder<> B(Lp->getLoopPreheader()->getTerminator()); 2906 Type *StepType = II.getStep()->getType(); 2907 Instruction::CastOps CastOp = 2908 CastInst::getCastOpcode(CountRoundDown, true, StepType, true); 2909 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd"); 2910 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2911 EndValue = II.transform(B, CRD, PSE.getSE(), DL); 2912 EndValue->setName("ind.end"); 2913 } 2914 2915 // The new PHI merges the original incoming value, in case of a bypass, 2916 // or the value at the end of the vectorized loop. 2917 BCResumeVal->addIncoming(EndValue, MiddleBlock); 2918 2919 // Fix the scalar body counter (PHI node). 2920 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH); 2921 2922 // The old induction's phi node in the scalar body needs the truncated 2923 // value. 2924 for (BasicBlock *BB : LoopBypassBlocks) 2925 BCResumeVal->addIncoming(II.getStartValue(), BB); 2926 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal); 2927 } 2928 2929 // Add a check in the middle block to see if we have completed 2930 // all of the iterations in the first vector loop. 2931 // If (N - N%VF) == N, then we *don't* need to run the remainder. 2932 Value *CmpN = 2933 CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 2934 CountRoundDown, "cmp.n", MiddleBlock->getTerminator()); 2935 ReplaceInstWithInst(MiddleBlock->getTerminator(), 2936 BranchInst::Create(ExitBlock, ScalarPH, CmpN)); 2937 2938 // Get ready to start creating new instructions into the vectorized body. 2939 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt()); 2940 2941 // Save the state. 
2942 LoopVectorPreHeader = Lp->getLoopPreheader(); 2943 LoopScalarPreHeader = ScalarPH; 2944 LoopMiddleBlock = MiddleBlock; 2945 LoopExitBlock = ExitBlock; 2946 LoopVectorBody = VecBody; 2947 LoopScalarBody = OldBasicBlock; 2948 2949 // Keep all loop hints from the original loop on the vector loop (we'll 2950 // replace the vectorizer-specific hints below). 2951 if (MDNode *LID = OrigLoop->getLoopID()) 2952 Lp->setLoopID(LID); 2953 2954 LoopVectorizeHints Hints(Lp, true, *ORE); 2955 Hints.setAlreadyVectorized(); 2956 2957 return LoopVectorPreHeader; 2958 } 2959 2960 // Fix up external users of the induction variable. At this point, we are 2961 // in LCSSA form, with all external PHIs that use the IV having one input value, 2962 // coming from the remainder loop. We need those PHIs to also have a correct 2963 // value for the IV when arriving directly from the middle block. 2964 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 2965 const InductionDescriptor &II, 2966 Value *CountRoundDown, Value *EndValue, 2967 BasicBlock *MiddleBlock) { 2968 // There are two kinds of external IV usages - those that use the value 2969 // computed in the last iteration (the PHI) and those that use the penultimate 2970 // value (the value that feeds into the phi from the loop latch). 2971 // We allow both, but they, obviously, have different values. 2972 2973 assert(OrigLoop->getExitBlock() && "Expected a single exit block"); 2974 2975 DenseMap<Value *, Value *> MissingVals; 2976 2977 // An external user of the last iteration's value should see the value that 2978 // the remainder loop uses to initialize its own IV. 2979 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 2980 for (User *U : PostInc->users()) { 2981 Instruction *UI = cast<Instruction>(U); 2982 if (!OrigLoop->contains(UI)) { 2983 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 2984 MissingVals[UI] = EndValue; 2985 } 2986 } 2987 2988 // An external user of the penultimate value need to see EndValue - Step. 2989 // The simplest way to get this is to recompute it from the constituent SCEVs, 2990 // that is Start + (Step * (CRD - 1)). 2991 for (User *U : OrigPhi->users()) { 2992 auto *UI = cast<Instruction>(U); 2993 if (!OrigLoop->contains(UI)) { 2994 const DataLayout &DL = 2995 OrigLoop->getHeader()->getModule()->getDataLayout(); 2996 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 2997 2998 IRBuilder<> B(MiddleBlock->getTerminator()); 2999 Value *CountMinusOne = B.CreateSub( 3000 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3001 Value *CMO = 3002 !II.getStep()->getType()->isIntegerTy() 3003 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3004 II.getStep()->getType()) 3005 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3006 CMO->setName("cast.cmo"); 3007 Value *Escape = II.transform(B, CMO, PSE.getSE(), DL); 3008 Escape->setName("ind.escape"); 3009 MissingVals[UI] = Escape; 3010 } 3011 } 3012 3013 for (auto &I : MissingVals) { 3014 PHINode *PHI = cast<PHINode>(I.first); 3015 // One corner case we have to handle is two IVs "chasing" each-other, 3016 // that is %IV2 = phi [...], [ %IV1, %latch ] 3017 // In this case, if IV1 has an external use, we need to avoid adding both 3018 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3019 // don't already have an incoming value for the middle block. 
3020 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3021 PHI->addIncoming(I.second, MiddleBlock); 3022 } 3023 } 3024 3025 namespace { 3026 3027 struct CSEDenseMapInfo { 3028 static bool canHandle(const Instruction *I) { 3029 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3030 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3031 } 3032 3033 static inline Instruction *getEmptyKey() { 3034 return DenseMapInfo<Instruction *>::getEmptyKey(); 3035 } 3036 3037 static inline Instruction *getTombstoneKey() { 3038 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3039 } 3040 3041 static unsigned getHashValue(const Instruction *I) { 3042 assert(canHandle(I) && "Unknown instruction!"); 3043 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3044 I->value_op_end())); 3045 } 3046 3047 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3048 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3049 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3050 return LHS == RHS; 3051 return LHS->isIdenticalTo(RHS); 3052 } 3053 }; 3054 3055 } // end anonymous namespace 3056 3057 ///Perform cse of induction variable instructions. 3058 static void cse(BasicBlock *BB) { 3059 // Perform simple cse. 3060 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3061 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3062 Instruction *In = &*I++; 3063 3064 if (!CSEDenseMapInfo::canHandle(In)) 3065 continue; 3066 3067 // Check if we can replace this instruction with any of the 3068 // visited instructions. 3069 if (Instruction *V = CSEMap.lookup(In)) { 3070 In->replaceAllUsesWith(V); 3071 In->eraseFromParent(); 3072 continue; 3073 } 3074 3075 CSEMap[In] = In; 3076 } 3077 } 3078 3079 /// Estimate the overhead of scalarizing an instruction. This is a 3080 /// convenience wrapper for the type-based getScalarizationOverhead API. 3081 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF, 3082 const TargetTransformInfo &TTI) { 3083 if (VF == 1) 3084 return 0; 3085 3086 unsigned Cost = 0; 3087 Type *RetTy = ToVectorTy(I->getType(), VF); 3088 if (!RetTy->isVoidTy() && 3089 (!isa<LoadInst>(I) || 3090 !TTI.supportsEfficientVectorElementLoadStore())) 3091 Cost += TTI.getScalarizationOverhead(RetTy, true, false); 3092 3093 if (CallInst *CI = dyn_cast<CallInst>(I)) { 3094 SmallVector<const Value *, 4> Operands(CI->arg_operands()); 3095 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF); 3096 } 3097 else if (!isa<StoreInst>(I) || 3098 !TTI.supportsEfficientVectorElementLoadStore()) { 3099 SmallVector<const Value *, 4> Operands(I->operand_values()); 3100 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF); 3101 } 3102 3103 return Cost; 3104 } 3105 3106 // Estimate cost of a call instruction CI if it were vectorized with factor VF. 3107 // Return the cost of the instruction, including scalarization overhead if it's 3108 // needed. The flag NeedToScalarize shows if the call needs to be scalarized - 3109 // i.e. either vector version isn't available, or is too expensive. 
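// A rough worked example (the numbers are illustrative, not real TTI costs):
// for a libm call with VF = 4, the scalarized estimate is
//   Cost = ScalarCallCost * VF + ScalarizationCost = 10 * 4 + 6 = 46.
// If TLI knows a vector variant of the callee whose cost is, say, 20, we
// return 20 and clear NeedToScalarize; otherwise we return 46 with
// NeedToScalarize left set.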
3110 static unsigned getVectorCallCost(CallInst *CI, unsigned VF, 3111 const TargetTransformInfo &TTI, 3112 const TargetLibraryInfo *TLI, 3113 bool &NeedToScalarize) { 3114 Function *F = CI->getCalledFunction(); 3115 StringRef FnName = CI->getCalledFunction()->getName(); 3116 Type *ScalarRetTy = CI->getType(); 3117 SmallVector<Type *, 4> Tys, ScalarTys; 3118 for (auto &ArgOp : CI->arg_operands()) 3119 ScalarTys.push_back(ArgOp->getType()); 3120 3121 // Estimate cost of scalarized vector call. The source operands are assumed 3122 // to be vectors, so we need to extract individual elements from there, 3123 // execute VF scalar calls, and then gather the result into the vector return 3124 // value. 3125 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3126 if (VF == 1) 3127 return ScalarCallCost; 3128 3129 // Compute corresponding vector type for return value and arguments. 3130 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3131 for (Type *ScalarTy : ScalarTys) 3132 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3133 3134 // Compute costs of unpacking argument values for the scalar calls and 3135 // packing the return values to a vector. 3136 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI); 3137 3138 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3139 3140 // If we can't emit a vector call for this function, then the currently found 3141 // cost is the cost we need to return. 3142 NeedToScalarize = true; 3143 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3144 return Cost; 3145 3146 // If the corresponding vector cost is cheaper, return its cost. 3147 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3148 if (VectorCallCost < Cost) { 3149 NeedToScalarize = false; 3150 return VectorCallCost; 3151 } 3152 return Cost; 3153 } 3154 3155 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3156 // factor VF. Return the cost of the instruction, including scalarization 3157 // overhead if it's needed. 3158 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3159 const TargetTransformInfo &TTI, 3160 const TargetLibraryInfo *TLI) { 3161 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3162 assert(ID && "Expected intrinsic call!"); 3163 3164 FastMathFlags FMF; 3165 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3166 FMF = FPMO->getFastMathFlags(); 3167 3168 SmallVector<Value *, 4> Operands(CI->arg_operands()); 3169 return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF); 3170 } 3171 3172 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3173 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3174 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3175 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3176 } 3177 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3178 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3179 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3180 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3181 } 3182 3183 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3184 // For every instruction `I` in MinBWs, truncate the operands, create a 3185 // truncated version of `I` and reextend its result. InstCombine runs 3186 // later and will remove any ext/trunc pairs. 3187 SmallPtrSet<Value *, 4> Erased; 3188 for (const auto &KV : Cost->getMinimalBitwidths()) { 3189 // If the value wasn't vectorized, we must maintain the original scalar 3190 // type. 
The absence of the value from VectorLoopValueMap indicates that it 3191 // wasn't vectorized. 3192 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3193 continue; 3194 for (unsigned Part = 0; Part < UF; ++Part) { 3195 Value *I = getOrCreateVectorValue(KV.first, Part); 3196 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3197 continue; 3198 Type *OriginalTy = I->getType(); 3199 Type *ScalarTruncatedTy = 3200 IntegerType::get(OriginalTy->getContext(), KV.second); 3201 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3202 OriginalTy->getVectorNumElements()); 3203 if (TruncatedTy == OriginalTy) 3204 continue; 3205 3206 IRBuilder<> B(cast<Instruction>(I)); 3207 auto ShrinkOperand = [&](Value *V) -> Value * { 3208 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3209 if (ZI->getSrcTy() == TruncatedTy) 3210 return ZI->getOperand(0); 3211 return B.CreateZExtOrTrunc(V, TruncatedTy); 3212 }; 3213 3214 // The actual instruction modification depends on the instruction type, 3215 // unfortunately. 3216 Value *NewI = nullptr; 3217 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3218 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3219 ShrinkOperand(BO->getOperand(1))); 3220 3221 // Any wrapping introduced by shrinking this operation shouldn't be 3222 // considered undefined behavior. So, we can't unconditionally copy 3223 // arithmetic wrapping flags to NewI. 3224 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3225 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3226 NewI = 3227 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3228 ShrinkOperand(CI->getOperand(1))); 3229 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3230 NewI = B.CreateSelect(SI->getCondition(), 3231 ShrinkOperand(SI->getTrueValue()), 3232 ShrinkOperand(SI->getFalseValue())); 3233 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3234 switch (CI->getOpcode()) { 3235 default: 3236 llvm_unreachable("Unhandled cast!"); 3237 case Instruction::Trunc: 3238 NewI = ShrinkOperand(CI->getOperand(0)); 3239 break; 3240 case Instruction::SExt: 3241 NewI = B.CreateSExtOrTrunc( 3242 CI->getOperand(0), 3243 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3244 break; 3245 case Instruction::ZExt: 3246 NewI = B.CreateZExtOrTrunc( 3247 CI->getOperand(0), 3248 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3249 break; 3250 } 3251 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3252 auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements(); 3253 auto *O0 = B.CreateZExtOrTrunc( 3254 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3255 auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements(); 3256 auto *O1 = B.CreateZExtOrTrunc( 3257 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3258 3259 NewI = B.CreateShuffleVector(O0, O1, SI->getMask()); 3260 } else if (isa<LoadInst>(I)) { 3261 // Don't do anything with the operands, just extend the result. 
3262 continue; 3263 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3264 auto Elements = IE->getOperand(0)->getType()->getVectorNumElements(); 3265 auto *O0 = B.CreateZExtOrTrunc( 3266 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3267 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3268 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3269 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3270 auto Elements = EE->getOperand(0)->getType()->getVectorNumElements(); 3271 auto *O0 = B.CreateZExtOrTrunc( 3272 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3273 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3274 } else { 3275 llvm_unreachable("Unhandled instruction type!"); 3276 } 3277 3278 // Lastly, extend the result. 3279 NewI->takeName(cast<Instruction>(I)); 3280 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3281 I->replaceAllUsesWith(Res); 3282 cast<Instruction>(I)->eraseFromParent(); 3283 Erased.insert(I); 3284 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res); 3285 } 3286 } 3287 3288 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3289 for (const auto &KV : Cost->getMinimalBitwidths()) { 3290 // If the value wasn't vectorized, we must maintain the original scalar 3291 // type. The absence of the value from VectorLoopValueMap indicates that it 3292 // wasn't vectorized. 3293 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3294 continue; 3295 for (unsigned Part = 0; Part < UF; ++Part) { 3296 Value *I = getOrCreateVectorValue(KV.first, Part); 3297 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3298 if (Inst && Inst->use_empty()) { 3299 Value *NewI = Inst->getOperand(0); 3300 Inst->eraseFromParent(); 3301 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI); 3302 } 3303 } 3304 } 3305 } 3306 3307 void InnerLoopVectorizer::fixVectorizedLoop() { 3308 // Insert truncates and extends for any truncated instructions as hints to 3309 // InstCombine. 3310 if (VF > 1) 3311 truncateToMinimalBitwidths(); 3312 3313 // At this point every instruction in the original loop is widened to a 3314 // vector form. Now we need to fix the recurrences in the loop. These PHI 3315 // nodes are currently empty because we did not want to introduce cycles. 3316 // This is the second stage of vectorizing recurrences. 3317 fixCrossIterationPHIs(); 3318 3319 // Update the dominator tree. 3320 // 3321 // FIXME: After creating the structure of the new loop, the dominator tree is 3322 // no longer up-to-date, and it remains that way until we update it 3323 // here. An out-of-date dominator tree is problematic for SCEV, 3324 // because SCEVExpander uses it to guide code generation. The 3325 // vectorizer use SCEVExpanders in several places. Instead, we should 3326 // keep the dominator tree up-to-date as we go. 3327 updateAnalysis(); 3328 3329 // Fix-up external users of the induction variables. 3330 for (auto &Entry : *Legal->getInductionVars()) 3331 fixupIVUsers(Entry.first, Entry.second, 3332 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 3333 IVEndValues[Entry.first], LoopMiddleBlock); 3334 3335 fixLCSSAPHIs(); 3336 for (Instruction *PI : PredicatedInstructions) 3337 sinkScalarOperands(&*PI); 3338 3339 // Remove redundant induction instructions. 3340 cse(LoopVectorBody); 3341 } 3342 3343 void InnerLoopVectorizer::fixCrossIterationPHIs() { 3344 // In order to support recurrences we need to be able to vectorize Phi nodes. 
3345 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3346 // stage #2: We now need to fix the recurrences by adding incoming edges to 3347 // the currently empty PHI nodes. At this point every instruction in the 3348 // original loop is widened to a vector form so we can use them to construct 3349 // the incoming edges. 3350 for (PHINode &Phi : OrigLoop->getHeader()->phis()) { 3351 // Handle first-order recurrences and reductions that need to be fixed. 3352 if (Legal->isFirstOrderRecurrence(&Phi)) 3353 fixFirstOrderRecurrence(&Phi); 3354 else if (Legal->isReductionVariable(&Phi)) 3355 fixReduction(&Phi); 3356 } 3357 } 3358 3359 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) { 3360 // This is the second phase of vectorizing first-order recurrences. An 3361 // overview of the transformation is described below. Suppose we have the 3362 // following loop. 3363 // 3364 // for (int i = 0; i < n; ++i) 3365 // b[i] = a[i] - a[i - 1]; 3366 // 3367 // There is a first-order recurrence on "a". For this loop, the shorthand 3368 // scalar IR looks like: 3369 // 3370 // scalar.ph: 3371 // s_init = a[-1] 3372 // br scalar.body 3373 // 3374 // scalar.body: 3375 // i = phi [0, scalar.ph], [i+1, scalar.body] 3376 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 3377 // s2 = a[i] 3378 // b[i] = s2 - s1 3379 // br cond, scalar.body, ... 3380 // 3381 // In this example, s1 is a recurrence because it's value depends on the 3382 // previous iteration. In the first phase of vectorization, we created a 3383 // temporary value for s1. We now complete the vectorization and produce the 3384 // shorthand vector IR shown below (for VF = 4, UF = 1). 3385 // 3386 // vector.ph: 3387 // v_init = vector(..., ..., ..., a[-1]) 3388 // br vector.body 3389 // 3390 // vector.body 3391 // i = phi [0, vector.ph], [i+4, vector.body] 3392 // v1 = phi [v_init, vector.ph], [v2, vector.body] 3393 // v2 = a[i, i+1, i+2, i+3]; 3394 // v3 = vector(v1(3), v2(0, 1, 2)) 3395 // b[i, i+1, i+2, i+3] = v2 - v3 3396 // br cond, vector.body, middle.block 3397 // 3398 // middle.block: 3399 // x = v2(3) 3400 // br scalar.ph 3401 // 3402 // scalar.ph: 3403 // s_init = phi [x, middle.block], [a[-1], otherwise] 3404 // br scalar.body 3405 // 3406 // After execution completes the vector loop, we extract the next value of 3407 // the recurrence (x) to use as the initial value in the scalar loop. 3408 3409 // Get the original loop preheader and single loop latch. 3410 auto *Preheader = OrigLoop->getLoopPreheader(); 3411 auto *Latch = OrigLoop->getLoopLatch(); 3412 3413 // Get the initial and previous values of the scalar recurrence. 3414 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader); 3415 auto *Previous = Phi->getIncomingValueForBlock(Latch); 3416 3417 // Create a vector from the initial value. 3418 auto *VectorInit = ScalarInit; 3419 if (VF > 1) { 3420 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 3421 VectorInit = Builder.CreateInsertElement( 3422 UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit, 3423 Builder.getInt32(VF - 1), "vector.recur.init"); 3424 } 3425 3426 // We constructed a temporary phi node in the first phase of vectorization. 3427 // This phi node will eventually be deleted. 3428 Builder.SetInsertPoint( 3429 cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0))); 3430 3431 // Create a phi node for the new recurrence. The current value will either be 3432 // the initial value inserted into a vector or loop-varying vector value. 
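// In the shorthand example above (VF = 4, UF = 1) this is the phi
//   v1 = phi [v_init, vector.ph], [v2, vector.body]
// whose backedge value is filled in further below.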
3433 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur"); 3434 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader); 3435 3436 // Get the vectorized previous value of the last part UF - 1. It appears last 3437 // among all unrolled iterations, due to the order of their construction. 3438 Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1); 3439 3440 // Set the insertion point after the previous value if it is an instruction. 3441 // Note that the previous value may have been constant-folded so it is not 3442 // guaranteed to be an instruction in the vector loop. Also, if the previous 3443 // value is a phi node, we should insert after all the phi nodes to avoid 3444 // breaking basic block verification. 3445 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) || 3446 isa<PHINode>(PreviousLastPart)) 3447 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3448 else 3449 Builder.SetInsertPoint( 3450 &*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart))); 3451 3452 // We will construct a vector for the recurrence by combining the values for 3453 // the current and previous iterations. This is the required shuffle mask. 3454 SmallVector<Constant *, 8> ShuffleMask(VF); 3455 ShuffleMask[0] = Builder.getInt32(VF - 1); 3456 for (unsigned I = 1; I < VF; ++I) 3457 ShuffleMask[I] = Builder.getInt32(I + VF - 1); 3458 3459 // The vector from which to take the initial value for the current iteration 3460 // (actual or unrolled). Initially, this is the vector phi node. 3461 Value *Incoming = VecPhi; 3462 3463 // Shuffle the current and previous vector and update the vector parts. 3464 for (unsigned Part = 0; Part < UF; ++Part) { 3465 Value *PreviousPart = getOrCreateVectorValue(Previous, Part); 3466 Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part); 3467 auto *Shuffle = 3468 VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart, 3469 ConstantVector::get(ShuffleMask)) 3470 : Incoming; 3471 PhiPart->replaceAllUsesWith(Shuffle); 3472 cast<Instruction>(PhiPart)->eraseFromParent(); 3473 VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle); 3474 Incoming = PreviousPart; 3475 } 3476 3477 // Fix the latch value of the new recurrence in the vector loop. 3478 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 3479 3480 // Extract the last vector element in the middle block. This will be the 3481 // initial value for the recurrence when jumping to the scalar loop. 3482 auto *ExtractForScalar = Incoming; 3483 if (VF > 1) { 3484 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3485 ExtractForScalar = Builder.CreateExtractElement( 3486 ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract"); 3487 } 3488 // Extract the second last element in the middle block if the 3489 // Phi is used outside the loop. We need to extract the phi itself 3490 // and not the last element (the phi update in the current iteration). This 3491 // will be the value when jumping to the exit block from the LoopMiddleBlock, 3492 // when the scalar loop is not run at all. 3493 Value *ExtractForPhiUsedOutsideLoop = nullptr; 3494 if (VF > 1) 3495 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 3496 Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi"); 3497 // When loop is unrolled without vectorizing, initialize 3498 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value of 3499 // `Incoming`. 
This is analogous to the vectorized case above: extracting the
3500 // second last element when VF > 1.
3501 else if (UF > 1)
3502 ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
3503
3504 // Fix the initial value of the original recurrence in the scalar loop.
3505 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3506 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3507 for (auto *BB : predecessors(LoopScalarPreHeader)) {
3508 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3509 Start->addIncoming(Incoming, BB);
3510 }
3511
3512 Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
3513 Phi->setName("scalar.recur");
3514
3515 // Finally, fix users of the recurrence outside the loop. The users will need
3516 // either the last value of the scalar recurrence or the last value of the
3517 // vector recurrence we extracted in the middle block. Since the loop is in
3518 // LCSSA form, we just need to find the phi node for the original scalar
3519 // recurrence in the exit block, and then add an edge for the middle block.
3520 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3521 if (LCSSAPhi.getIncomingValue(0) == Phi) {
3522 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3523 break;
3524 }
3525 }
3526 }
3527
3528 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
3529 Constant *Zero = Builder.getInt32(0);
3530
3531 // Get its reduction variable descriptor.
3532 assert(Legal->isReductionVariable(Phi) &&
3533 "Unable to find the reduction variable");
3534 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
3535
3536 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3537 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3538 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3539 RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3540 RdxDesc.getMinMaxRecurrenceKind();
3541 setDebugLocFromInst(Builder, ReductionStartValue);
3542
3543 // We need to generate a reduction vector from the incoming scalar.
3544 // To do so, we need to generate the 'identity' vector and override
3545 // one of the elements with the incoming scalar reduction. We need
3546 // to do it in the vector-loop preheader.
3547 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3548
3549 // This is the vector-clone of the value that leaves the loop.
3550 Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
3551
3552 // Find the reduction identity variable. Zero for addition, or and xor;
3553 // one for multiplication; -1 for and.
3554 Value *Identity;
3555 Value *VectorStart;
3556 if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3557 RK == RecurrenceDescriptor::RK_FloatMinMax) {
3558 // MinMax reductions have the start value as their identity.
3559 if (VF == 1) {
3560 VectorStart = Identity = ReductionStartValue;
3561 } else {
3562 VectorStart = Identity =
3563 Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3564 }
3565 } else {
3566 // Handle other reduction kinds:
3567 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3568 RK, VecTy->getScalarType());
3569 if (VF == 1) {
3570 Identity = Iden;
3571 // This vector is the Identity vector where the first element is the
3572 // incoming scalar reduction.
3573 VectorStart = ReductionStartValue;
3574 } else {
3575 Identity = ConstantVector::getSplat(VF, Iden);
3576
3577 // This vector is the Identity vector where the first element is the
3578 // incoming scalar reduction.
3579 VectorStart =
3580 Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3581 }
3582 }
3583
3584 // Fix the vector-loop phi.
3585
3586 // Reductions do not have to start at zero. They can start with
3587 // any loop invariant values.
3588 BasicBlock *Latch = OrigLoop->getLoopLatch();
3589 Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3590 for (unsigned Part = 0; Part < UF; ++Part) {
3591 Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
3592 Value *Val = getOrCreateVectorValue(LoopVal, Part);
3593 // Make sure to add the reduction start value only to the
3594 // first unroll part.
3595 Value *StartVal = (Part == 0) ? VectorStart : Identity;
3596 cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
3597 cast<PHINode>(VecRdxPhi)
3598 ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3599 }
3600
3601 // Before each round, move the insertion point right between
3602 // the PHIs and the values we are going to write.
3603 // This allows us to write both PHINodes and the extractelement
3604 // instructions.
3605 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3606
3607 setDebugLocFromInst(Builder, LoopExitInst);
3608
3609 // If the vector reduction can be performed in a smaller type, we truncate
3610 // then extend the loop exit value to enable InstCombine to evaluate the
3611 // entire expression in the smaller type.
3612 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
3613 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3614 Builder.SetInsertPoint(
3615 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
3616 VectorParts RdxParts(UF);
3617 for (unsigned Part = 0; Part < UF; ++Part) {
3618 RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3619 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3620 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3621 : Builder.CreateZExt(Trunc, VecTy);
3622 for (Value::user_iterator UI = RdxParts[Part]->user_begin();
3623 UI != RdxParts[Part]->user_end();)
3624 if (*UI != Trunc) {
3625 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
3626 RdxParts[Part] = Extnd;
3627 } else {
3628 ++UI;
3629 }
3630 }
3631 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3632 for (unsigned Part = 0; Part < UF; ++Part) {
3633 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3634 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
3635 }
3636 }
3637
3638 // Reduce all of the unrolled parts into a single vector.
3639 Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
3640 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
3641 setDebugLocFromInst(Builder, ReducedPartRdx);
3642 for (unsigned Part = 1; Part < UF; ++Part) {
3643 Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3644 if (Op != Instruction::ICmp && Op != Instruction::FCmp)
3645 // Floating point operations had to be 'fast' to enable the reduction.
3646 ReducedPartRdx = addFastMathFlag( 3647 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart, 3648 ReducedPartRdx, "bin.rdx")); 3649 else 3650 ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp( 3651 Builder, MinMaxKind, ReducedPartRdx, RdxPart); 3652 } 3653 3654 if (VF > 1) { 3655 bool NoNaN = Legal->hasFunNoNaNAttr(); 3656 ReducedPartRdx = 3657 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN); 3658 // If the reduction can be performed in a smaller type, we need to extend 3659 // the reduction to the wider type before we branch to the original loop. 3660 if (Phi->getType() != RdxDesc.getRecurrenceType()) 3661 ReducedPartRdx = 3662 RdxDesc.isSigned() 3663 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 3664 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 3665 } 3666 3667 // Create a phi node that merges control-flow from the backedge-taken check 3668 // block and the middle block. 3669 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 3670 LoopScalarPreHeader->getTerminator()); 3671 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 3672 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 3673 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3674 3675 // Now, we need to fix the users of the reduction variable 3676 // inside and outside of the scalar remainder loop. 3677 // We know that the loop is in LCSSA form. We need to update the 3678 // PHI nodes in the exit blocks. 3679 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3680 // All PHINodes need to have a single entry edge, or two if 3681 // we already fixed them. 3682 assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 3683 3684 // We found a reduction value exit-PHI. Update it with the 3685 // incoming bypass edge. 3686 if (LCSSAPhi.getIncomingValue(0) == LoopExitInst) 3687 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 3688 } // end of the LCSSA phi scan. 3689 3690 // Fix the scalar loop reduction variable with the incoming reduction sum 3691 // from the vector body and from the backedge value. 3692 int IncomingEdgeBlockIdx = 3693 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 3694 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 3695 // Pick the other block. 3696 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 3697 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 3698 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 3699 } 3700 3701 void InnerLoopVectorizer::fixLCSSAPHIs() { 3702 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3703 if (LCSSAPhi.getNumIncomingValues() == 1) { 3704 assert(OrigLoop->isLoopInvariant(LCSSAPhi.getIncomingValue(0)) && 3705 "Incoming value isn't loop invariant"); 3706 LCSSAPhi.addIncoming(LCSSAPhi.getIncomingValue(0), LoopMiddleBlock); 3707 } 3708 } 3709 } 3710 3711 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 3712 // The basic block and loop containing the predicated instruction. 3713 auto *PredBB = PredInst->getParent(); 3714 auto *VectorLoop = LI->getLoopFor(PredBB); 3715 3716 // Initialize a worklist with the operands of the predicated instruction. 3717 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 3718 3719 // Holds instructions that we need to analyze again. An instruction may be 3720 // reanalyzed if we don't yet know if we can sink it or not. 3721 SmallVector<Instruction *, 8> InstsToReanalyze; 3722 3723 // Returns true if a given use occurs in the predicated block. 
Phi nodes use 3724 // their operands in their corresponding predecessor blocks. 3725 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 3726 auto *I = cast<Instruction>(U.getUser()); 3727 BasicBlock *BB = I->getParent(); 3728 if (auto *Phi = dyn_cast<PHINode>(I)) 3729 BB = Phi->getIncomingBlock( 3730 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 3731 return BB == PredBB; 3732 }; 3733 3734 // Iteratively sink the scalarized operands of the predicated instruction 3735 // into the block we created for it. When an instruction is sunk, it's 3736 // operands are then added to the worklist. The algorithm ends after one pass 3737 // through the worklist doesn't sink a single instruction. 3738 bool Changed; 3739 do { 3740 // Add the instructions that need to be reanalyzed to the worklist, and 3741 // reset the changed indicator. 3742 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 3743 InstsToReanalyze.clear(); 3744 Changed = false; 3745 3746 while (!Worklist.empty()) { 3747 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 3748 3749 // We can't sink an instruction if it is a phi node, is already in the 3750 // predicated block, is not in the loop, or may have side effects. 3751 if (!I || isa<PHINode>(I) || I->getParent() == PredBB || 3752 !VectorLoop->contains(I) || I->mayHaveSideEffects()) 3753 continue; 3754 3755 // It's legal to sink the instruction if all its uses occur in the 3756 // predicated block. Otherwise, there's nothing to do yet, and we may 3757 // need to reanalyze the instruction. 3758 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 3759 InstsToReanalyze.push_back(I); 3760 continue; 3761 } 3762 3763 // Move the instruction to the beginning of the predicated block, and add 3764 // it's operands to the worklist. 3765 I->moveBefore(&*PredBB->getFirstInsertionPt()); 3766 Worklist.insert(I->op_begin(), I->op_end()); 3767 3768 // The sinking may have enabled other instructions to be sunk, so we will 3769 // need to iterate. 3770 Changed = true; 3771 } 3772 } while (Changed); 3773 } 3774 3775 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF, 3776 unsigned VF) { 3777 assert(PN->getParent() == OrigLoop->getHeader() && 3778 "Non-header phis should have been handled elsewhere"); 3779 3780 PHINode *P = cast<PHINode>(PN); 3781 // In order to support recurrences we need to be able to vectorize Phi nodes. 3782 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3783 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 3784 // this value when we vectorize all of the instructions that use the PHI. 3785 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) { 3786 for (unsigned Part = 0; Part < UF; ++Part) { 3787 // This is phase one of vectorizing PHIs. 3788 Type *VecTy = 3789 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF); 3790 Value *EntryPart = PHINode::Create( 3791 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 3792 VectorLoopValueMap.setVectorValue(P, Part, EntryPart); 3793 } 3794 return; 3795 } 3796 3797 setDebugLocFromInst(Builder, P); 3798 3799 // This PHINode must be an induction variable. 3800 // Make sure that we know about it. 
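// (Reductions and first-order recurrences were handled above, and integer/FP
// inductions are widened by a different code path, so the switch below only
// needs to materialize pointer inductions.)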
3801 assert(Legal->getInductionVars()->count(P) && "Not an induction variable"); 3802 3803 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 3804 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 3805 3806 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 3807 // which can be found from the original scalar operations. 3808 switch (II.getKind()) { 3809 case InductionDescriptor::IK_NoInduction: 3810 llvm_unreachable("Unknown induction"); 3811 case InductionDescriptor::IK_IntInduction: 3812 case InductionDescriptor::IK_FpInduction: 3813 llvm_unreachable("Integer/fp induction is handled elsewhere."); 3814 case InductionDescriptor::IK_PtrInduction: { 3815 // Handle the pointer induction variable case. 3816 assert(P->getType()->isPointerTy() && "Unexpected type."); 3817 // This is the normalized GEP that starts counting at zero. 3818 Value *PtrInd = Induction; 3819 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType()); 3820 // Determine the number of scalars we need to generate for each unroll 3821 // iteration. If the instruction is uniform, we only need to generate the 3822 // first lane. Otherwise, we generate all VF values. 3823 unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF; 3824 // These are the scalar results. Notice that we don't generate vector GEPs 3825 // because scalar GEPs result in better code. 3826 for (unsigned Part = 0; Part < UF; ++Part) { 3827 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 3828 Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF); 3829 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 3830 Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL); 3831 SclrGep->setName("next.gep"); 3832 VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep); 3833 } 3834 } 3835 return; 3836 } 3837 } 3838 } 3839 3840 /// A helper function for checking whether an integer division-related 3841 /// instruction may divide by zero (in which case it must be predicated if 3842 /// executed conditionally in the scalar code). 3843 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 3844 /// Non-zero divisors that are non compile-time constants will not be 3845 /// converted into multiplication, so we will still end up scalarizing 3846 /// the division, but can do so w/o predication. 3847 static bool mayDivideByZero(Instruction &I) { 3848 assert((I.getOpcode() == Instruction::UDiv || 3849 I.getOpcode() == Instruction::SDiv || 3850 I.getOpcode() == Instruction::URem || 3851 I.getOpcode() == Instruction::SRem) && 3852 "Unexpected instruction"); 3853 Value *Divisor = I.getOperand(1); 3854 auto *CInt = dyn_cast<ConstantInt>(Divisor); 3855 return !CInt || CInt->isZero(); 3856 } 3857 3858 void InnerLoopVectorizer::widenInstruction(Instruction &I) { 3859 switch (I.getOpcode()) { 3860 case Instruction::Br: 3861 case Instruction::PHI: 3862 llvm_unreachable("This instruction is handled by a different recipe."); 3863 case Instruction::GetElementPtr: { 3864 // Construct a vector GEP by widening the operands of the scalar GEP as 3865 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 3866 // results in a vector of pointers when at least one operand of the GEP 3867 // is vector-typed. Thus, to keep the representation compact, we only use 3868 // vector-typed operands for loop-varying values. 
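// For example (shorthand, VF = 4): widening a GEP that computes &a[i], where
// the base pointer %a is loop-invariant and the index i varies with the
// loop, keeps %a scalar and widens only the index, producing roughly
//   %vgep = getelementptr inbounds float, float* %a, <4 x i64> %vec.ind
// i.e. a single GEP whose result is a <4 x float*> vector of pointers.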
3869 auto *GEP = cast<GetElementPtrInst>(&I); 3870 3871 if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) { 3872 // If we are vectorizing, but the GEP has only loop-invariant operands, 3873 // the GEP we build (by only using vector-typed operands for 3874 // loop-varying values) would be a scalar pointer. Thus, to ensure we 3875 // produce a vector of pointers, we need to either arbitrarily pick an 3876 // operand to broadcast, or broadcast a clone of the original GEP. 3877 // Here, we broadcast a clone of the original. 3878 // 3879 // TODO: If at some point we decide to scalarize instructions having 3880 // loop-invariant operands, this special case will no longer be 3881 // required. We would add the scalarization decision to 3882 // collectLoopScalars() and teach getVectorValue() to broadcast 3883 // the lane-zero scalar value. 3884 auto *Clone = Builder.Insert(GEP->clone()); 3885 for (unsigned Part = 0; Part < UF; ++Part) { 3886 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 3887 VectorLoopValueMap.setVectorValue(&I, Part, EntryPart); 3888 addMetadata(EntryPart, GEP); 3889 } 3890 } else { 3891 // If the GEP has at least one loop-varying operand, we are sure to 3892 // produce a vector of pointers. But if we are only unrolling, we want 3893 // to produce a scalar GEP for each unroll part. Thus, the GEP we 3894 // produce with the code below will be scalar (if VF == 1) or vector 3895 // (otherwise). Note that for the unroll-only case, we still maintain 3896 // values in the vector mapping with initVector, as we do for other 3897 // instructions. 3898 for (unsigned Part = 0; Part < UF; ++Part) { 3899 // The pointer operand of the new GEP. If it's loop-invariant, we 3900 // won't broadcast it. 3901 auto *Ptr = 3902 OrigLoop->isLoopInvariant(GEP->getPointerOperand()) 3903 ? GEP->getPointerOperand() 3904 : getOrCreateVectorValue(GEP->getPointerOperand(), Part); 3905 3906 // Collect all the indices for the new GEP. If any index is 3907 // loop-invariant, we won't broadcast it. 3908 SmallVector<Value *, 4> Indices; 3909 for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) { 3910 if (OrigLoop->isLoopInvariant(U.get())) 3911 Indices.push_back(U.get()); 3912 else 3913 Indices.push_back(getOrCreateVectorValue(U.get(), Part)); 3914 } 3915 3916 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 3917 // but it should be a vector, otherwise. 3918 auto *NewGEP = GEP->isInBounds() 3919 ? Builder.CreateInBoundsGEP(Ptr, Indices) 3920 : Builder.CreateGEP(Ptr, Indices); 3921 assert((VF == 1 || NewGEP->getType()->isVectorTy()) && 3922 "NewGEP is not a pointer vector"); 3923 VectorLoopValueMap.setVectorValue(&I, Part, NewGEP); 3924 addMetadata(NewGEP, GEP); 3925 } 3926 } 3927 3928 break; 3929 } 3930 case Instruction::UDiv: 3931 case Instruction::SDiv: 3932 case Instruction::SRem: 3933 case Instruction::URem: 3934 case Instruction::Add: 3935 case Instruction::FAdd: 3936 case Instruction::Sub: 3937 case Instruction::FSub: 3938 case Instruction::Mul: 3939 case Instruction::FMul: 3940 case Instruction::FDiv: 3941 case Instruction::FRem: 3942 case Instruction::Shl: 3943 case Instruction::LShr: 3944 case Instruction::AShr: 3945 case Instruction::And: 3946 case Instruction::Or: 3947 case Instruction::Xor: { 3948 // Just widen binops. 
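// For example (VF = 4, illustrative): a scalar 'add nsw i32 %x, %y' becomes
// 'add nsw <4 x i32> %vx, %vy', with the IR flags (nsw/nuw, fast-math)
// copied over from the original instruction below.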
3949 auto *BinOp = cast<BinaryOperator>(&I); 3950 setDebugLocFromInst(Builder, BinOp); 3951 3952 for (unsigned Part = 0; Part < UF; ++Part) { 3953 Value *A = getOrCreateVectorValue(BinOp->getOperand(0), Part); 3954 Value *B = getOrCreateVectorValue(BinOp->getOperand(1), Part); 3955 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B); 3956 3957 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V)) 3958 VecOp->copyIRFlags(BinOp); 3959 3960 // Use this vector value for all users of the original instruction. 3961 VectorLoopValueMap.setVectorValue(&I, Part, V); 3962 addMetadata(V, BinOp); 3963 } 3964 3965 break; 3966 } 3967 case Instruction::Select: { 3968 // Widen selects. 3969 // If the selector is loop invariant we can create a select 3970 // instruction with a scalar condition. Otherwise, use vector-select. 3971 auto *SE = PSE.getSE(); 3972 bool InvariantCond = 3973 SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop); 3974 setDebugLocFromInst(Builder, &I); 3975 3976 // The condition can be loop invariant but still defined inside the 3977 // loop. This means that we can't just use the original 'cond' value. 3978 // We have to take the 'vectorized' value and pick the first lane. 3979 // Instcombine will make this a no-op. 3980 3981 auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0}); 3982 3983 for (unsigned Part = 0; Part < UF; ++Part) { 3984 Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part); 3985 Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part); 3986 Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part); 3987 Value *Sel = 3988 Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1); 3989 VectorLoopValueMap.setVectorValue(&I, Part, Sel); 3990 addMetadata(Sel, &I); 3991 } 3992 3993 break; 3994 } 3995 3996 case Instruction::ICmp: 3997 case Instruction::FCmp: { 3998 // Widen compares. Generate vector compares. 3999 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4000 auto *Cmp = dyn_cast<CmpInst>(&I); 4001 setDebugLocFromInst(Builder, Cmp); 4002 for (unsigned Part = 0; Part < UF; ++Part) { 4003 Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part); 4004 Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part); 4005 Value *C = nullptr; 4006 if (FCmp) { 4007 // Propagate fast math flags. 4008 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4009 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4010 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4011 } else { 4012 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4013 } 4014 VectorLoopValueMap.setVectorValue(&I, Part, C); 4015 addMetadata(C, &I); 4016 } 4017 4018 break; 4019 } 4020 4021 case Instruction::ZExt: 4022 case Instruction::SExt: 4023 case Instruction::FPToUI: 4024 case Instruction::FPToSI: 4025 case Instruction::FPExt: 4026 case Instruction::PtrToInt: 4027 case Instruction::IntToPtr: 4028 case Instruction::SIToFP: 4029 case Instruction::UIToFP: 4030 case Instruction::Trunc: 4031 case Instruction::FPTrunc: 4032 case Instruction::BitCast: { 4033 auto *CI = dyn_cast<CastInst>(&I); 4034 setDebugLocFromInst(Builder, CI); 4035 4036 /// Vectorize casts. 4037 Type *DestTy = 4038 (VF == 1) ? 
CI->getType() : VectorType::get(CI->getType(), VF); 4039 4040 for (unsigned Part = 0; Part < UF; ++Part) { 4041 Value *A = getOrCreateVectorValue(CI->getOperand(0), Part); 4042 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4043 VectorLoopValueMap.setVectorValue(&I, Part, Cast); 4044 addMetadata(Cast, &I); 4045 } 4046 break; 4047 } 4048 4049 case Instruction::Call: { 4050 // Ignore dbg intrinsics. 4051 if (isa<DbgInfoIntrinsic>(I)) 4052 break; 4053 setDebugLocFromInst(Builder, &I); 4054 4055 Module *M = I.getParent()->getParent()->getParent(); 4056 auto *CI = cast<CallInst>(&I); 4057 4058 StringRef FnName = CI->getCalledFunction()->getName(); 4059 Function *F = CI->getCalledFunction(); 4060 Type *RetTy = ToVectorTy(CI->getType(), VF); 4061 SmallVector<Type *, 4> Tys; 4062 for (Value *ArgOperand : CI->arg_operands()) 4063 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 4064 4065 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4066 4067 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4068 // version of the instruction. 4069 // Is it beneficial to perform intrinsic call compared to lib call? 4070 bool NeedToScalarize; 4071 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 4072 bool UseVectorIntrinsic = 4073 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 4074 assert((UseVectorIntrinsic || !NeedToScalarize) && 4075 "Instruction should be scalarized elsewhere."); 4076 4077 for (unsigned Part = 0; Part < UF; ++Part) { 4078 SmallVector<Value *, 4> Args; 4079 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) { 4080 Value *Arg = CI->getArgOperand(i); 4081 // Some intrinsics have a scalar argument - don't replace it with a 4082 // vector. 4083 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) 4084 Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part); 4085 Args.push_back(Arg); 4086 } 4087 4088 Function *VectorF; 4089 if (UseVectorIntrinsic) { 4090 // Use vector version of the intrinsic. 4091 Type *TysForDecl[] = {CI->getType()}; 4092 if (VF > 1) 4093 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4094 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4095 } else { 4096 // Use vector version of the library call. 4097 StringRef VFnName = TLI->getVectorizedFunction(FnName, VF); 4098 assert(!VFnName.empty() && "Vector function name is empty."); 4099 VectorF = M->getFunction(VFnName); 4100 if (!VectorF) { 4101 // Generate a declaration 4102 FunctionType *FTy = FunctionType::get(RetTy, Tys, false); 4103 VectorF = 4104 Function::Create(FTy, Function::ExternalLinkage, VFnName, M); 4105 VectorF->copyAttributesFrom(F); 4106 } 4107 } 4108 assert(VectorF && "Can't create vector function."); 4109 4110 SmallVector<OperandBundleDef, 1> OpBundles; 4111 CI->getOperandBundlesAsDefs(OpBundles); 4112 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4113 4114 if (isa<FPMathOperator>(V)) 4115 V->copyFastMathFlags(CI); 4116 4117 VectorLoopValueMap.setVectorValue(&I, Part, V); 4118 addMetadata(V, &I); 4119 } 4120 4121 break; 4122 } 4123 4124 default: 4125 // This instruction is not vectorized by simple widening. 4126 DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4127 llvm_unreachable("Unhandled instruction!"); 4128 } // end of switch. 4129 } 4130 4131 void InnerLoopVectorizer::updateAnalysis() { 4132 // Forget the original basic block. 4133 PSE.getSE()->forgetLoop(OrigLoop); 4134 4135 // Update the dominator tree information. 
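// Illustrative summary of the edges added below (using the skeleton built in
// createVectorizedLoopSkeleton()): the middle block is dominated by the
// vector loop latch, the scalar preheader and the exit block by the first
// bypass block, and the old scalar body by the scalar preheader.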
4136 assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) && 4137 "Entry does not dominate exit."); 4138 4139 DT->addNewBlock(LoopMiddleBlock, 4140 LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4141 DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]); 4142 DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader); 4143 DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]); 4144 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 4145 } 4146 4147 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) { 4148 // We should not collect Scalars more than once per VF. Right now, this 4149 // function is called from collectUniformsAndScalars(), which already does 4150 // this check. Collecting Scalars for VF=1 does not make any sense. 4151 assert(VF >= 2 && !Scalars.count(VF) && 4152 "This function should not be visited twice for the same VF"); 4153 4154 SmallSetVector<Instruction *, 8> Worklist; 4155 4156 // These sets are used to seed the analysis with pointers used by memory 4157 // accesses that will remain scalar. 4158 SmallSetVector<Instruction *, 8> ScalarPtrs; 4159 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4160 4161 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4162 // The pointer operands of loads and stores will be scalar as long as the 4163 // memory access is not a gather or scatter operation. The value operand of a 4164 // store will remain scalar if the store is scalarized. 4165 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4166 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4167 assert(WideningDecision != CM_Unknown && 4168 "Widening decision should be ready at this moment"); 4169 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4170 if (Ptr == Store->getValueOperand()) 4171 return WideningDecision == CM_Scalarize; 4172 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4173 "Ptr is neither a value or pointer operand"); 4174 return WideningDecision != CM_GatherScatter; 4175 }; 4176 4177 // A helper that returns true if the given value is a bitcast or 4178 // getelementptr instruction contained in the loop. 4179 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4180 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4181 isa<GetElementPtrInst>(V)) && 4182 !TheLoop->isLoopInvariant(V); 4183 }; 4184 4185 // A helper that evaluates a memory access's use of a pointer. If the use 4186 // will be a scalar use, and the pointer is only used by memory accesses, we 4187 // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in 4188 // PossibleNonScalarPtrs. 4189 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4190 // We only care about bitcast and getelementptr instructions contained in 4191 // the loop. 4192 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4193 return; 4194 4195 // If the pointer has already been identified as scalar (e.g., if it was 4196 // also identified as uniform), there's nothing to do. 4197 auto *I = cast<Instruction>(Ptr); 4198 if (Worklist.count(I)) 4199 return; 4200 4201 // If the use of the pointer will be a scalar use, and all users of the 4202 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 4203 // place the pointer in PossibleNonScalarPtrs. 
4204 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 4205 return isa<LoadInst>(U) || isa<StoreInst>(U); 4206 })) 4207 ScalarPtrs.insert(I); 4208 else 4209 PossibleNonScalarPtrs.insert(I); 4210 }; 4211 4212 // We seed the scalars analysis with three classes of instructions: (1) 4213 // instructions marked uniform-after-vectorization, (2) bitcast and 4214 // getelementptr instructions used by memory accesses requiring a scalar use, 4215 // and (3) pointer induction variables and their update instructions (we 4216 // currently only scalarize these). 4217 // 4218 // (1) Add to the worklist all instructions that have been identified as 4219 // uniform-after-vectorization. 4220 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4221 4222 // (2) Add to the worklist all bitcast and getelementptr instructions used by 4223 // memory accesses requiring a scalar use. The pointer operands of loads and 4224 // stores will be scalar as long as the memory accesses is not a gather or 4225 // scatter operation. The value operand of a store will remain scalar if the 4226 // store is scalarized. 4227 for (auto *BB : TheLoop->blocks()) 4228 for (auto &I : *BB) { 4229 if (auto *Load = dyn_cast<LoadInst>(&I)) { 4230 evaluatePtrUse(Load, Load->getPointerOperand()); 4231 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 4232 evaluatePtrUse(Store, Store->getPointerOperand()); 4233 evaluatePtrUse(Store, Store->getValueOperand()); 4234 } 4235 } 4236 for (auto *I : ScalarPtrs) 4237 if (!PossibleNonScalarPtrs.count(I)) { 4238 DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 4239 Worklist.insert(I); 4240 } 4241 4242 // (3) Add to the worklist all pointer induction variables and their update 4243 // instructions. 4244 // 4245 // TODO: Once we are able to vectorize pointer induction variables we should 4246 // no longer insert them into the worklist here. 4247 auto *Latch = TheLoop->getLoopLatch(); 4248 for (auto &Induction : *Legal->getInductionVars()) { 4249 auto *Ind = Induction.first; 4250 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4251 if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction) 4252 continue; 4253 Worklist.insert(Ind); 4254 Worklist.insert(IndUpdate); 4255 DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4256 DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n"); 4257 } 4258 4259 // Insert the forced scalars. 4260 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4261 // induction variable when the PHI user is scalarized. 4262 if (ForcedScalars.count(VF)) 4263 for (auto *I : ForcedScalars.find(VF)->second) 4264 Worklist.insert(I); 4265 4266 // Expand the worklist by looking through any bitcasts and getelementptr 4267 // instructions we've already identified as scalar. This is similar to the 4268 // expansion step in collectLoopUniforms(); however, here we're only 4269 // expanding to include additional bitcasts and getelementptr instructions. 
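  // For example, a getelementptr whose only user is a bitcast that has
  // already been identified as scalar will itself be added to the worklist
  // by the expansion loop below.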
4270 unsigned Idx = 0; 4271 while (Idx != Worklist.size()) { 4272 Instruction *Dst = Worklist[Idx++]; 4273 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4274 continue; 4275 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4276 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4277 auto *J = cast<Instruction>(U); 4278 return !TheLoop->contains(J) || Worklist.count(J) || 4279 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4280 isScalarUse(J, Src)); 4281 })) { 4282 Worklist.insert(Src); 4283 DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4284 } 4285 } 4286 4287 // An induction variable will remain scalar if all users of the induction 4288 // variable and induction variable update remain scalar. 4289 for (auto &Induction : *Legal->getInductionVars()) { 4290 auto *Ind = Induction.first; 4291 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4292 4293 // We already considered pointer induction variables, so there's no reason 4294 // to look at their users again. 4295 // 4296 // TODO: Once we are able to vectorize pointer induction variables we 4297 // should no longer skip over them here. 4298 if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction) 4299 continue; 4300 4301 // Determine if all users of the induction variable are scalar after 4302 // vectorization. 4303 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4304 auto *I = cast<Instruction>(U); 4305 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 4306 }); 4307 if (!ScalarInd) 4308 continue; 4309 4310 // Determine if all users of the induction variable update instruction are 4311 // scalar after vectorization. 4312 auto ScalarIndUpdate = 4313 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4314 auto *I = cast<Instruction>(U); 4315 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 4316 }); 4317 if (!ScalarIndUpdate) 4318 continue; 4319 4320 // The induction variable and its update instruction will remain scalar. 4321 Worklist.insert(Ind); 4322 Worklist.insert(IndUpdate); 4323 DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4324 DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n"); 4325 } 4326 4327 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4328 } 4329 4330 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) { 4331 if (!Legal->blockNeedsPredication(I->getParent())) 4332 return false; 4333 switch(I->getOpcode()) { 4334 default: 4335 break; 4336 case Instruction::Load: 4337 case Instruction::Store: { 4338 if (!Legal->isMaskRequired(I)) 4339 return false; 4340 auto *Ptr = getLoadStorePointerOperand(I); 4341 auto *Ty = getMemInstValueType(I); 4342 return isa<LoadInst>(I) ? 4343 !(isLegalMaskedLoad(Ty, Ptr) || isLegalMaskedGather(Ty)) 4344 : !(isLegalMaskedStore(Ty, Ptr) || isLegalMaskedScatter(Ty)); 4345 } 4346 case Instruction::UDiv: 4347 case Instruction::SDiv: 4348 case Instruction::SRem: 4349 case Instruction::URem: 4350 return mayDivideByZero(*I); 4351 } 4352 return false; 4353 } 4354 4355 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I, 4356 unsigned VF) { 4357 // Get and ensure we have a valid memory instruction. 4358 LoadInst *LI = dyn_cast<LoadInst>(I); 4359 StoreInst *SI = dyn_cast<StoreInst>(I); 4360 assert((LI || SI) && "Invalid memory instruction"); 4361 4362 auto *Ptr = getLoadStorePointerOperand(I); 4363 4364 // In order to be widened, the pointer should be consecutive, first of all. 
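  // (A consecutive pointer advances by exactly one element per iteration,
  // possibly in reverse, e.g. &A[i]; a strided access such as &A[2 * i] or an
  // indirect access like &A[B[i]] is not consecutive and would instead be
  // handled by gather/scatter or by scalarization.)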
  if (!Legal->isConsecutivePtr(Ptr))
    return false;

  // If the instruction is a store located in a predicated block, it will be
  // scalarized.
  if (isScalarWithPredication(I))
    return false;

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  if (hasIrregularType(ScalarTy, DL, VF))
    return false;

  return true;
}

void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
  // We should not collect Uniforms more than once per VF. Right now,
  // this function is called from collectUniformsAndScalars(), which
  // already does this check. Collecting Uniforms for VF=1 does not make any
  // sense.

  assert(VF >= 2 && !Uniforms.count(VF) &&
         "This function should not be visited twice for the same VF");

  // Create an entry for this VF up front. Even if we find no uniform values,
  // we won't analyze this VF again: Uniforms.count(VF) will return 1.
  Uniforms[VF].clear();

  // We now know that the loop is vectorizable!
  // Collect instructions inside the loop that will remain uniform after
  // vectorization.

  // Global values, parameters, and instructions outside of the current loop
  // are out of scope.
  auto isOutOfScope = [&](Value *V) -> bool {
    Instruction *I = dyn_cast<Instruction>(V);
    return (!I || !TheLoop->contains(I));
  };

  SetVector<Instruction *> Worklist;
  BasicBlock *Latch = TheLoop->getLoopLatch();

  // Start with the conditional branch. If the branch condition is an
  // instruction contained in the loop that is only used by the branch, it is
  // uniform.
  auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
  if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) {
    Worklist.insert(Cmp);
    DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
  }

  // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
  // are pointers that are treated like consecutive pointers during
  // vectorization. The pointer operands of interleaved accesses are an
  // example.
  SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;

  // Holds pointer operands of instructions that are possibly non-uniform.
  SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;

  auto isUniformDecision = [&](Instruction *I, unsigned VF) {
    InstWidening WideningDecision = getWideningDecision(I, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");

    return (WideningDecision == CM_Widen ||
            WideningDecision == CM_Widen_Reverse ||
            WideningDecision == CM_Interleave);
  };
  // Iterate over the instructions in the loop, and collect all
  // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
  // that a consecutive-like pointer operand will be scalarized, we collect it
  // in PossibleNonUniformPtrs instead. We use two sets here because a single
  // getelementptr instruction can be used by both vectorized and scalarized
  // memory instructions. For example, if a loop loads and stores from the same
  // location, but the store is conditional, the store will be scalarized, and
  // the getelementptr won't remain uniform.
4445 for (auto *BB : TheLoop->blocks()) 4446 for (auto &I : *BB) { 4447 // If there's no pointer operand, there's nothing to do. 4448 auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 4449 if (!Ptr) 4450 continue; 4451 4452 // True if all users of Ptr are memory accesses that have Ptr as their 4453 // pointer operand. 4454 auto UsersAreMemAccesses = 4455 llvm::all_of(Ptr->users(), [&](User *U) -> bool { 4456 return getLoadStorePointerOperand(U) == Ptr; 4457 }); 4458 4459 // Ensure the memory instruction will not be scalarized or used by 4460 // gather/scatter, making its pointer operand non-uniform. If the pointer 4461 // operand is used by any instruction other than a memory access, we 4462 // conservatively assume the pointer operand may be non-uniform. 4463 if (!UsersAreMemAccesses || !isUniformDecision(&I, VF)) 4464 PossibleNonUniformPtrs.insert(Ptr); 4465 4466 // If the memory instruction will be vectorized and its pointer operand 4467 // is consecutive-like, or interleaving - the pointer operand should 4468 // remain uniform. 4469 else 4470 ConsecutiveLikePtrs.insert(Ptr); 4471 } 4472 4473 // Add to the Worklist all consecutive and consecutive-like pointers that 4474 // aren't also identified as possibly non-uniform. 4475 for (auto *V : ConsecutiveLikePtrs) 4476 if (!PossibleNonUniformPtrs.count(V)) { 4477 DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n"); 4478 Worklist.insert(V); 4479 } 4480 4481 // Expand Worklist in topological order: whenever a new instruction 4482 // is added , its users should be either already inside Worklist, or 4483 // out of scope. It ensures a uniform instruction will only be used 4484 // by uniform instructions or out of scope instructions. 4485 unsigned idx = 0; 4486 while (idx != Worklist.size()) { 4487 Instruction *I = Worklist[idx++]; 4488 4489 for (auto OV : I->operand_values()) { 4490 if (isOutOfScope(OV)) 4491 continue; 4492 auto *OI = cast<Instruction>(OV); 4493 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 4494 auto *J = cast<Instruction>(U); 4495 return !TheLoop->contains(J) || Worklist.count(J) || 4496 (OI == getLoadStorePointerOperand(J) && 4497 isUniformDecision(J, VF)); 4498 })) { 4499 Worklist.insert(OI); 4500 DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n"); 4501 } 4502 } 4503 } 4504 4505 // Returns true if Ptr is the pointer operand of a memory access instruction 4506 // I, and I is known to not require scalarization. 4507 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 4508 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 4509 }; 4510 4511 // For an instruction to be added into Worklist above, all its users inside 4512 // the loop should also be in Worklist. However, this condition cannot be 4513 // true for phi nodes that form a cyclic dependence. We must process phi 4514 // nodes separately. An induction variable will remain uniform if all users 4515 // of the induction variable and induction variable update remain uniform. 4516 // The code below handles both pointer and non-pointer induction variables. 4517 for (auto &Induction : *Legal->getInductionVars()) { 4518 auto *Ind = Induction.first; 4519 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4520 4521 // Determine if all users of the induction variable are uniform after 4522 // vectorization. 
4523 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4524 auto *I = cast<Instruction>(U); 4525 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4526 isVectorizedMemAccessUse(I, Ind); 4527 }); 4528 if (!UniformInd) 4529 continue; 4530 4531 // Determine if all users of the induction variable update instruction are 4532 // uniform after vectorization. 4533 auto UniformIndUpdate = 4534 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4535 auto *I = cast<Instruction>(U); 4536 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4537 isVectorizedMemAccessUse(I, IndUpdate); 4538 }); 4539 if (!UniformIndUpdate) 4540 continue; 4541 4542 // The induction variable and its update instruction will remain uniform. 4543 Worklist.insert(Ind); 4544 Worklist.insert(IndUpdate); 4545 DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n"); 4546 DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n"); 4547 } 4548 4549 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 4550 } 4551 4552 void InterleavedAccessInfo::collectConstStrideAccesses( 4553 MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo, 4554 const ValueToValueMap &Strides) { 4555 auto &DL = TheLoop->getHeader()->getModule()->getDataLayout(); 4556 4557 // Since it's desired that the load/store instructions be maintained in 4558 // "program order" for the interleaved access analysis, we have to visit the 4559 // blocks in the loop in reverse postorder (i.e., in a topological order). 4560 // Such an ordering will ensure that any load/store that may be executed 4561 // before a second load/store will precede the second load/store in 4562 // AccessStrideInfo. 4563 LoopBlocksDFS DFS(TheLoop); 4564 DFS.perform(LI); 4565 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) 4566 for (auto &I : *BB) { 4567 auto *LI = dyn_cast<LoadInst>(&I); 4568 auto *SI = dyn_cast<StoreInst>(&I); 4569 if (!LI && !SI) 4570 continue; 4571 4572 Value *Ptr = getLoadStorePointerOperand(&I); 4573 // We don't check wrapping here because we don't know yet if Ptr will be 4574 // part of a full group or a group with gaps. Checking wrapping for all 4575 // pointers (even those that end up in groups with no gaps) will be overly 4576 // conservative. For full groups, wrapping should be ok since if we would 4577 // wrap around the address space we would do a memory access at nullptr 4578 // even without the transformation. The wrapping checks are therefore 4579 // deferred until after we've formed the interleaved groups. 4580 int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, 4581 /*Assume=*/true, /*ShouldCheckWrap=*/false); 4582 4583 const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 4584 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); 4585 uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType()); 4586 4587 // An alignment of 0 means target ABI alignment. 4588 unsigned Align = getMemInstAlignment(&I); 4589 if (!Align) 4590 Align = DL.getABITypeAlignment(PtrTy->getElementType()); 4591 4592 AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align); 4593 } 4594 } 4595 4596 // Analyze interleaved accesses and collect them into interleaved load and 4597 // store groups. 4598 // 4599 // When generating code for an interleaved load group, we effectively hoist all 4600 // loads in the group to the location of the first load in program order. 
When 4601 // generating code for an interleaved store group, we sink all stores to the 4602 // location of the last store. This code motion can change the order of load 4603 // and store instructions and may break dependences. 4604 // 4605 // The code generation strategy mentioned above ensures that we won't violate 4606 // any write-after-read (WAR) dependences. 4607 // 4608 // E.g., for the WAR dependence: a = A[i]; // (1) 4609 // A[i] = b; // (2) 4610 // 4611 // The store group of (2) is always inserted at or below (2), and the load 4612 // group of (1) is always inserted at or above (1). Thus, the instructions will 4613 // never be reordered. All other dependences are checked to ensure the 4614 // correctness of the instruction reordering. 4615 // 4616 // The algorithm visits all memory accesses in the loop in bottom-up program 4617 // order. Program order is established by traversing the blocks in the loop in 4618 // reverse postorder when collecting the accesses. 4619 // 4620 // We visit the memory accesses in bottom-up order because it can simplify the 4621 // construction of store groups in the presence of write-after-write (WAW) 4622 // dependences. 4623 // 4624 // E.g., for the WAW dependence: A[i] = a; // (1) 4625 // A[i] = b; // (2) 4626 // A[i + 1] = c; // (3) 4627 // 4628 // We will first create a store group with (3) and (2). (1) can't be added to 4629 // this group because it and (2) are dependent. However, (1) can be grouped 4630 // with other accesses that may precede it in program order. Note that a 4631 // bottom-up order does not imply that WAW dependences should not be checked. 4632 void InterleavedAccessInfo::analyzeInterleaving() { 4633 DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n"); 4634 const ValueToValueMap &Strides = LAI->getSymbolicStrides(); 4635 4636 // Holds all accesses with a constant stride. 4637 MapVector<Instruction *, StrideDescriptor> AccessStrideInfo; 4638 collectConstStrideAccesses(AccessStrideInfo, Strides); 4639 4640 if (AccessStrideInfo.empty()) 4641 return; 4642 4643 // Collect the dependences in the loop. 4644 collectDependences(); 4645 4646 // Holds all interleaved store groups temporarily. 4647 SmallSetVector<InterleaveGroup *, 4> StoreGroups; 4648 // Holds all interleaved load groups temporarily. 4649 SmallSetVector<InterleaveGroup *, 4> LoadGroups; 4650 4651 // Search in bottom-up program order for pairs of accesses (A and B) that can 4652 // form interleaved load or store groups. In the algorithm below, access A 4653 // precedes access B in program order. We initialize a group for B in the 4654 // outer loop of the algorithm, and then in the inner loop, we attempt to 4655 // insert each A into B's group if: 4656 // 4657 // 1. A and B have the same stride, 4658 // 2. A and B have the same memory object size, and 4659 // 3. A belongs in B's group according to its distance from B. 4660 // 4661 // Special care is taken to ensure group formation will not break any 4662 // dependences. 4663 for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend(); 4664 BI != E; ++BI) { 4665 Instruction *B = BI->first; 4666 StrideDescriptor DesB = BI->second; 4667 4668 // Initialize a group for B if it has an allowable stride. Even if we don't 4669 // create a group for B, we continue with the bottom-up algorithm to ensure 4670 // we don't break any of B's dependences. 
4671 InterleaveGroup *Group = nullptr; 4672 if (isStrided(DesB.Stride)) { 4673 Group = getInterleaveGroup(B); 4674 if (!Group) { 4675 DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n'); 4676 Group = createInterleaveGroup(B, DesB.Stride, DesB.Align); 4677 } 4678 if (B->mayWriteToMemory()) 4679 StoreGroups.insert(Group); 4680 else 4681 LoadGroups.insert(Group); 4682 } 4683 4684 for (auto AI = std::next(BI); AI != E; ++AI) { 4685 Instruction *A = AI->first; 4686 StrideDescriptor DesA = AI->second; 4687 4688 // Our code motion strategy implies that we can't have dependences 4689 // between accesses in an interleaved group and other accesses located 4690 // between the first and last member of the group. Note that this also 4691 // means that a group can't have more than one member at a given offset. 4692 // The accesses in a group can have dependences with other accesses, but 4693 // we must ensure we don't extend the boundaries of the group such that 4694 // we encompass those dependent accesses. 4695 // 4696 // For example, assume we have the sequence of accesses shown below in a 4697 // stride-2 loop: 4698 // 4699 // (1, 2) is a group | A[i] = a; // (1) 4700 // | A[i-1] = b; // (2) | 4701 // A[i-3] = c; // (3) 4702 // A[i] = d; // (4) | (2, 4) is not a group 4703 // 4704 // Because accesses (2) and (3) are dependent, we can group (2) with (1) 4705 // but not with (4). If we did, the dependent access (3) would be within 4706 // the boundaries of the (2, 4) group. 4707 if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) { 4708 // If a dependence exists and A is already in a group, we know that A 4709 // must be a store since A precedes B and WAR dependences are allowed. 4710 // Thus, A would be sunk below B. We release A's group to prevent this 4711 // illegal code motion. A will then be free to form another group with 4712 // instructions that precede it. 4713 if (isInterleaved(A)) { 4714 InterleaveGroup *StoreGroup = getInterleaveGroup(A); 4715 StoreGroups.remove(StoreGroup); 4716 releaseGroup(StoreGroup); 4717 } 4718 4719 // If a dependence exists and A is not already in a group (or it was 4720 // and we just released it), B might be hoisted above A (if B is a 4721 // load) or another store might be sunk below A (if B is a store). In 4722 // either case, we can't add additional instructions to B's group. B 4723 // will only form a group with instructions that it precedes. 4724 break; 4725 } 4726 4727 // At this point, we've checked for illegal code motion. If either A or B 4728 // isn't strided, there's nothing left to do. 4729 if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride)) 4730 continue; 4731 4732 // Ignore A if it's already in a group or isn't the same kind of memory 4733 // operation as B. 4734 // Note that mayReadFromMemory() isn't mutually exclusive to mayWriteToMemory 4735 // in the case of atomic loads. We shouldn't see those here, canVectorizeMemory() 4736 // should have returned false - except for the case we asked for optimization 4737 // remarks. 4738 if (isInterleaved(A) || (A->mayReadFromMemory() != B->mayReadFromMemory()) 4739 || (A->mayWriteToMemory() != B->mayWriteToMemory())) 4740 continue; 4741 4742 // Check rules 1 and 2. Ignore A if its stride or size is different from 4743 // that of B. 
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory objects of A and B don't belong to the same
      // address space.
      if (getMemInstAddressSpace(A) != getMemInstAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // Ignore A if either A or B is in a predicated block. Although we
      // currently prevent group formation for predicated accesses, we may be
      // able to relax this limitation in the future once we handle more
      // complicated blocks.
      if (isPredicated(A->getParent()) || isPredicated(B->getParent()))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Align)) {
        DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                     << "    into the interleave group with" << *B << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (InterleaveGroup *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor()) {
      DEBUG(dbgs() << "LV: Invalidate candidate interleaved store group due "
                      "to gaps.\n");
      releaseGroup(Group);
    }
  // Remove interleaved groups with gaps (currently only loads) whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumption checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer
  // doesn't wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. When we change to use Assume=true
  // we'll only need at most one runtime check per interleaved group.
  for (InterleaveGroup *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If the first and last members of the group don't wrap, this
    // implies that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and group member Factor - 1; if the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
    if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                      /*ShouldCheckWrap=*/true)) {
      DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "first group member potentially pointer-wrapping.\n");
      releaseGroup(Group);
      continue;
    }
    Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
    if (LastMember) {
      Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
      if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                        /*ShouldCheckWrap=*/true)) {
        DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                        "last group member potentially pointer-wrapping.\n");
        releaseGroup(Group);
      }
    } else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                        "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }
}

Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) {
  if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this, since it's still likely to be
    // dynamically uniform if the target can skip.
    DEBUG(dbgs() << "LV: Not inserting runtime ptr check for divergent target");

    ORE->emit(
        createMissedAnalysis("CantVersionLoopWithDivergentTarget")
        << "runtime pointer checks needed. Not enabled for divergent target");

    return None;
  }

  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  if (!OptForSize) // Remaining checks deal with scalar loop when OptForSize.
    return computeFeasibleMaxVF(OptForSize, TC);

  if (Legal->getRuntimePointerChecking()->Need) {
    ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize")
              << "runtime pointer checks needed. Enable vectorization of this "
                 "loop with '#pragma clang loop vectorize(enable)' when "
                 "compiling with -Os/-Oz");
    DEBUG(dbgs()
          << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
    return None;
  }

  // If we optimize the program for size, avoid creating the tail loop.
  DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');

  // If we don't know the precise trip count, don't try to vectorize.
  if (TC < 2) {
    ORE->emit(
        createMissedAnalysis("UnknownLoopCountComplexCFG")
        << "unable to calculate the loop count due to complex control flow");
    DEBUG(dbgs() << "LV: Aborting.
A tail loop is required with -Os/-Oz.\n"); 4893 return None; 4894 } 4895 4896 unsigned MaxVF = computeFeasibleMaxVF(OptForSize, TC); 4897 4898 if (TC % MaxVF != 0) { 4899 // If the trip count that we found modulo the vectorization factor is not 4900 // zero then we require a tail. 4901 // FIXME: look for a smaller MaxVF that does divide TC rather than give up. 4902 // FIXME: return None if loop requiresScalarEpilog(<MaxVF>), or look for a 4903 // smaller MaxVF that does not require a scalar epilog. 4904 4905 ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize") 4906 << "cannot optimize for size and vectorize at the " 4907 "same time. Enable vectorization of this loop " 4908 "with '#pragma clang loop vectorize(enable)' " 4909 "when compiling with -Os/-Oz"); 4910 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); 4911 return None; 4912 } 4913 4914 return MaxVF; 4915 } 4916 4917 unsigned 4918 LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize, 4919 unsigned ConstTripCount) { 4920 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 4921 unsigned SmallestType, WidestType; 4922 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 4923 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 4924 4925 // Get the maximum safe dependence distance in bits computed by LAA. 4926 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 4927 // the memory accesses that is most restrictive (involved in the smallest 4928 // dependence distance). 4929 unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth(); 4930 4931 WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth); 4932 4933 unsigned MaxVectorSize = WidestRegister / WidestType; 4934 4935 DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / " 4936 << WidestType << " bits.\n"); 4937 DEBUG(dbgs() << "LV: The Widest register safe to use is: " << WidestRegister 4938 << " bits.\n"); 4939 4940 assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements" 4941 " into one vector!"); 4942 if (MaxVectorSize == 0) { 4943 DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 4944 MaxVectorSize = 1; 4945 return MaxVectorSize; 4946 } else if (ConstTripCount && ConstTripCount < MaxVectorSize && 4947 isPowerOf2_32(ConstTripCount)) { 4948 // We need to clamp the VF to be the ConstTripCount. There is no point in 4949 // choosing a higher viable VF as done in the loop below. 4950 DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 4951 << ConstTripCount << "\n"); 4952 MaxVectorSize = ConstTripCount; 4953 return MaxVectorSize; 4954 } 4955 4956 unsigned MaxVF = MaxVectorSize; 4957 if (TTI.shouldMaximizeVectorBandwidth(OptForSize) || 4958 (MaximizeBandwidth && !OptForSize)) { 4959 // Collect all viable vectorization factors larger than the default MaxVF 4960 // (i.e. MaxVectorSize). 4961 SmallVector<unsigned, 8> VFs; 4962 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 4963 for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2) 4964 VFs.push_back(VS); 4965 4966 // For each VF calculate its register usage. 4967 auto RUs = calculateRegisterUsage(VFs); 4968 4969 // Select the largest VF which doesn't require more registers than existing 4970 // ones. 
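    // For example, with 128-bit vector registers, i32 as the widest type and
    // i8 as the smallest, the default MaxVF is 4 and the candidate VFs
    // collected above are 8 and 16; the largest candidate whose estimated
    // register usage still fits the target is chosen. (Illustrative numbers
    // only.)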
4971 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true); 4972 for (int i = RUs.size() - 1; i >= 0; --i) { 4973 if (RUs[i].MaxLocalUsers <= TargetNumRegisters) { 4974 MaxVF = VFs[i]; 4975 break; 4976 } 4977 } 4978 if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) { 4979 if (MaxVF < MinVF) { 4980 DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 4981 << ") with target's minimum: " << MinVF << '\n'); 4982 MaxVF = MinVF; 4983 } 4984 } 4985 } 4986 return MaxVF; 4987 } 4988 4989 VectorizationFactor 4990 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) { 4991 float Cost = expectedCost(1).first; 4992 const float ScalarCost = Cost; 4993 unsigned Width = 1; 4994 DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 4995 4996 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 4997 // Ignore scalar width, because the user explicitly wants vectorization. 4998 if (ForceVectorization && MaxVF > 1) { 4999 Width = 2; 5000 Cost = expectedCost(Width).first / (float)Width; 5001 } 5002 5003 for (unsigned i = 2; i <= MaxVF; i *= 2) { 5004 // Notice that the vector loop needs to be executed less times, so 5005 // we need to divide the cost of the vector loops by the width of 5006 // the vector elements. 5007 VectorizationCostTy C = expectedCost(i); 5008 float VectorCost = C.first / (float)i; 5009 DEBUG(dbgs() << "LV: Vector loop of width " << i 5010 << " costs: " << (int)VectorCost << ".\n"); 5011 if (!C.second && !ForceVectorization) { 5012 DEBUG( 5013 dbgs() << "LV: Not considering vector loop of width " << i 5014 << " because it will not generate any vector instructions.\n"); 5015 continue; 5016 } 5017 if (VectorCost < Cost) { 5018 Cost = VectorCost; 5019 Width = i; 5020 } 5021 } 5022 5023 if (!EnableCondStoresVectorization && NumPredStores) { 5024 ORE->emit(createMissedAnalysis("ConditionalStore") 5025 << "store that is conditionally executed prevents vectorization"); 5026 DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n"); 5027 Width = 1; 5028 Cost = ScalarCost; 5029 } 5030 5031 DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 5032 << "LV: Vectorization seems to be not beneficial, " 5033 << "but was forced by a user.\n"); 5034 DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 5035 VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)}; 5036 return Factor; 5037 } 5038 5039 std::pair<unsigned, unsigned> 5040 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5041 unsigned MinWidth = -1U; 5042 unsigned MaxWidth = 8; 5043 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5044 5045 // For each block. 5046 for (BasicBlock *BB : TheLoop->blocks()) { 5047 // For each instruction in the loop. 5048 for (Instruction &I : *BB) { 5049 Type *T = I.getType(); 5050 5051 // Skip ignored values. 5052 if (ValuesToIgnore.count(&I)) 5053 continue; 5054 5055 // Only examine Loads, Stores and PHINodes. 5056 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5057 continue; 5058 5059 // Examine PHI nodes that are reduction variables. Update the type to 5060 // account for the recurrence type. 5061 if (auto *PN = dyn_cast<PHINode>(&I)) { 5062 if (!Legal->isReductionVariable(PN)) 5063 continue; 5064 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN]; 5065 T = RdxDesc.getRecurrenceType(); 5066 } 5067 5068 // Examine the stored values. 
5069 if (auto *ST = dyn_cast<StoreInst>(&I)) 5070 T = ST->getValueOperand()->getType(); 5071 5072 // Ignore loaded pointer types and stored pointer types that are not 5073 // vectorizable. 5074 // 5075 // FIXME: The check here attempts to predict whether a load or store will 5076 // be vectorized. We only know this for certain after a VF has 5077 // been selected. Here, we assume that if an access can be 5078 // vectorized, it will be. We should also look at extending this 5079 // optimization to non-pointer types. 5080 // 5081 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 5082 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 5083 continue; 5084 5085 MinWidth = std::min(MinWidth, 5086 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5087 MaxWidth = std::max(MaxWidth, 5088 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5089 } 5090 } 5091 5092 return {MinWidth, MaxWidth}; 5093 } 5094 5095 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, 5096 unsigned VF, 5097 unsigned LoopCost) { 5098 // -- The interleave heuristics -- 5099 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5100 // There are many micro-architectural considerations that we can't predict 5101 // at this level. For example, frontend pressure (on decode or fetch) due to 5102 // code size, or the number and capabilities of the execution ports. 5103 // 5104 // We use the following heuristics to select the interleave count: 5105 // 1. If the code has reductions, then we interleave to break the cross 5106 // iteration dependency. 5107 // 2. If the loop is really small, then we interleave to reduce the loop 5108 // overhead. 5109 // 3. We don't interleave if we think that we will spill registers to memory 5110 // due to the increased register pressure. 5111 5112 // When we optimize for size, we don't interleave. 5113 if (OptForSize) 5114 return 1; 5115 5116 // We used the distance for the interleave count. 5117 if (Legal->getMaxSafeDepDistBytes() != -1U) 5118 return 1; 5119 5120 // Do not interleave loops with a relatively small trip count. 5121 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5122 if (TC > 1 && TC < TinyTripCountInterleaveThreshold) 5123 return 1; 5124 5125 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1); 5126 DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 5127 << " registers\n"); 5128 5129 if (VF == 1) { 5130 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 5131 TargetNumRegisters = ForceTargetNumScalarRegs; 5132 } else { 5133 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 5134 TargetNumRegisters = ForceTargetNumVectorRegs; 5135 } 5136 5137 RegisterUsage R = calculateRegisterUsage({VF})[0]; 5138 // We divide by these constants so assume that we have at least one 5139 // instruction that uses at least one register. 5140 R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U); 5141 5142 // We calculate the interleave count using the following formula. 5143 // Subtract the number of loop invariants from the number of available 5144 // registers. These registers are used by all of the interleaved instances. 5145 // Next, divide the remaining registers by the number of registers that is 5146 // required by the loop, in order to estimate how many parallel instances 5147 // fit without causing spills. All of this is rounded down if necessary to be 5148 // a power of two. We want power of two interleave count to simplify any 5149 // addressing operations or alignment considerations. 
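  // For example, with 16 usable registers, 2 of them occupied by
  // loop-invariant values and at most 3 registers live inside the loop at
  // once, this gives PowerOf2Floor((16 - 2) / 3) = PowerOf2Floor(4) = 4
  // interleaved instances. (Illustrative numbers only.)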
5150 unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) / 5151 R.MaxLocalUsers); 5152 5153 // Don't count the induction variable as interleaved. 5154 if (EnableIndVarRegisterHeur) 5155 IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) / 5156 std::max(1U, (R.MaxLocalUsers - 1))); 5157 5158 // Clamp the interleave ranges to reasonable counts. 5159 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF); 5160 5161 // Check if the user has overridden the max. 5162 if (VF == 1) { 5163 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5164 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5165 } else { 5166 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5167 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5168 } 5169 5170 // If we did not calculate the cost for VF (because the user selected the VF) 5171 // then we calculate the cost of VF here. 5172 if (LoopCost == 0) 5173 LoopCost = expectedCost(VF).first; 5174 5175 // Clamp the calculated IC to be between the 1 and the max interleave count 5176 // that the target allows. 5177 if (IC > MaxInterleaveCount) 5178 IC = MaxInterleaveCount; 5179 else if (IC < 1) 5180 IC = 1; 5181 5182 // Interleave if we vectorized this loop and there is a reduction that could 5183 // benefit from interleaving. 5184 if (VF > 1 && !Legal->getReductionVars()->empty()) { 5185 DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5186 return IC; 5187 } 5188 5189 // Note that if we've already vectorized the loop we will have done the 5190 // runtime check and so interleaving won't require further checks. 5191 bool InterleavingRequiresRuntimePointerCheck = 5192 (VF == 1 && Legal->getRuntimePointerChecking()->Need); 5193 5194 // We want to interleave small loops in order to reduce the loop overhead and 5195 // potentially expose ILP opportunities. 5196 DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'); 5197 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 5198 // We assume that the cost overhead is 1 and we use the cost model 5199 // to estimate the cost of the loop and interleave until the cost of the 5200 // loop overhead is about 5% of the cost of the loop. 5201 unsigned SmallIC = 5202 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5203 5204 // Interleave until store/load ports (estimated by max interleave count) are 5205 // saturated. 5206 unsigned NumStores = Legal->getNumStores(); 5207 unsigned NumLoads = Legal->getNumLoads(); 5208 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5209 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5210 5211 // If we have a scalar reduction (vector reductions are already dealt with 5212 // by this point), we can increase the critical path length if the loop 5213 // we're interleaving is inside another loop. Limit, by default to 2, so the 5214 // critical path only gets increased by one reduction operation. 
    if (!Legal->getReductionVars()->empty() && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
    return SmallIC;
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  bool HasReductions = !Legal->getReductionVars()->empty();
  if (TTI.enableAggressiveInterleaving(HasReductions)) {
    DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is a
  // very rough estimation. We scan the loop in topological order and assign a
  // number to each instruction. We use RPO to ensure that defs are met before
  // their users. We assume that each instruction that has in-loop users starts
  // an interval. We record every time that an in-loop value is used, so we
  // have a list of the first and last occurrences of each instruction. Next,
  // we transpose this data structure into a multi-map that holds the list of
  // intervals that *end* at a specific location. This multi-map allows us to
  // perform a linear search. We scan the instructions linearly and record each
  // time that a new interval starts by placing it in a set. If we find this
  // value in the multi-map then we remove it from the set. The max register
  // usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but are
  // used inside the loop. We need this number separately from the max-interval
  // usage number because when we unroll, loop-invariant values do not take
  // more registers.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  using IntervalMap = DenseMap<Instruction *, unsigned>;

  // Maps an instruction's index to the instruction.
  DenseMap<unsigned, Instruction *> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the list of instructions that are used in the loop.
  SmallSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  unsigned Index = 0;
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    for (Instruction &I : *BB) {
      IdxToInstr[Index++] = &I;

      // Save the end location of each USE.
5289 for (Value *U : I.operands()) { 5290 auto *Instr = dyn_cast<Instruction>(U); 5291 5292 // Ignore non-instruction values such as arguments, constants, etc. 5293 if (!Instr) 5294 continue; 5295 5296 // If this instruction is outside the loop then record it and continue. 5297 if (!TheLoop->contains(Instr)) { 5298 LoopInvariants.insert(Instr); 5299 continue; 5300 } 5301 5302 // Overwrite previous end points. 5303 EndPoint[Instr] = Index; 5304 Ends.insert(Instr); 5305 } 5306 } 5307 } 5308 5309 // Saves the list of intervals that end with the index in 'key'. 5310 using InstrList = SmallVector<Instruction *, 2>; 5311 DenseMap<unsigned, InstrList> TransposeEnds; 5312 5313 // Transpose the EndPoints to a list of values that end at each index. 5314 for (auto &Interval : EndPoint) 5315 TransposeEnds[Interval.second].push_back(Interval.first); 5316 5317 SmallSet<Instruction *, 8> OpenIntervals; 5318 5319 // Get the size of the widest register. 5320 unsigned MaxSafeDepDist = -1U; 5321 if (Legal->getMaxSafeDepDistBytes() != -1U) 5322 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 5323 unsigned WidestRegister = 5324 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist); 5325 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5326 5327 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5328 SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0); 5329 5330 DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5331 5332 // A lambda that gets the register usage for the given type and VF. 5333 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 5334 if (Ty->isTokenTy()) 5335 return 0U; 5336 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 5337 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 5338 }; 5339 5340 for (unsigned int i = 0; i < Index; ++i) { 5341 Instruction *I = IdxToInstr[i]; 5342 5343 // Remove all of the instructions that end at this location. 5344 InstrList &List = TransposeEnds[i]; 5345 for (Instruction *ToRemove : List) 5346 OpenIntervals.erase(ToRemove); 5347 5348 // Ignore instructions that are never used within the loop. 5349 if (!Ends.count(I)) 5350 continue; 5351 5352 // Skip ignored values. 5353 if (ValuesToIgnore.count(I)) 5354 continue; 5355 5356 // For each VF find the maximum usage of registers. 5357 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 5358 if (VFs[j] == 1) { 5359 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 5360 continue; 5361 } 5362 collectUniformsAndScalars(VFs[j]); 5363 // Count the number of live intervals. 5364 unsigned RegUsage = 0; 5365 for (auto Inst : OpenIntervals) { 5366 // Skip ignored values for VF > 1. 5367 if (VecValuesToIgnore.count(Inst) || 5368 isScalarAfterVectorization(Inst, VFs[j])) 5369 continue; 5370 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 5371 } 5372 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 5373 } 5374 5375 DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 5376 << OpenIntervals.size() << '\n'); 5377 5378 // Add the current instruction to the list of open intervals. 
    OpenIntervals.insert(I);
  }

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    unsigned Invariant = 0;
    if (VFs[i] == 1)
      Invariant = LoopInvariants.size();
    else {
      for (auto Inst : LoopInvariants)
        Invariant += GetRegUsage(Inst->getType(), VFs[i]);
    }

    DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
    DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
    DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[i];
    RUs[i] = RU;
  }

  return RUs;
}

bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
  // TODO: Cost model for emulated masked load/store is completely
  // broken. This hack guides the cost model to use an artificially
  // high enough value to practically disable vectorization with such
  // operations, except where the previously deployed legality hack allowed
  // using very low cost values. This is to avoid regressions coming simply
  // from moving the "masked load/store" check from legality to the cost model.
  // Masked load/gather emulation was previously never allowed.
  // A limited amount of masked store/scatter emulation was allowed.
  assert(isScalarWithPredication(I) &&
         "Expecting a scalar emulated instruction");
  return isa<LoadInst>(I) ||
         (isa<StoreInst>(I) &&
          NumPredStores > NumberOfStoresToPredicate);
}

void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
  // If we aren't vectorizing the loop, or if we've already collected the
  // instructions to scalarize, there's nothing to do. Collection may already
  // have occurred if we have a user-selected VF and are now computing the
  // expected cost for interleaving.
  if (VF < 2 || InstsToScalarize.count(VF))
    return;

  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
  // not profitable to scalarize any instructions, the presence of VF in the
  // map will indicate that we've analyzed it already.
  ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];

  // Find all the instructions that are scalar with predication in the loop and
  // determine if it would be better not to if-convert the blocks they are in.
  // If so, we also record the instructions to scalarize.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!Legal->blockNeedsPredication(BB))
      continue;
    for (Instruction &I : *BB)
      if (isScalarWithPredication(&I)) {
        ScalarCostsTy ScalarCosts;
        // Do not apply discount logic if hacked cost is needed
        // for emulated masked memrefs.
        if (!useEmulatedMaskMemRefHack(&I) &&
            computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
          ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
        // Remember that BB will remain after vectorization.
        PredicatedBBsAfterVectorization.insert(BB);
      }
  }
}

int LoopVectorizationCostModel::computePredInstDiscount(
    Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
    unsigned VF) {
  assert(!isUniformAfterVectorization(PredInst, VF) &&
         "Instruction marked uniform-after-vectorization will be predicated");

  // Initialize the discount to zero, meaning that the scalar version and the
  // vector version cost the same.
  int Discount = 0;

  // Holds instructions to analyze.
The instructions we visit are mapped in 5463 // ScalarCosts. Those instructions are the ones that would be scalarized if 5464 // we find that the scalar version costs less. 5465 SmallVector<Instruction *, 8> Worklist; 5466 5467 // Returns true if the given instruction can be scalarized. 5468 auto canBeScalarized = [&](Instruction *I) -> bool { 5469 // We only attempt to scalarize instructions forming a single-use chain 5470 // from the original predicated block that would otherwise be vectorized. 5471 // Although not strictly necessary, we give up on instructions we know will 5472 // already be scalar to avoid traversing chains that are unlikely to be 5473 // beneficial. 5474 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 5475 isScalarAfterVectorization(I, VF)) 5476 return false; 5477 5478 // If the instruction is scalar with predication, it will be analyzed 5479 // separately. We ignore it within the context of PredInst. 5480 if (isScalarWithPredication(I)) 5481 return false; 5482 5483 // If any of the instruction's operands are uniform after vectorization, 5484 // the instruction cannot be scalarized. This prevents, for example, a 5485 // masked load from being scalarized. 5486 // 5487 // We assume we will only emit a value for lane zero of an instruction 5488 // marked uniform after vectorization, rather than VF identical values. 5489 // Thus, if we scalarize an instruction that uses a uniform, we would 5490 // create uses of values corresponding to the lanes we aren't emitting code 5491 // for. This behavior can be changed by allowing getScalarValue to clone 5492 // the lane zero values for uniforms rather than asserting. 5493 for (Use &U : I->operands()) 5494 if (auto *J = dyn_cast<Instruction>(U.get())) 5495 if (isUniformAfterVectorization(J, VF)) 5496 return false; 5497 5498 // Otherwise, we can scalarize the instruction. 5499 return true; 5500 }; 5501 5502 // Returns true if an operand that cannot be scalarized must be extracted 5503 // from a vector. We will account for this scalarization overhead below. Note 5504 // that the non-void predicated instructions are placed in their own blocks, 5505 // and their return values are inserted into vectors. Thus, an extract would 5506 // still be required. 5507 auto needsExtract = [&](Instruction *I) -> bool { 5508 return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF); 5509 }; 5510 5511 // Compute the expected cost discount from scalarizing the entire expression 5512 // feeding the predicated instruction. We currently only consider expressions 5513 // that are single-use instruction chains. 5514 Worklist.push_back(PredInst); 5515 while (!Worklist.empty()) { 5516 Instruction *I = Worklist.pop_back_val(); 5517 5518 // If we've already analyzed the instruction, there's nothing to do. 5519 if (ScalarCosts.count(I)) 5520 continue; 5521 5522 // Compute the cost of the vector instruction. Note that this cost already 5523 // includes the scalarization overhead of the predicated instruction. 5524 unsigned VectorCost = getInstructionCost(I, VF).first; 5525 5526 // Compute the cost of the scalarized instruction. This cost is the cost of 5527 // the instruction as if it wasn't if-converted and instead remained in the 5528 // predicated block. We will scale this cost by block probability after 5529 // computing the scalarization overhead. 5530 unsigned ScalarCost = VF * getInstructionCost(I, 1).first; 5531 5532 // Compute the scalarization overhead of needed insertelement instructions 5533 // and phi nodes. 
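// For a non-void predicated instruction this is roughly one insertelement per
// lane (to repack the scalar results into a vector) plus one phi per lane
// merging the predicated block back in; e.g. at VF = 4 that is 4 inserts and
// 4 phis.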
5534 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 5535 ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF), 5536 true, false); 5537 ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI); 5538 } 5539 5540 // Compute the scalarization overhead of needed extractelement 5541 // instructions. For each of the instruction's operands, if the operand can 5542 // be scalarized, add it to the worklist; otherwise, account for the 5543 // overhead. 5544 for (Use &U : I->operands()) 5545 if (auto *J = dyn_cast<Instruction>(U.get())) { 5546 assert(VectorType::isValidElementType(J->getType()) && 5547 "Instruction has non-scalar type"); 5548 if (canBeScalarized(J)) 5549 Worklist.push_back(J); 5550 else if (needsExtract(J)) 5551 ScalarCost += TTI.getScalarizationOverhead( 5552 ToVectorTy(J->getType(),VF), false, true); 5553 } 5554 5555 // Scale the total scalar cost by block probability. 5556 ScalarCost /= getReciprocalPredBlockProb(); 5557 5558 // Compute the discount. A non-negative discount means the vector version 5559 // of the instruction costs more, and scalarizing would be beneficial. 5560 Discount += VectorCost - ScalarCost; 5561 ScalarCosts[I] = ScalarCost; 5562 } 5563 5564 return Discount; 5565 } 5566 5567 LoopVectorizationCostModel::VectorizationCostTy 5568 LoopVectorizationCostModel::expectedCost(unsigned VF) { 5569 VectorizationCostTy Cost; 5570 5571 // For each block. 5572 for (BasicBlock *BB : TheLoop->blocks()) { 5573 VectorizationCostTy BlockCost; 5574 5575 // For each instruction in the old loop. 5576 for (Instruction &I : BB->instructionsWithoutDebug()) { 5577 // Skip ignored values. 5578 if (ValuesToIgnore.count(&I) || 5579 (VF > 1 && VecValuesToIgnore.count(&I))) 5580 continue; 5581 5582 VectorizationCostTy C = getInstructionCost(&I, VF); 5583 5584 // Check if we should override the cost. 5585 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 5586 C.first = ForceTargetInstructionCost; 5587 5588 BlockCost.first += C.first; 5589 BlockCost.second |= C.second; 5590 DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF " 5591 << VF << " For instruction: " << I << '\n'); 5592 } 5593 5594 // If we are vectorizing a predicated block, it will have been 5595 // if-converted. This means that the block's instructions (aside from 5596 // stores and instructions that may divide by zero) will now be 5597 // unconditionally executed. For the scalar case, we may not always execute 5598 // the predicated block. Thus, scale the block's cost by the probability of 5599 // executing it. 5600 if (VF == 1 && Legal->blockNeedsPredication(BB)) 5601 BlockCost.first /= getReciprocalPredBlockProb(); 5602 5603 Cost.first += BlockCost.first; 5604 Cost.second |= BlockCost.second; 5605 } 5606 5607 return Cost; 5608 } 5609 5610 /// Gets Address Access SCEV after verifying that the access pattern 5611 /// is loop invariant except the induction variable dependence. 5612 /// 5613 /// This SCEV can be sent to the Target in order to estimate the address 5614 /// calculation cost. 5615 static const SCEV *getAddressAccessSCEV( 5616 Value *Ptr, 5617 LoopVectorizationLegality *Legal, 5618 PredicatedScalarEvolution &PSE, 5619 const Loop *TheLoop) { 5620 5621 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 5622 if (!Gep) 5623 return nullptr; 5624 5625 // We are looking for a gep with all loop invariant indices except for one 5626 // which should be an induction variable. 
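// For instance, an access like A[i][j] in a loop over j (with i loop
// invariant) yields a GEP with indices {invariant, induction}, and we can hand
// the target its full address SCEV; if any index is neither loop invariant nor
// an induction variable we give up and return a null SCEV instead.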
5627 auto SE = PSE.getSE(); 5628 unsigned NumOperands = Gep->getNumOperands(); 5629 for (unsigned i = 1; i < NumOperands; ++i) { 5630 Value *Opd = Gep->getOperand(i); 5631 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 5632 !Legal->isInductionVariable(Opd)) 5633 return nullptr; 5634 } 5635 5636 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 5637 return PSE.getSCEV(Ptr); 5638 } 5639 5640 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 5641 return Legal->hasStride(I->getOperand(0)) || 5642 Legal->hasStride(I->getOperand(1)); 5643 } 5644 5645 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 5646 unsigned VF) { 5647 Type *ValTy = getMemInstValueType(I); 5648 auto SE = PSE.getSE(); 5649 5650 unsigned Alignment = getMemInstAlignment(I); 5651 unsigned AS = getMemInstAddressSpace(I); 5652 Value *Ptr = getLoadStorePointerOperand(I); 5653 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 5654 5655 // Figure out whether the access is strided and get the stride value 5656 // if it's known in compile time 5657 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 5658 5659 // Get the cost of the scalar memory instruction and address computation. 5660 unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 5661 5662 Cost += VF * 5663 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 5664 AS, I); 5665 5666 // Get the overhead of the extractelement and insertelement instructions 5667 // we might create due to scalarization. 5668 Cost += getScalarizationOverhead(I, VF, TTI); 5669 5670 // If we have a predicated store, it may not be executed for each vector 5671 // lane. Scale the cost by the probability of executing the predicated 5672 // block. 5673 if (isScalarWithPredication(I)) { 5674 Cost /= getReciprocalPredBlockProb(); 5675 5676 if (useEmulatedMaskMemRefHack(I)) 5677 // Artificially setting to a high enough value to practically disable 5678 // vectorization with such operations. 
5679 Cost = 3000000; 5680 } 5681 5682 return Cost; 5683 } 5684 5685 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 5686 unsigned VF) { 5687 Type *ValTy = getMemInstValueType(I); 5688 Type *VectorTy = ToVectorTy(ValTy, VF); 5689 unsigned Alignment = getMemInstAlignment(I); 5690 Value *Ptr = getLoadStorePointerOperand(I); 5691 unsigned AS = getMemInstAddressSpace(I); 5692 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 5693 5694 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 5695 "Stride should be 1 or -1 for consecutive memory access"); 5696 unsigned Cost = 0; 5697 if (Legal->isMaskRequired(I)) 5698 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 5699 else 5700 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I); 5701 5702 bool Reverse = ConsecutiveStride < 0; 5703 if (Reverse) 5704 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5705 return Cost; 5706 } 5707 5708 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 5709 unsigned VF) { 5710 LoadInst *LI = cast<LoadInst>(I); 5711 Type *ValTy = LI->getType(); 5712 Type *VectorTy = ToVectorTy(ValTy, VF); 5713 unsigned Alignment = LI->getAlignment(); 5714 unsigned AS = LI->getPointerAddressSpace(); 5715 5716 return TTI.getAddressComputationCost(ValTy) + 5717 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) + 5718 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 5719 } 5720 5721 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 5722 unsigned VF) { 5723 Type *ValTy = getMemInstValueType(I); 5724 Type *VectorTy = ToVectorTy(ValTy, VF); 5725 unsigned Alignment = getMemInstAlignment(I); 5726 Value *Ptr = getLoadStorePointerOperand(I); 5727 5728 return TTI.getAddressComputationCost(VectorTy) + 5729 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 5730 Legal->isMaskRequired(I), Alignment); 5731 } 5732 5733 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 5734 unsigned VF) { 5735 Type *ValTy = getMemInstValueType(I); 5736 Type *VectorTy = ToVectorTy(ValTy, VF); 5737 unsigned AS = getMemInstAddressSpace(I); 5738 5739 auto Group = getInterleavedAccessGroup(I); 5740 assert(Group && "Fail to get an interleaved access group."); 5741 5742 unsigned InterleaveFactor = Group->getFactor(); 5743 Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 5744 5745 // Holds the indices of existing members in an interleaved load group. 5746 // An interleaved store group doesn't need this as it doesn't allow gaps. 5747 SmallVector<unsigned, 4> Indices; 5748 if (isa<LoadInst>(I)) { 5749 for (unsigned i = 0; i < InterleaveFactor; i++) 5750 if (Group->getMember(i)) 5751 Indices.push_back(i); 5752 } 5753 5754 // Calculate the cost of the whole interleaved group. 5755 unsigned Cost = TTI.getInterleavedMemoryOpCost(I->getOpcode(), WideVecTy, 5756 Group->getFactor(), Indices, 5757 Group->getAlignment(), AS); 5758 5759 if (Group->isReverse()) 5760 Cost += Group->getNumMembers() * 5761 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5762 return Cost; 5763 } 5764 5765 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 5766 unsigned VF) { 5767 // Calculate scalar cost only. Vectorization cost should be ready at this 5768 // moment. 
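// For VF == 1 this is simply the address computation plus a single scalar
// load/store; for VF > 1 the widening decision (and its cost) was cached by
// setCostBasedWideningDecision, so we only look it up via getWideningCost.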
5769 if (VF == 1) { 5770 Type *ValTy = getMemInstValueType(I); 5771 unsigned Alignment = getMemInstAlignment(I); 5772 unsigned AS = getMemInstAddressSpace(I); 5773 5774 return TTI.getAddressComputationCost(ValTy) + 5775 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I); 5776 } 5777 return getWideningCost(I, VF); 5778 } 5779 5780 LoopVectorizationCostModel::VectorizationCostTy 5781 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 5782 // If we know that this instruction will remain uniform, check the cost of 5783 // the scalar version. 5784 if (isUniformAfterVectorization(I, VF)) 5785 VF = 1; 5786 5787 if (VF > 1 && isProfitableToScalarize(I, VF)) 5788 return VectorizationCostTy(InstsToScalarize[VF][I], false); 5789 5790 // Forced scalars do not have any scalarization overhead. 5791 if (VF > 1 && ForcedScalars.count(VF) && 5792 ForcedScalars.find(VF)->second.count(I)) 5793 return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false); 5794 5795 Type *VectorTy; 5796 unsigned C = getInstructionCost(I, VF, VectorTy); 5797 5798 bool TypeNotScalarized = 5799 VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF; 5800 return VectorizationCostTy(C, TypeNotScalarized); 5801 } 5802 5803 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) { 5804 if (VF == 1) 5805 return; 5806 NumPredStores = 0; 5807 for (BasicBlock *BB : TheLoop->blocks()) { 5808 // For each instruction in the old loop. 5809 for (Instruction &I : *BB) { 5810 Value *Ptr = getLoadStorePointerOperand(&I); 5811 if (!Ptr) 5812 continue; 5813 5814 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 5815 NumPredStores++; 5816 if (isa<LoadInst>(&I) && Legal->isUniform(Ptr)) { 5817 // Scalar load + broadcast 5818 unsigned Cost = getUniformMemOpCost(&I, VF); 5819 setWideningDecision(&I, VF, CM_Scalarize, Cost); 5820 continue; 5821 } 5822 5823 // We assume that widening is the best solution when possible. 5824 if (memoryInstructionCanBeWidened(&I, VF)) { 5825 unsigned Cost = getConsecutiveMemOpCost(&I, VF); 5826 int ConsecutiveStride = 5827 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 5828 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 5829 "Expected consecutive stride."); 5830 InstWidening Decision = 5831 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 5832 setWideningDecision(&I, VF, Decision, Cost); 5833 continue; 5834 } 5835 5836 // Choose between Interleaving, Gather/Scatter or Scalarization. 5837 unsigned InterleaveCost = std::numeric_limits<unsigned>::max(); 5838 unsigned NumAccesses = 1; 5839 if (isAccessInterleaved(&I)) { 5840 auto Group = getInterleavedAccessGroup(&I); 5841 assert(Group && "Fail to get an interleaved access group."); 5842 5843 // Make one decision for the whole group. 5844 if (getWideningDecision(&I, VF) != CM_Unknown) 5845 continue; 5846 5847 NumAccesses = Group->getNumMembers(); 5848 InterleaveCost = getInterleaveGroupCost(&I, VF); 5849 } 5850 5851 unsigned GatherScatterCost = 5852 isLegalGatherOrScatter(&I) 5853 ? getGatherScatterCost(&I, VF) * NumAccesses 5854 : std::numeric_limits<unsigned>::max(); 5855 5856 unsigned ScalarizationCost = 5857 getMemInstScalarizationCost(&I, VF) * NumAccesses; 5858 5859 // Choose better solution for the current VF, 5860 // write down this decision and use it during vectorization. 
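// In effect we take the cheapest of the three strategies, breaking ties
// between interleaving and gather/scatter in favor of interleaving; e.g. costs
// of {interleave: 8, gather/scatter: 12, scalarize: 20} select CM_Interleave
// with cost 8.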
5861 unsigned Cost; 5862 InstWidening Decision; 5863 if (InterleaveCost <= GatherScatterCost && 5864 InterleaveCost < ScalarizationCost) { 5865 Decision = CM_Interleave; 5866 Cost = InterleaveCost; 5867 } else if (GatherScatterCost < ScalarizationCost) { 5868 Decision = CM_GatherScatter; 5869 Cost = GatherScatterCost; 5870 } else { 5871 Decision = CM_Scalarize; 5872 Cost = ScalarizationCost; 5873 } 5874 // If the instructions belongs to an interleave group, the whole group 5875 // receives the same decision. The whole group receives the cost, but 5876 // the cost will actually be assigned to one instruction. 5877 if (auto Group = getInterleavedAccessGroup(&I)) 5878 setWideningDecision(Group, VF, Decision, Cost); 5879 else 5880 setWideningDecision(&I, VF, Decision, Cost); 5881 } 5882 } 5883 5884 // Make sure that any load of address and any other address computation 5885 // remains scalar unless there is gather/scatter support. This avoids 5886 // inevitable extracts into address registers, and also has the benefit of 5887 // activating LSR more, since that pass can't optimize vectorized 5888 // addresses. 5889 if (TTI.prefersVectorizedAddressing()) 5890 return; 5891 5892 // Start with all scalar pointer uses. 5893 SmallPtrSet<Instruction *, 8> AddrDefs; 5894 for (BasicBlock *BB : TheLoop->blocks()) 5895 for (Instruction &I : *BB) { 5896 Instruction *PtrDef = 5897 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 5898 if (PtrDef && TheLoop->contains(PtrDef) && 5899 getWideningDecision(&I, VF) != CM_GatherScatter) 5900 AddrDefs.insert(PtrDef); 5901 } 5902 5903 // Add all instructions used to generate the addresses. 5904 SmallVector<Instruction *, 4> Worklist; 5905 for (auto *I : AddrDefs) 5906 Worklist.push_back(I); 5907 while (!Worklist.empty()) { 5908 Instruction *I = Worklist.pop_back_val(); 5909 for (auto &Op : I->operands()) 5910 if (auto *InstOp = dyn_cast<Instruction>(Op)) 5911 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 5912 AddrDefs.insert(InstOp).second) 5913 Worklist.push_back(InstOp); 5914 } 5915 5916 for (auto *I : AddrDefs) { 5917 if (isa<LoadInst>(I)) { 5918 // Setting the desired widening decision should ideally be handled in 5919 // by cost functions, but since this involves the task of finding out 5920 // if the loaded register is involved in an address computation, it is 5921 // instead changed here when we know this is the case. 5922 InstWidening Decision = getWideningDecision(I, VF); 5923 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 5924 // Scalarize a widened load of address. 5925 setWideningDecision(I, VF, CM_Scalarize, 5926 (VF * getMemoryInstructionCost(I, 1))); 5927 else if (auto Group = getInterleavedAccessGroup(I)) { 5928 // Scalarize an interleave group of address loads. 5929 for (unsigned I = 0; I < Group->getFactor(); ++I) { 5930 if (Instruction *Member = Group->getMember(I)) 5931 setWideningDecision(Member, VF, CM_Scalarize, 5932 (VF * getMemoryInstructionCost(Member, 1))); 5933 } 5934 } 5935 } else 5936 // Make sure I gets scalarized and a cost estimate without 5937 // scalarization overhead. 5938 ForcedScalars[VF].insert(I); 5939 } 5940 } 5941 5942 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 5943 unsigned VF, 5944 Type *&VectorTy) { 5945 Type *RetTy = I->getType(); 5946 if (canTruncateToMinimalBitwidth(I, VF)) 5947 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 5948 VectorTy = isScalarAfterVectorization(I, VF) ? 
RetTy : ToVectorTy(RetTy, VF); 5949 auto SE = PSE.getSE(); 5950 5951 // TODO: We need to estimate the cost of intrinsic calls. 5952 switch (I->getOpcode()) { 5953 case Instruction::GetElementPtr: 5954 // We mark this instruction as zero-cost because the cost of GEPs in 5955 // vectorized code depends on whether the corresponding memory instruction 5956 // is scalarized or not. Therefore, we handle GEPs with the memory 5957 // instruction cost. 5958 return 0; 5959 case Instruction::Br: { 5960 // In cases of scalarized and predicated instructions, there will be VF 5961 // predicated blocks in the vectorized loop. Each branch around these 5962 // blocks requires also an extract of its vector compare i1 element. 5963 bool ScalarPredicatedBB = false; 5964 BranchInst *BI = cast<BranchInst>(I); 5965 if (VF > 1 && BI->isConditional() && 5966 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 5967 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 5968 ScalarPredicatedBB = true; 5969 5970 if (ScalarPredicatedBB) { 5971 // Return cost for branches around scalarized and predicated blocks. 5972 Type *Vec_i1Ty = 5973 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 5974 return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) + 5975 (TTI.getCFInstrCost(Instruction::Br) * VF)); 5976 } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1) 5977 // The back-edge branch will remain, as will all scalar branches. 5978 return TTI.getCFInstrCost(Instruction::Br); 5979 else 5980 // This branch will be eliminated by if-conversion. 5981 return 0; 5982 // Note: We currently assume zero cost for an unconditional branch inside 5983 // a predicated block since it will become a fall-through, although we 5984 // may decide in the future to call TTI for all branches. 5985 } 5986 case Instruction::PHI: { 5987 auto *Phi = cast<PHINode>(I); 5988 5989 // First-order recurrences are replaced by vector shuffles inside the loop. 5990 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 5991 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 5992 VectorTy, VF - 1, VectorTy); 5993 5994 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 5995 // converted into select instructions. We require N - 1 selects per phi 5996 // node, where N is the number of incoming values. 5997 if (VF > 1 && Phi->getParent() != TheLoop->getHeader()) 5998 return (Phi->getNumIncomingValues() - 1) * 5999 TTI.getCmpSelInstrCost( 6000 Instruction::Select, ToVectorTy(Phi->getType(), VF), 6001 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF)); 6002 6003 return TTI.getCFInstrCost(Instruction::PHI); 6004 } 6005 case Instruction::UDiv: 6006 case Instruction::SDiv: 6007 case Instruction::URem: 6008 case Instruction::SRem: 6009 // If we have a predicated instruction, it may not be executed for each 6010 // vector lane. Get the scalarization cost and scale this amount by the 6011 // probability of executing the predicated block. If the instruction is not 6012 // predicated, we fall through to the next case. 6013 if (VF > 1 && isScalarWithPredication(I)) { 6014 unsigned Cost = 0; 6015 6016 // These instructions have a non-void type, so account for the phi nodes 6017 // that we will create. This cost is likely to be zero. The phi node 6018 // cost, if any, should be scaled by the block probability because it 6019 // models a copy at the end of each predicated block. 6020 Cost += VF * TTI.getCFInstrCost(Instruction::PHI); 6021 6022 // The cost of the non-predicated instruction. 
6023 Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy); 6024 6025 // The cost of insertelement and extractelement instructions needed for 6026 // scalarization. 6027 Cost += getScalarizationOverhead(I, VF, TTI); 6028 6029 // Scale the cost by the probability of executing the predicated blocks. 6030 // This assumes the predicated block for each vector lane is equally 6031 // likely. 6032 return Cost / getReciprocalPredBlockProb(); 6033 } 6034 LLVM_FALLTHROUGH; 6035 case Instruction::Add: 6036 case Instruction::FAdd: 6037 case Instruction::Sub: 6038 case Instruction::FSub: 6039 case Instruction::Mul: 6040 case Instruction::FMul: 6041 case Instruction::FDiv: 6042 case Instruction::FRem: 6043 case Instruction::Shl: 6044 case Instruction::LShr: 6045 case Instruction::AShr: 6046 case Instruction::And: 6047 case Instruction::Or: 6048 case Instruction::Xor: { 6049 // Since we will replace the stride by 1 the multiplication should go away. 6050 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 6051 return 0; 6052 // Certain instructions can be cheaper to vectorize if they have a constant 6053 // second vector operand. One example of this are shifts on x86. 6054 TargetTransformInfo::OperandValueKind Op1VK = 6055 TargetTransformInfo::OK_AnyValue; 6056 TargetTransformInfo::OperandValueKind Op2VK = 6057 TargetTransformInfo::OK_AnyValue; 6058 TargetTransformInfo::OperandValueProperties Op1VP = 6059 TargetTransformInfo::OP_None; 6060 TargetTransformInfo::OperandValueProperties Op2VP = 6061 TargetTransformInfo::OP_None; 6062 Value *Op2 = I->getOperand(1); 6063 6064 // Check for a splat or for a non uniform vector of constants. 6065 if (isa<ConstantInt>(Op2)) { 6066 ConstantInt *CInt = cast<ConstantInt>(Op2); 6067 if (CInt && CInt->getValue().isPowerOf2()) 6068 Op2VP = TargetTransformInfo::OP_PowerOf2; 6069 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 6070 } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) { 6071 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 6072 Constant *SplatValue = cast<Constant>(Op2)->getSplatValue(); 6073 if (SplatValue) { 6074 ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue); 6075 if (CInt && CInt->getValue().isPowerOf2()) 6076 Op2VP = TargetTransformInfo::OP_PowerOf2; 6077 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 6078 } 6079 } else if (Legal->isUniform(Op2)) { 6080 Op2VK = TargetTransformInfo::OK_UniformValue; 6081 } 6082 SmallVector<const Value *, 4> Operands(I->operand_values()); 6083 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 6084 return N * TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, 6085 Op2VK, Op1VP, Op2VP, Operands); 6086 } 6087 case Instruction::Select: { 6088 SelectInst *SI = cast<SelectInst>(I); 6089 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 6090 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 6091 Type *CondTy = SI->getCondition()->getType(); 6092 if (!ScalarCond) 6093 CondTy = VectorType::get(CondTy, VF); 6094 6095 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I); 6096 } 6097 case Instruction::ICmp: 6098 case Instruction::FCmp: { 6099 Type *ValTy = I->getOperand(0)->getType(); 6100 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 6101 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 6102 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 6103 VectorTy = ToVectorTy(ValTy, VF); 6104 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I); 6105 } 6106 case Instruction::Store: 6107 case Instruction::Load: { 6108 unsigned Width = VF; 6109 if (Width > 1) { 6110 InstWidening Decision = getWideningDecision(I, Width); 6111 assert(Decision != CM_Unknown && 6112 "CM decision should be taken at this point"); 6113 if (Decision == CM_Scalarize) 6114 Width = 1; 6115 } 6116 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 6117 return getMemoryInstructionCost(I, VF); 6118 } 6119 case Instruction::ZExt: 6120 case Instruction::SExt: 6121 case Instruction::FPToUI: 6122 case Instruction::FPToSI: 6123 case Instruction::FPExt: 6124 case Instruction::PtrToInt: 6125 case Instruction::IntToPtr: 6126 case Instruction::SIToFP: 6127 case Instruction::UIToFP: 6128 case Instruction::Trunc: 6129 case Instruction::FPTrunc: 6130 case Instruction::BitCast: { 6131 // We optimize the truncation of induction variables having constant 6132 // integer steps. The cost of these truncations is the same as the scalar 6133 // operation. 6134 if (isOptimizableIVTruncate(I, VF)) { 6135 auto *Trunc = cast<TruncInst>(I); 6136 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 6137 Trunc->getSrcTy(), Trunc); 6138 } 6139 6140 Type *SrcScalarTy = I->getOperand(0)->getType(); 6141 Type *SrcVecTy = 6142 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 6143 if (canTruncateToMinimalBitwidth(I, VF)) { 6144 // This cast is going to be shrunk. This may remove the cast or it might 6145 // turn it into slightly different cast. For example, if MinBW == 16, 6146 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 6147 // 6148 // Calculate the modified src and dest types. 6149 Type *MinVecTy = VectorTy; 6150 if (I->getOpcode() == Instruction::Trunc) { 6151 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 6152 VectorTy = 6153 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6154 } else if (I->getOpcode() == Instruction::ZExt || 6155 I->getOpcode() == Instruction::SExt) { 6156 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 6157 VectorTy = 6158 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6159 } 6160 } 6161 6162 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 6163 return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I); 6164 } 6165 case Instruction::Call: { 6166 bool NeedToScalarize; 6167 CallInst *CI = cast<CallInst>(I); 6168 unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize); 6169 if (getVectorIntrinsicIDForCall(CI, TLI)) 6170 return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI)); 6171 return CallCost; 6172 } 6173 default: 6174 // The cost of executing VF copies of the scalar instruction. This opcode 6175 // is unknown. Assume that it is the same as 'mul'. 6176 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) + 6177 getScalarizationOverhead(I, VF, TTI); 6178 } // end of switch. 6179 } 6180 6181 char LoopVectorize::ID = 0; 6182 6183 static const char lv_name[] = "Loop Vectorization"; 6184 6185 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 6186 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 6187 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 6188 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 6189 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 6190 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 6191 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 6192 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 6193 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 6194 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 6195 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 6196 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 6197 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 6198 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 6199 6200 namespace llvm { 6201 6202 Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) { 6203 return new LoopVectorize(NoUnrolling, AlwaysVectorize); 6204 } 6205 6206 } // end namespace llvm 6207 6208 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 6209 // Check if the pointer operand of a load or store instruction is 6210 // consecutive. 6211 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 6212 return Legal->isConsecutivePtr(Ptr); 6213 return false; 6214 } 6215 6216 void LoopVectorizationCostModel::collectValuesToIgnore() { 6217 // Ignore ephemeral values. 6218 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 6219 6220 // Ignore type-promoting instructions we identified during reduction 6221 // detection. 6222 for (auto &Reduction : *Legal->getReductionVars()) { 6223 RecurrenceDescriptor &RedDes = Reduction.second; 6224 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 6225 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6226 } 6227 // Ignore type-casting instructions we identified during induction 6228 // detection. 6229 for (auto &Induction : *Legal->getInductionVars()) { 6230 InductionDescriptor &IndDes = Induction.second; 6231 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6232 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6233 } 6234 } 6235 6236 VectorizationFactor 6237 LoopVectorizationPlanner::planInVPlanNativePath(bool OptForSize, 6238 unsigned UserVF) { 6239 // Width 1 means no vectorize, cost 0 means uncomputed cost. 6240 const VectorizationFactor NoVectorization = {1U, 0U}; 6241 6242 // Outer loop handling: They may require CFG and instruction level 6243 // transformations before even evaluating whether vectorization is profitable. 
6244 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 6245 // the vectorization pipeline. 6246 if (!OrigLoop->empty()) { 6247 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 6248 assert(UserVF && "Expected UserVF for outer loop vectorization."); 6249 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 6250 DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 6251 buildVPlans(UserVF, UserVF); 6252 6253 return {UserVF, 0}; 6254 } 6255 6256 DEBUG(dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 6257 "VPlan-native path.\n"); 6258 return NoVectorization; 6259 } 6260 6261 VectorizationFactor 6262 LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) { 6263 assert(OrigLoop->empty() && "Inner loop expected."); 6264 // Width 1 means no vectorize, cost 0 means uncomputed cost. 6265 const VectorizationFactor NoVectorization = {1U, 0U}; 6266 Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize); 6267 if (!MaybeMaxVF.hasValue()) // Cases considered too costly to vectorize. 6268 return NoVectorization; 6269 6270 if (UserVF) { 6271 DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 6272 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 6273 // Collect the instructions (and their associated costs) that will be more 6274 // profitable to scalarize. 6275 CM.selectUserVectorizationFactor(UserVF); 6276 buildVPlans(UserVF, UserVF); 6277 DEBUG(printPlans(dbgs())); 6278 return {UserVF, 0}; 6279 } 6280 6281 unsigned MaxVF = MaybeMaxVF.getValue(); 6282 assert(MaxVF != 0 && "MaxVF is zero."); 6283 6284 for (unsigned VF = 1; VF <= MaxVF; VF *= 2) { 6285 // Collect Uniform and Scalar instructions after vectorization with VF. 6286 CM.collectUniformsAndScalars(VF); 6287 6288 // Collect the instructions (and their associated costs) that will be more 6289 // profitable to scalarize. 6290 if (VF > 1) 6291 CM.collectInstsToScalarize(VF); 6292 } 6293 6294 buildVPlans(1, MaxVF); 6295 DEBUG(printPlans(dbgs())); 6296 if (MaxVF == 1) 6297 return NoVectorization; 6298 6299 // Select the optimal vectorization factor. 6300 return CM.selectVectorizationFactor(MaxVF); 6301 } 6302 6303 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) { 6304 DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF << '\n'); 6305 BestVF = VF; 6306 BestUF = UF; 6307 6308 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 6309 return !Plan->hasVF(VF); 6310 }); 6311 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 6312 } 6313 6314 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 6315 DominatorTree *DT) { 6316 // Perform the actual loop transformation. 6317 6318 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 6319 VPCallbackILV CallbackILV(ILV); 6320 6321 VPTransformState State{BestVF, BestUF, LI, 6322 DT, ILV.Builder, ILV.VectorLoopValueMap, 6323 &ILV, CallbackILV}; 6324 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 6325 6326 //===------------------------------------------------===// 6327 // 6328 // Notice: any optimization or new instruction that go 6329 // into the code below should also be implemented in 6330 // the cost-model. 6331 // 6332 //===------------------------------------------------===// 6333 6334 // 2. Copy and widen instructions from the old loop into the new loop. 6335 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 6336 VPlans.front()->execute(&State); 6337 6338 // 3. 
Fix the vectorized code: take care of header phi's, live-outs, 6339 // predication, updating analyses. 6340 ILV.fixVectorizedLoop(); 6341 } 6342 6343 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 6344 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 6345 BasicBlock *Latch = OrigLoop->getLoopLatch(); 6346 6347 // We create new control-flow for the vectorized loop, so the original 6348 // condition will be dead after vectorization if it's only used by the 6349 // branch. 6350 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 6351 if (Cmp && Cmp->hasOneUse()) 6352 DeadInstructions.insert(Cmp); 6353 6354 // We create new "steps" for induction variable updates to which the original 6355 // induction variables map. An original update instruction will be dead if 6356 // all its users except the induction variable are dead. 6357 for (auto &Induction : *Legal->getInductionVars()) { 6358 PHINode *Ind = Induction.first; 6359 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 6360 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 6361 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 6362 })) 6363 DeadInstructions.insert(IndUpdate); 6364 6365 // We record as "Dead" also the type-casting instructions we had identified 6366 // during induction analysis. We don't need any handling for them in the 6367 // vectorized loop because we have proven that, under a proper runtime 6368 // test guarding the vectorized loop, the value of the phi, and the casted 6369 // value of the phi, are the same. The last instruction in this casting chain 6370 // will get its scalar/vector/widened def from the scalar/vector/widened def 6371 // of the respective phi node. Any other casts in the induction def-use chain 6372 // have no other uses outside the phi update chain, and will be ignored. 6373 InductionDescriptor &IndDes = Induction.second; 6374 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6375 DeadInstructions.insert(Casts.begin(), Casts.end()); 6376 } 6377 } 6378 6379 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 6380 6381 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 6382 6383 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 6384 Instruction::BinaryOps BinOp) { 6385 // When unrolling and the VF is 1, we only need to add a simple scalar. 6386 Type *Ty = Val->getType(); 6387 assert(!Ty->isVectorTy() && "Val must be a scalar"); 6388 6389 if (Ty->isFloatingPointTy()) { 6390 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 6391 6392 // Floating point operations had to be 'fast' to enable the unrolling. 6393 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 6394 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 6395 } 6396 Constant *C = ConstantInt::get(Ty, StartIdx); 6397 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 6398 } 6399 6400 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 6401 SmallVector<Metadata *, 4> MDs; 6402 // Reserve first location for self reference to the LoopID metadata node. 6403 MDs.push_back(nullptr); 6404 bool IsUnrollMetadata = false; 6405 MDNode *LoopID = L->getLoopID(); 6406 if (LoopID) { 6407 // First find existing loop unrolling disable metadata. 
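// The loop ID is an MDNode whose operand 0 is a self reference; each of the
// remaining operands is typically a node such as
// !{!"llvm.loop.unroll.disable"}, so we inspect the leading string of every
// operand node.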
6408 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 6409 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 6410 if (MD) { 6411 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 6412 IsUnrollMetadata = 6413 S && S->getString().startswith("llvm.loop.unroll.disable"); 6414 } 6415 MDs.push_back(LoopID->getOperand(i)); 6416 } 6417 } 6418 6419 if (!IsUnrollMetadata) { 6420 // Add runtime unroll disable metadata. 6421 LLVMContext &Context = L->getHeader()->getContext(); 6422 SmallVector<Metadata *, 1> DisableOperands; 6423 DisableOperands.push_back( 6424 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 6425 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 6426 MDs.push_back(DisableNode); 6427 MDNode *NewLoopID = MDNode::get(Context, MDs); 6428 // Set operand 0 to refer to the loop id itself. 6429 NewLoopID->replaceOperandWith(0, NewLoopID); 6430 L->setLoopID(NewLoopID); 6431 } 6432 } 6433 6434 bool LoopVectorizationPlanner::getDecisionAndClampRange( 6435 const std::function<bool(unsigned)> &Predicate, VFRange &Range) { 6436 assert(Range.End > Range.Start && "Trying to test an empty VF range."); 6437 bool PredicateAtRangeStart = Predicate(Range.Start); 6438 6439 for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2) 6440 if (Predicate(TmpVF) != PredicateAtRangeStart) { 6441 Range.End = TmpVF; 6442 break; 6443 } 6444 6445 return PredicateAtRangeStart; 6446 } 6447 6448 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 6449 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 6450 /// of VF's starting at a given VF and extending it as much as possible. Each 6451 /// vectorization decision can potentially shorten this sub-range during 6452 /// buildVPlan(). 6453 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) { 6454 6455 // Collect conditions feeding internal conditional branches; they need to be 6456 // represented in VPlan for it to model masking. 6457 SmallPtrSet<Value *, 1> NeedDef; 6458 6459 auto *Latch = OrigLoop->getLoopLatch(); 6460 for (BasicBlock *BB : OrigLoop->blocks()) { 6461 if (BB == Latch) 6462 continue; 6463 BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator()); 6464 if (Branch && Branch->isConditional()) 6465 NeedDef.insert(Branch->getCondition()); 6466 } 6467 6468 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 6469 VFRange SubRange = {VF, MaxVF + 1}; 6470 VPlans.push_back(buildVPlan(SubRange, NeedDef)); 6471 VF = SubRange.End; 6472 } 6473 } 6474 6475 VPValue *LoopVectorizationPlanner::createEdgeMask(BasicBlock *Src, 6476 BasicBlock *Dst, 6477 VPlanPtr &Plan) { 6478 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 6479 6480 // Look for cached value. 6481 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 6482 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 6483 if (ECEntryIt != EdgeMaskCache.end()) 6484 return ECEntryIt->second; 6485 6486 VPValue *SrcMask = createBlockInMask(Src, Plan); 6487 6488 // The terminator has to be a branch inst! 
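// Conceptually the mask built below is
//   EdgeMask(Src -> Dst) = (Dst == Succ0 ? Cond : !Cond) & BlockInMask(Src),
// where a null VPValue stands for an all-ones mask and an unconditional branch
// simply propagates the source block's mask.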
6489 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 6490 assert(BI && "Unexpected terminator found"); 6491 6492 if (!BI->isConditional()) 6493 return EdgeMaskCache[Edge] = SrcMask; 6494 6495 VPValue *EdgeMask = Plan->getVPValue(BI->getCondition()); 6496 assert(EdgeMask && "No Edge Mask found for condition"); 6497 6498 if (BI->getSuccessor(0) != Dst) 6499 EdgeMask = Builder.createNot(EdgeMask); 6500 6501 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND. 6502 EdgeMask = Builder.createAnd(EdgeMask, SrcMask); 6503 6504 return EdgeMaskCache[Edge] = EdgeMask; 6505 } 6506 6507 VPValue *LoopVectorizationPlanner::createBlockInMask(BasicBlock *BB, 6508 VPlanPtr &Plan) { 6509 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 6510 6511 // Look for cached value. 6512 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 6513 if (BCEntryIt != BlockMaskCache.end()) 6514 return BCEntryIt->second; 6515 6516 // All-one mask is modelled as no-mask following the convention for masked 6517 // load/store/gather/scatter. Initialize BlockMask to no-mask. 6518 VPValue *BlockMask = nullptr; 6519 6520 // Loop incoming mask is all-one. 6521 if (OrigLoop->getHeader() == BB) 6522 return BlockMaskCache[BB] = BlockMask; 6523 6524 // This is the block mask. We OR all incoming edges. 6525 for (auto *Predecessor : predecessors(BB)) { 6526 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 6527 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 6528 return BlockMaskCache[BB] = EdgeMask; 6529 6530 if (!BlockMask) { // BlockMask has its initialized nullptr value. 6531 BlockMask = EdgeMask; 6532 continue; 6533 } 6534 6535 BlockMask = Builder.createOr(BlockMask, EdgeMask); 6536 } 6537 6538 return BlockMaskCache[BB] = BlockMask; 6539 } 6540 6541 VPInterleaveRecipe * 6542 LoopVectorizationPlanner::tryToInterleaveMemory(Instruction *I, 6543 VFRange &Range) { 6544 const InterleaveGroup *IG = CM.getInterleavedAccessGroup(I); 6545 if (!IG) 6546 return nullptr; 6547 6548 // Now check if IG is relevant for VF's in the given range. 6549 auto isIGMember = [&](Instruction *I) -> std::function<bool(unsigned)> { 6550 return [=](unsigned VF) -> bool { 6551 return (VF >= 2 && // Query is illegal for VF == 1 6552 CM.getWideningDecision(I, VF) == 6553 LoopVectorizationCostModel::CM_Interleave); 6554 }; 6555 }; 6556 if (!getDecisionAndClampRange(isIGMember(I), Range)) 6557 return nullptr; 6558 6559 // I is a member of an InterleaveGroup for VF's in the (possibly trimmed) 6560 // range. If it's the primary member of the IG construct a VPInterleaveRecipe. 6561 // Otherwise, it's an adjunct member of the IG, do not construct any Recipe. 
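// For example, loads of A[3*i], A[3*i+1] and A[3*i+2] form a group with
// factor 3; only the member at the group's insert position receives a
// VPInterleaveRecipe, and that single recipe covers the whole group.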
6562 assert(I == IG->getInsertPos() && 6563 "Generating a recipe for an adjunct member of an interleave group"); 6564 6565 return new VPInterleaveRecipe(IG); 6566 } 6567 6568 VPWidenMemoryInstructionRecipe * 6569 LoopVectorizationPlanner::tryToWidenMemory(Instruction *I, VFRange &Range, 6570 VPlanPtr &Plan) { 6571 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 6572 return nullptr; 6573 6574 auto willWiden = [&](unsigned VF) -> bool { 6575 if (VF == 1) 6576 return false; 6577 if (CM.isScalarAfterVectorization(I, VF) || 6578 CM.isProfitableToScalarize(I, VF)) 6579 return false; 6580 LoopVectorizationCostModel::InstWidening Decision = 6581 CM.getWideningDecision(I, VF); 6582 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 6583 "CM decision should be taken at this point."); 6584 assert(Decision != LoopVectorizationCostModel::CM_Interleave && 6585 "Interleave memory opportunity should be caught earlier."); 6586 return Decision != LoopVectorizationCostModel::CM_Scalarize; 6587 }; 6588 6589 if (!getDecisionAndClampRange(willWiden, Range)) 6590 return nullptr; 6591 6592 VPValue *Mask = nullptr; 6593 if (Legal->isMaskRequired(I)) 6594 Mask = createBlockInMask(I->getParent(), Plan); 6595 6596 return new VPWidenMemoryInstructionRecipe(*I, Mask); 6597 } 6598 6599 VPWidenIntOrFpInductionRecipe * 6600 LoopVectorizationPlanner::tryToOptimizeInduction(Instruction *I, 6601 VFRange &Range) { 6602 if (PHINode *Phi = dyn_cast<PHINode>(I)) { 6603 // Check if this is an integer or fp induction. If so, build the recipe that 6604 // produces its scalar and vector values. 6605 InductionDescriptor II = Legal->getInductionVars()->lookup(Phi); 6606 if (II.getKind() == InductionDescriptor::IK_IntInduction || 6607 II.getKind() == InductionDescriptor::IK_FpInduction) 6608 return new VPWidenIntOrFpInductionRecipe(Phi); 6609 6610 return nullptr; 6611 } 6612 6613 // Optimize the special case where the source is a constant integer 6614 // induction variable. Notice that we can only optimize the 'trunc' case 6615 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 6616 // (c) other casts depend on pointer size. 6617 6618 // Determine whether \p K is a truncation based on an induction variable that 6619 // can be optimized. 6620 auto isOptimizableIVTruncate = 6621 [&](Instruction *K) -> std::function<bool(unsigned)> { 6622 return 6623 [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); }; 6624 }; 6625 6626 if (isa<TruncInst>(I) && 6627 getDecisionAndClampRange(isOptimizableIVTruncate(I), Range)) 6628 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 6629 cast<TruncInst>(I)); 6630 return nullptr; 6631 } 6632 6633 VPBlendRecipe * 6634 LoopVectorizationPlanner::tryToBlend(Instruction *I, VPlanPtr &Plan) { 6635 PHINode *Phi = dyn_cast<PHINode>(I); 6636 if (!Phi || Phi->getParent() == OrigLoop->getHeader()) 6637 return nullptr; 6638 6639 // We know that all PHIs in non-header blocks are converted into selects, so 6640 // we don't have to worry about the insertion order and we can just use the 6641 // builder. At this point we generate the predication tree. There may be 6642 // duplications since this is a simple recursive scan, but future 6643 // optimizations will clean it up. 
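// E.g. a phi with incoming values In1, In2, In3 and per-edge masks M1..M3
// becomes select(M3, In3, select(M2, In2, In1)) when the recipe executes,
// with the first incoming value acting as the default of the chain.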
6644 6645 SmallVector<VPValue *, 2> Masks; 6646 unsigned NumIncoming = Phi->getNumIncomingValues(); 6647 for (unsigned In = 0; In < NumIncoming; In++) { 6648 VPValue *EdgeMask = 6649 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 6650 assert((EdgeMask || NumIncoming == 1) && 6651 "Multiple predecessors with one having a full mask"); 6652 if (EdgeMask) 6653 Masks.push_back(EdgeMask); 6654 } 6655 return new VPBlendRecipe(Phi, Masks); 6656 } 6657 6658 bool LoopVectorizationPlanner::tryToWiden(Instruction *I, VPBasicBlock *VPBB, 6659 VFRange &Range) { 6660 if (CM.isScalarWithPredication(I)) 6661 return false; 6662 6663 auto IsVectorizableOpcode = [](unsigned Opcode) { 6664 switch (Opcode) { 6665 case Instruction::Add: 6666 case Instruction::And: 6667 case Instruction::AShr: 6668 case Instruction::BitCast: 6669 case Instruction::Br: 6670 case Instruction::Call: 6671 case Instruction::FAdd: 6672 case Instruction::FCmp: 6673 case Instruction::FDiv: 6674 case Instruction::FMul: 6675 case Instruction::FPExt: 6676 case Instruction::FPToSI: 6677 case Instruction::FPToUI: 6678 case Instruction::FPTrunc: 6679 case Instruction::FRem: 6680 case Instruction::FSub: 6681 case Instruction::GetElementPtr: 6682 case Instruction::ICmp: 6683 case Instruction::IntToPtr: 6684 case Instruction::Load: 6685 case Instruction::LShr: 6686 case Instruction::Mul: 6687 case Instruction::Or: 6688 case Instruction::PHI: 6689 case Instruction::PtrToInt: 6690 case Instruction::SDiv: 6691 case Instruction::Select: 6692 case Instruction::SExt: 6693 case Instruction::Shl: 6694 case Instruction::SIToFP: 6695 case Instruction::SRem: 6696 case Instruction::Store: 6697 case Instruction::Sub: 6698 case Instruction::Trunc: 6699 case Instruction::UDiv: 6700 case Instruction::UIToFP: 6701 case Instruction::URem: 6702 case Instruction::Xor: 6703 case Instruction::ZExt: 6704 return true; 6705 } 6706 return false; 6707 }; 6708 6709 if (!IsVectorizableOpcode(I->getOpcode())) 6710 return false; 6711 6712 if (CallInst *CI = dyn_cast<CallInst>(I)) { 6713 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6714 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 6715 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect)) 6716 return false; 6717 } 6718 6719 auto willWiden = [&](unsigned VF) -> bool { 6720 if (!isa<PHINode>(I) && (CM.isScalarAfterVectorization(I, VF) || 6721 CM.isProfitableToScalarize(I, VF))) 6722 return false; 6723 if (CallInst *CI = dyn_cast<CallInst>(I)) { 6724 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6725 // The following case may be scalarized depending on the VF. 6726 // The flag shows whether we use Intrinsic or a usual Call for vectorized 6727 // version of the instruction. 6728 // Is it beneficial to perform intrinsic call compared to lib call? 6729 bool NeedToScalarize; 6730 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 6731 bool UseVectorIntrinsic = 6732 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 6733 return UseVectorIntrinsic || !NeedToScalarize; 6734 } 6735 if (isa<LoadInst>(I) || isa<StoreInst>(I)) { 6736 assert(CM.getWideningDecision(I, VF) == 6737 LoopVectorizationCostModel::CM_Scalarize && 6738 "Memory widening decisions should have been taken care by now"); 6739 return false; 6740 } 6741 return true; 6742 }; 6743 6744 if (!getDecisionAndClampRange(willWiden, Range)) 6745 return false; 6746 6747 // Success: widen this instruction. 
We optimize the common case where 6748 // consecutive instructions can be represented by a single recipe. 6749 if (!VPBB->empty()) { 6750 VPWidenRecipe *LastWidenRecipe = dyn_cast<VPWidenRecipe>(&VPBB->back()); 6751 if (LastWidenRecipe && LastWidenRecipe->appendInstruction(I)) 6752 return true; 6753 } 6754 6755 VPBB->appendRecipe(new VPWidenRecipe(I)); 6756 return true; 6757 } 6758 6759 VPBasicBlock *LoopVectorizationPlanner::handleReplication( 6760 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 6761 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 6762 VPlanPtr &Plan) { 6763 bool IsUniform = getDecisionAndClampRange( 6764 [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); }, 6765 Range); 6766 6767 bool IsPredicated = CM.isScalarWithPredication(I); 6768 auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated); 6769 6770 // Find if I uses a predicated instruction. If so, it will use its scalar 6771 // value. Avoid hoisting the insert-element which packs the scalar value into 6772 // a vector value, as that happens iff all users use the vector value. 6773 for (auto &Op : I->operands()) 6774 if (auto *PredInst = dyn_cast<Instruction>(Op)) 6775 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 6776 PredInst2Recipe[PredInst]->setAlsoPack(false); 6777 6778 // Finalize the recipe for Instr, first if it is not predicated. 6779 if (!IsPredicated) { 6780 DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 6781 VPBB->appendRecipe(Recipe); 6782 return VPBB; 6783 } 6784 DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 6785 assert(VPBB->getSuccessors().empty() && 6786 "VPBB has successors when handling predicated replication."); 6787 // Record predicated instructions for above packing optimizations. 6788 PredInst2Recipe[I] = Recipe; 6789 VPBlockBase *Region = 6790 VPBB->setOneSuccessor(createReplicateRegion(I, Recipe, Plan)); 6791 return cast<VPBasicBlock>(Region->setOneSuccessor(new VPBasicBlock())); 6792 } 6793 6794 VPRegionBlock * 6795 LoopVectorizationPlanner::createReplicateRegion(Instruction *Instr, 6796 VPRecipeBase *PredRecipe, 6797 VPlanPtr &Plan) { 6798 // Instructions marked for predication are replicated and placed under an 6799 // if-then construct to prevent side-effects. 6800 6801 // Generate recipes to compute the block mask for this region. 6802 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 6803 6804 // Build the triangular if-then region. 6805 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 6806 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 6807 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 6808 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 6809 auto *PHIRecipe = 6810 Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr); 6811 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 6812 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 6813 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 6814 6815 // Note: first set Entry as region entry and then connect successors starting 6816 // from it in order, to propagate the "parent" of each VPBasicBlock. 
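// The resulting region is the usual if-then triangle: Entry
// ("pred.<opcode>.entry") branches on the mask either into Pred
// ("pred.<opcode>.if") or directly to Exit ("pred.<opcode>.continue"), and
// Pred unconditionally falls through to Exit.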
6817 Entry->setTwoSuccessors(Pred, Exit); 6818 Pred->setOneSuccessor(Exit); 6819 6820 return Region; 6821 } 6822 6823 LoopVectorizationPlanner::VPlanPtr 6824 LoopVectorizationPlanner::buildVPlan(VFRange &Range, 6825 const SmallPtrSetImpl<Value *> &NeedDef) { 6826 // Outer loop handling: They may require CFG and instruction level 6827 // transformations before even evaluating whether vectorization is profitable. 6828 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 6829 // the vectorization pipeline. 6830 if (!OrigLoop->empty()) { 6831 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 6832 6833 // Create new empty VPlan 6834 auto Plan = llvm::make_unique<VPlan>(); 6835 return Plan; 6836 } 6837 6838 assert(OrigLoop->empty() && "Inner loop expected."); 6839 EdgeMaskCache.clear(); 6840 BlockMaskCache.clear(); 6841 DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 6842 DenseMap<Instruction *, Instruction *> SinkAfterInverse; 6843 6844 // Collect instructions from the original loop that will become trivially dead 6845 // in the vectorized loop. We don't need to vectorize these instructions. For 6846 // example, original induction update instructions can become dead because we 6847 // separately emit induction "steps" when generating code for the new loop. 6848 // Similarly, we create a new latch condition when setting up the structure 6849 // of the new loop, so the old one can become dead. 6850 SmallPtrSet<Instruction *, 4> DeadInstructions; 6851 collectTriviallyDeadInstructions(DeadInstructions); 6852 6853 // Hold a mapping from predicated instructions to their recipes, in order to 6854 // fix their AlsoPack behavior if a user is determined to replicate and use a 6855 // scalar instead of vector value. 6856 DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe; 6857 6858 // Create a dummy pre-entry VPBasicBlock to start building the VPlan. 6859 VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry"); 6860 auto Plan = llvm::make_unique<VPlan>(VPBB); 6861 6862 // Represent values that will have defs inside VPlan. 6863 for (Value *V : NeedDef) 6864 Plan->addVPValue(V); 6865 6866 // Scan the body of the loop in a topological order to visit each basic block 6867 // after having visited its predecessor basic blocks. 6868 LoopBlocksDFS DFS(OrigLoop); 6869 DFS.perform(LI); 6870 6871 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6872 // Relevant instructions from basic block BB will be grouped into VPRecipe 6873 // ingredients and fill a new VPBasicBlock. 6874 unsigned VPBBsForBB = 0; 6875 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName()); 6876 VPBB->setOneSuccessor(FirstVPBBForBB); 6877 VPBB = FirstVPBBForBB; 6878 Builder.setInsertPoint(VPBB); 6879 6880 std::vector<Instruction *> Ingredients; 6881 6882 // Organize the ingredients to vectorize from current basic block in the 6883 // right order. 6884 for (Instruction &I : BB->instructionsWithoutDebug()) { 6885 Instruction *Instr = &I; 6886 6887 // First filter out irrelevant instructions, to ensure no recipes are 6888 // built for them. 6889 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) 6890 continue; 6891 6892 // I is a member of an InterleaveGroup for Range.Start. If it's an adjunct 6893 // member of the IG, do not construct any Recipe for it. 
6894 const InterleaveGroup *IG = CM.getInterleavedAccessGroup(Instr); 6895 if (IG && Instr != IG->getInsertPos() && 6896 Range.Start >= 2 && // Query is illegal for VF == 1 6897 CM.getWideningDecision(Instr, Range.Start) == 6898 LoopVectorizationCostModel::CM_Interleave) { 6899 if (SinkAfterInverse.count(Instr)) 6900 Ingredients.push_back(SinkAfterInverse.find(Instr)->second); 6901 continue; 6902 } 6903 6904 // Move instructions to handle first-order recurrences, step 1: avoid 6905 // handling this instruction until after we've handled the instruction it 6906 // should follow. 6907 auto SAIt = SinkAfter.find(Instr); 6908 if (SAIt != SinkAfter.end()) { 6909 DEBUG(dbgs() << "Sinking" << *SAIt->first << " after" << *SAIt->second 6910 << " to vectorize a 1st order recurrence.\n"); 6911 SinkAfterInverse[SAIt->second] = Instr; 6912 continue; 6913 } 6914 6915 Ingredients.push_back(Instr); 6916 6917 // Move instructions to handle first-order recurrences, step 2: push the 6918 // instruction to be sunk at its insertion point. 6919 auto SAInvIt = SinkAfterInverse.find(Instr); 6920 if (SAInvIt != SinkAfterInverse.end()) 6921 Ingredients.push_back(SAInvIt->second); 6922 } 6923 6924 // Introduce each ingredient into VPlan. 6925 for (Instruction *Instr : Ingredients) { 6926 VPRecipeBase *Recipe = nullptr; 6927 6928 // Check if Instr should belong to an interleave memory recipe, or already 6929 // does. In the latter case Instr is irrelevant. 6930 if ((Recipe = tryToInterleaveMemory(Instr, Range))) { 6931 VPBB->appendRecipe(Recipe); 6932 continue; 6933 } 6934 6935 // Check if Instr is a memory operation that should be widened. 6936 if ((Recipe = tryToWidenMemory(Instr, Range, Plan))) { 6937 VPBB->appendRecipe(Recipe); 6938 continue; 6939 } 6940 6941 // Check if Instr should form some PHI recipe. 6942 if ((Recipe = tryToOptimizeInduction(Instr, Range))) { 6943 VPBB->appendRecipe(Recipe); 6944 continue; 6945 } 6946 if ((Recipe = tryToBlend(Instr, Plan))) { 6947 VPBB->appendRecipe(Recipe); 6948 continue; 6949 } 6950 if (PHINode *Phi = dyn_cast<PHINode>(Instr)) { 6951 VPBB->appendRecipe(new VPWidenPHIRecipe(Phi)); 6952 continue; 6953 } 6954 6955 // Check if Instr is to be widened by a general VPWidenRecipe, after 6956 // having first checked for specific widening recipes that deal with 6957 // Interleave Groups, Inductions and Phi nodes. 6958 if (tryToWiden(Instr, VPBB, Range)) 6959 continue; 6960 6961 // Otherwise, if all widening options failed, Instruction is to be 6962 // replicated. This may create a successor for VPBB. 6963 VPBasicBlock *NextVPBB = 6964 handleReplication(Instr, Range, VPBB, PredInst2Recipe, Plan); 6965 if (NextVPBB != VPBB) { 6966 VPBB = NextVPBB; 6967 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 6968 : ""); 6969 } 6970 } 6971 } 6972 6973 // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks 6974 // may also be empty, such as the last one VPBB, reflecting original 6975 // basic-blocks with no recipes. 
6976 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry()); 6977 assert(PreEntry->empty() && "Expecting empty pre-entry block."); 6978 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor()); 6979 PreEntry->disconnectSuccessor(Entry); 6980 delete PreEntry; 6981 6982 std::string PlanName; 6983 raw_string_ostream RSO(PlanName); 6984 unsigned VF = Range.Start; 6985 Plan->addVF(VF); 6986 RSO << "Initial VPlan for VF={" << VF; 6987 for (VF *= 2; VF < Range.End; VF *= 2) { 6988 Plan->addVF(VF); 6989 RSO << "," << VF; 6990 } 6991 RSO << "},UF>=1"; 6992 RSO.flush(); 6993 Plan->setName(PlanName); 6994 6995 return Plan; 6996 } 6997 6998 Value* LoopVectorizationPlanner::VPCallbackILV:: 6999 getOrCreateVectorValues(Value *V, unsigned Part) { 7000 return ILV.getOrCreateVectorValue(V, Part); 7001 } 7002 7003 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const { 7004 O << " +\n" 7005 << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 7006 IG->getInsertPos()->printAsOperand(O, false); 7007 O << "\\l\""; 7008 for (unsigned i = 0; i < IG->getFactor(); ++i) 7009 if (Instruction *I = IG->getMember(i)) 7010 O << " +\n" 7011 << Indent << "\" " << VPlanIngredient(I) << " " << i << "\\l\""; 7012 } 7013 7014 void VPWidenRecipe::execute(VPTransformState &State) { 7015 for (auto &Instr : make_range(Begin, End)) 7016 State.ILV->widenInstruction(Instr); 7017 } 7018 7019 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 7020 assert(!State.Instance && "Int or FP induction being replicated."); 7021 State.ILV->widenIntOrFpInduction(IV, Trunc); 7022 } 7023 7024 void VPWidenPHIRecipe::execute(VPTransformState &State) { 7025 State.ILV->widenPHIInstruction(Phi, State.UF, State.VF); 7026 } 7027 7028 void VPBlendRecipe::execute(VPTransformState &State) { 7029 State.ILV->setDebugLocFromInst(State.Builder, Phi); 7030 // We know that all PHIs in non-header blocks are converted into 7031 // selects, so we don't have to worry about the insertion order and we 7032 // can just use the builder. 7033 // At this point we generate the predication tree. There may be 7034 // duplications since this is a simple recursive scan, but future 7035 // optimizations will clean it up. 7036 7037 unsigned NumIncoming = Phi->getNumIncomingValues(); 7038 7039 assert((User || NumIncoming == 1) && 7040 "Multiple predecessors with predecessors having a full mask"); 7041 // Generate a sequence of selects of the form: 7042 // SELECT(Mask3, In3, 7043 // SELECT(Mask2, In2, 7044 // ( ...))) 7045 InnerLoopVectorizer::VectorParts Entry(State.UF); 7046 for (unsigned In = 0; In < NumIncoming; ++In) { 7047 for (unsigned Part = 0; Part < State.UF; ++Part) { 7048 // We might have single edge PHIs (blocks) - use an identity 7049 // 'select' for the first PHI operand. 7050 Value *In0 = 7051 State.ILV->getOrCreateVectorValue(Phi->getIncomingValue(In), Part); 7052 if (In == 0) 7053 Entry[Part] = In0; // Initialize with the first incoming value. 7054 else { 7055 // Select between the current value and the previous incoming edge 7056 // based on the incoming mask. 
        Value *Cond = State.get(User->getOperand(In), Part);
        Entry[Part] =
            State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
      }
    }
  }
  for (unsigned Part = 0; Part < State.UF; ++Part)
    State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
}

void VPInterleaveRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Interleave group being replicated.");
  State.ILV->vectorizeInterleaveGroup(IG->getInsertPos());
}

void VPReplicateRecipe::execute(VPTransformState &State) {
  if (State.Instance) { // Generate a single instance.
    State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
    // Insert scalar instance packing it into a vector.
    if (AlsoPack && State.VF > 1) {
      // If we're constructing lane 0, initialize to start from undef.
      if (State.Instance->Lane == 0) {
        Value *Undef =
            UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
        State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
      }
      State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
    }
    return;
  }

  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
  unsigned EndLane = IsUniform ? 1 : State.VF;
  for (unsigned Part = 0; Part < State.UF; ++Part)
    for (unsigned Lane = 0; Lane < EndLane; ++Lane)
      State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
}

void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Branch on Mask works only on single instance.");

  unsigned Part = State.Instance->Part;
  unsigned Lane = State.Instance->Lane;

  Value *ConditionBit = nullptr;
  if (!User) // Block in mask is all-one.
    ConditionBit = State.Builder.getTrue();
  else {
    VPValue *BlockInMask = User->getOperand(0);
    ConditionBit = State.get(BlockInMask, Part);
    if (ConditionBit->getType()->isVectorTy())
      ConditionBit = State.Builder.CreateExtractElement(
          ConditionBit, State.Builder.getInt32(Lane));
  }

  // Replace the temporary unreachable terminator with a new conditional
  // branch, whose two destinations will be set later when they are created.
  auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
  assert(isa<UnreachableInst>(CurrentTerminator) &&
         "Expected to replace unreachable terminator with conditional branch.");
  auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
  CondBr->setSuccessor(0, nullptr);
  ReplaceInstWithInst(CurrentTerminator, CondBr);
}

void VPPredInstPHIRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Predicated instruction PHI works per instance.");
  Instruction *ScalarPredInst = cast<Instruction>(
      State.ValueMap.getScalarValue(PredInst, *State.Instance));
  BasicBlock *PredicatedBB = ScalarPredInst->getParent();
  BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
  assert(PredicatingBB && "Predicated block has no single predecessor.");

  // By current pack/unpack logic we need to generate only a single phi node: if
  // a vector value for the predicated instruction exists at this point it means
  // the instruction has vector users only, and a phi for the vector value is
  // needed.
In this case the recipe of the predicated instruction is marked to 7136 // also do that packing, thereby "hoisting" the insert-element sequence. 7137 // Otherwise, a phi node for the scalar value is needed. 7138 unsigned Part = State.Instance->Part; 7139 if (State.ValueMap.hasVectorValue(PredInst, Part)) { 7140 Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part); 7141 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 7142 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 7143 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 7144 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 7145 State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache. 7146 } else { 7147 Type *PredInstType = PredInst->getType(); 7148 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 7149 Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB); 7150 Phi->addIncoming(ScalarPredInst, PredicatedBB); 7151 State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi); 7152 } 7153 } 7154 7155 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 7156 if (!User) 7157 return State.ILV->vectorizeMemoryInstruction(&Instr); 7158 7159 // Last (and currently only) operand is a mask. 7160 InnerLoopVectorizer::VectorParts MaskValues(State.UF); 7161 VPValue *Mask = User->getOperand(User->getNumOperands() - 1); 7162 for (unsigned Part = 0; Part < State.UF; ++Part) 7163 MaskValues[Part] = State.get(Mask, Part); 7164 State.ILV->vectorizeMemoryInstruction(&Instr, &MaskValues); 7165 } 7166 7167 // Process the loop in the VPlan-native vectorization path. This path builds 7168 // VPlan upfront in the vectorization pipeline, which allows to apply 7169 // VPlan-to-VPlan transformations from the very beginning without modifying the 7170 // input LLVM IR. 7171 static bool processLoopInVPlanNativePath( 7172 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 7173 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 7174 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 7175 OptimizationRemarkEmitter *ORE, LoopVectorizeHints &Hints) { 7176 7177 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 7178 Function *F = L->getHeader()->getParent(); 7179 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 7180 LoopVectorizationCostModel CM(L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 7181 &Hints, IAI); 7182 // Use the planner for outer loop vectorization. 7183 // TODO: CM is not used at this point inside the planner. Turn CM into an 7184 // optional argument if we don't need it in the future. 7185 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM); 7186 7187 // Get user vectorization factor. 7188 unsigned UserVF = Hints.getWidth(); 7189 7190 // Check the function attributes to find out if this function should be 7191 // optimized for size. 7192 bool OptForSize = 7193 Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize(); 7194 7195 // Plan how to best vectorize, return the best VF and its cost. 7196 LVP.planInVPlanNativePath(OptForSize, UserVF); 7197 7198 // Returning false. We are currently not generating vector code in the VPlan 7199 // native path. 7200 return false; 7201 } 7202 7203 bool LoopVectorizePass::processLoop(Loop *L) { 7204 assert((EnableVPlanNativePath || L->empty()) && 7205 "VPlan-native path is not enabled. 
Only process inner loops."); 7206 7207 #ifndef NDEBUG 7208 const std::string DebugLocStr = getDebugLocString(L); 7209 #endif /* NDEBUG */ 7210 7211 DEBUG(dbgs() << "\nLV: Checking a loop in \"" 7212 << L->getHeader()->getParent()->getName() << "\" from " 7213 << DebugLocStr << "\n"); 7214 7215 LoopVectorizeHints Hints(L, DisableUnrolling, *ORE); 7216 7217 DEBUG(dbgs() << "LV: Loop hints:" 7218 << " force=" 7219 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 7220 ? "disabled" 7221 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 7222 ? "enabled" 7223 : "?")) 7224 << " width=" << Hints.getWidth() 7225 << " unroll=" << Hints.getInterleave() << "\n"); 7226 7227 // Function containing loop 7228 Function *F = L->getHeader()->getParent(); 7229 7230 // Looking at the diagnostic output is the only way to determine if a loop 7231 // was vectorized (other than looking at the IR or machine code), so it 7232 // is important to generate an optimization remark for each loop. Most of 7233 // these messages are generated as OptimizationRemarkAnalysis. Remarks 7234 // generated as OptimizationRemark and OptimizationRemarkMissed are 7235 // less verbose reporting vectorized loops and unvectorized loops that may 7236 // benefit from vectorization, respectively. 7237 7238 if (!Hints.allowVectorization(F, L, AlwaysVectorize)) { 7239 DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 7240 return false; 7241 } 7242 7243 PredicatedScalarEvolution PSE(*SE, *L); 7244 7245 // Check if it is legal to vectorize the loop. 7246 LoopVectorizationRequirements Requirements(*ORE); 7247 LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, GetLAA, LI, ORE, 7248 &Requirements, &Hints, DB, AC); 7249 if (!LVL.canVectorize(EnableVPlanNativePath)) { 7250 DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 7251 emitMissedWarning(F, L, Hints, ORE); 7252 return false; 7253 } 7254 7255 // Check the function attributes to find out if this function should be 7256 // optimized for size. 7257 bool OptForSize = 7258 Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize(); 7259 7260 // Entrance to the VPlan-native vectorization path. Outer loops are processed 7261 // here. They may require CFG and instruction level transformations before 7262 // even evaluating whether vectorization is profitable. Since we cannot modify 7263 // the incoming IR, we need to build VPlan upfront in the vectorization 7264 // pipeline. 7265 if (!L->empty()) 7266 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 7267 ORE, Hints); 7268 7269 assert(L->empty() && "Inner loop expected."); 7270 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 7271 // count by optimizing for size, to minimize overheads. 7272 // Prefer constant trip counts over profile data, over upper bound estimate. 7273 unsigned ExpectedTC = 0; 7274 bool HasExpectedTC = false; 7275 if (const SCEVConstant *ConstExits = 7276 dyn_cast<SCEVConstant>(SE->getBackedgeTakenCount(L))) { 7277 const APInt &ExitsCount = ConstExits->getAPInt(); 7278 // We are interested in small values for ExpectedTC. Skip over those that 7279 // can't fit an unsigned. 7280 if (ExitsCount.ult(std::numeric_limits<unsigned>::max())) { 7281 ExpectedTC = static_cast<unsigned>(ExitsCount.getZExtValue()) + 1; 7282 HasExpectedTC = true; 7283 } 7284 } 7285 // ExpectedTC may be large because it's bound by a variable. Check 7286 // profiling information to validate we should vectorize. 
  if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) {
    auto EstimatedTC = getLoopEstimatedTripCount(L);
    if (EstimatedTC) {
      ExpectedTC = *EstimatedTC;
      HasExpectedTC = true;
    }
  }
  if (!HasExpectedTC) {
    ExpectedTC = SE->getSmallConstantMaxTripCount(L);
    HasExpectedTC = (ExpectedTC > 0);
  }

  if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) {
    DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                 << "This loop is worth vectorizing only if no scalar "
                 << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      DEBUG(dbgs() << "\n");
      // Loops with a very small trip count are considered for vectorization
      // under OptForSize, thereby making sure the cost of their loop body is
      // dominant, free of runtime guards and scalar iteration overheads.
      OptForSize = true;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem possibly correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
                    " attribute is used.\n");
    ORE->emit(createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                     "NoImplicitFloat", L)
              << "loop not vectorized due to NoImplicitFloat attribute");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
    ORE->emit(
        createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
        << "loop not vectorized due to unsafe FP support.");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved) {
    IAI.analyzeInterleaving();
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM);

  // Get user vectorization factor.
  unsigned UserVF = Hints.getWidth();

  // Plan how to best vectorize, return the best VF and its cost.
  VectorizationFactor VF = LVP.plan(OptForSize, UserVF);

  // Select the interleave count.
  unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);

  // Get user interleave count.
  unsigned UserIC = Hints.getInterleave();

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                    "requirements.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  if (VF.Width == 1) {
    DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    DEBUG(dbgs()
          << "LV: Interleaving is beneficial but is explicitly disabled.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not beneficial to vectorize the loop, then
    // interleave it.
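    // (InnerLoopUnroller, defined earlier in this file, is an
    // InnerLoopVectorizer fixed to VF == 1, so executing the plan with it
    // only unrolls/interleaves the scalar loop.)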
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is beneficial to vectorize the loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LVP.executePlan(LB, DT);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling a scalar loop when there are
    // no runtime checks about strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      AddRuntimeUnrollDisableMetaData(L);

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();

  DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
    return false;

  bool Changed = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
7556 Changed |= formLCSSARecursively(*L, *DT, LI, SE); 7557 7558 Changed |= processLoop(L); 7559 } 7560 7561 // Process each loop nest in the function. 7562 return Changed; 7563 } 7564 7565 PreservedAnalyses LoopVectorizePass::run(Function &F, 7566 FunctionAnalysisManager &AM) { 7567 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 7568 auto &LI = AM.getResult<LoopAnalysis>(F); 7569 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 7570 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 7571 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 7572 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 7573 auto &AA = AM.getResult<AAManager>(F); 7574 auto &AC = AM.getResult<AssumptionAnalysis>(F); 7575 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 7576 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 7577 7578 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 7579 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 7580 [&](Loop &L) -> const LoopAccessInfo & { 7581 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, nullptr}; 7582 return LAM.getResult<LoopAccessAnalysis>(L, AR); 7583 }; 7584 bool Changed = 7585 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE); 7586 if (!Changed) 7587 return PreservedAnalyses::all(); 7588 PreservedAnalyses PA; 7589 PA.preserve<LoopAnalysis>(); 7590 PA.preserve<DominatorTreeAnalysis>(); 7591 PA.preserve<BasicAA>(); 7592 PA.preserve<GlobalsAA>(); 7593 return PA; 7594 } 7595
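// Example (not part of this file; assumes a typical opt build): the pass can
// be exercised on its own from the command line, e.g.
//   opt -passes=loop-vectorize -S input.ll
// or, with the legacy pass manager,
//   opt -loop-vectorize -S input.ll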