//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanHCFGTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

/// @{
/// Metadata attribute names
static const char *const LLVMLoopVectorizeFollowupAll =
    "llvm.loop.vectorize.followup_all";
static const char *const LLVMLoopVectorizeFollowupVectorized =
    "llvm.loop.vectorize.followup_vectorized";
static const char *const LLVMLoopVectorizeFollowupEpilogue =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

/// Loops with a known constant trip count below this number are vectorized
/// only if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
Mostly " 217 "useful for getting consistent testing.")); 218 219 static cl::opt<unsigned> SmallLoopCost( 220 "small-loop-cost", cl::init(20), cl::Hidden, 221 cl::desc( 222 "The cost of a loop that is considered 'small' by the interleaver.")); 223 224 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 225 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 226 cl::desc("Enable the use of the block frequency analysis to access PGO " 227 "heuristics minimizing code growth in cold regions and being more " 228 "aggressive in hot regions.")); 229 230 // Runtime interleave loops for load/store throughput. 231 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 232 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 233 cl::desc( 234 "Enable runtime interleaving until load/store ports are saturated")); 235 236 /// The number of stores in a loop that are allowed to need predication. 237 static cl::opt<unsigned> NumberOfStoresToPredicate( 238 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 239 cl::desc("Max number of stores to be predicated behind an if.")); 240 241 static cl::opt<bool> EnableIndVarRegisterHeur( 242 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 243 cl::desc("Count the induction variable only once when interleaving")); 244 245 static cl::opt<bool> EnableCondStoresVectorization( 246 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 247 cl::desc("Enable if predication of stores during vectorization.")); 248 249 static cl::opt<unsigned> MaxNestedScalarReductionIC( 250 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 251 cl::desc("The maximum interleave count to use when interleaving a scalar " 252 "reduction in a nested loop.")); 253 254 cl::opt<bool> EnableVPlanNativePath( 255 "enable-vplan-native-path", cl::init(false), cl::Hidden, 256 cl::desc("Enable VPlan-native vectorization path with " 257 "support for outer loop vectorization.")); 258 259 // This flag enables the stress testing of the VPlan H-CFG construction in the 260 // VPlan-native vectorization path. It must be used in conjuction with 261 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the 262 // verification of the H-CFGs built. 263 static cl::opt<bool> VPlanBuildStressTest( 264 "vplan-build-stress-test", cl::init(false), cl::Hidden, 265 cl::desc( 266 "Build VPlan for every supported loop nest in the function and bail " 267 "out right after the build (stress test the VPlan H-CFG construction " 268 "in the VPlan-native vectorization path).")); 269 270 /// A helper function for converting Scalar types to vector types. 271 /// If the incoming type is void, we return void. If the VF is 1, we return 272 /// the scalar type. 273 static Type *ToVectorTy(Type *Scalar, unsigned VF) { 274 if (Scalar->isVoidTy() || VF == 1) 275 return Scalar; 276 return VectorType::get(Scalar, VF); 277 } 278 279 /// A helper function that returns the type of loaded or stored value. 280 static Type *getMemInstValueType(Value *I) { 281 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 282 "Expected Load or Store instruction"); 283 if (auto *LI = dyn_cast<LoadInst>(I)) 284 return LI->getType(); 285 return cast<StoreInst>(I)->getValueOperand()->getType(); 286 } 287 288 /// A helper function that returns true if the given type is irregular. The 289 /// type is irregular if its allocated size doesn't equal the store size of an 290 /// element of the corresponding vector type at the given vectorization factor. 
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V)) {
    FastMathFlags Flags;
    Flags.setFast();
    cast<Instruction>(V)->setFastMathFlags(Flags);
  }
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop. Unlink the old loop and connect the new one.
  /// Return the pre-header block of the new loop.
  BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
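  /// For example, with VF = 4 a scalar 'add i32' in the original loop is
  /// emitted as a single 'add <4 x i32>' operating on the widened operands
  /// of \p I (example for illustration only).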
  void widenInstruction(Instruction &I);

  /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive.
  void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
                            bool IfPredicateInstr);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
  /// vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
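  /// Each call fills in one lane; across all lanes this produces an
  /// insertelement chain of the form (illustrative, assuming VF = 4 and i32
  /// lanes; value names are made up):
  ///   %v.0 = insertelement <4 x i32> undef, i32 %s0, i32 0
  ///   %v.1 = insertelement <4 x i32> %v.0,  i32 %s1, i32 1
  ///   ...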
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  /// Try to vectorize the interleaved access group that \p Instr belongs to,
  /// optionally masking the vector operations if \p BlockInMask is non-null.
  void vectorizeInterleaveGroup(Instruction *Instr,
                                VectorParts *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions, optionally masking the vector
  /// operations if \p BlockInMask is non-null.
  void vectorizeMemoryInstruction(Instruction *Instr,
                                  VectorParts *BlockInMask = nullptr);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(void);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// The Loop exit block may have single value PHI nodes with some
  /// incoming value. While vectorizing we only handled real values
  /// that were defined inside the loop and we should have one value for
  /// each predecessor of its parent basic block. See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at \p StartIdx.
  /// \p Opcode is relevant for FP induction variables.
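  /// For example, with VF = 4, StartIdx = 0 and Step = 1, a splat of %iv
  /// becomes the step vector <%iv, %iv + 1, %iv + 2, %iv + 3> (example for
  /// illustration only).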
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Instruction *EntryVal);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
  /// latter case \p EntryVal is a TruncInst and we must not record anything for
  /// that IV, but it's error-prone to expect callers of this routine to care
  /// about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
                                             const Instruction *EntryVal,
                                             Value *VectorLoopValue,
                                             unsigned Part,
                                             unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
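  /// A direct bitcast between pointer and non-pointer element types is not
  /// valid IR, so that case is presumably handled by casting through an
  /// intermediate integer vector of the same element width.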
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AliasAnalysis *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst))
      B.SetCurrentDebugLocation(DIL->cloneWithDuplicationFactor(UF * VF));
    else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
                             LoopInfo *LI, LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
        AC(AC), ORE(ORE), TheFunction(F), Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization should be avoided up front.
  Optional<unsigned> computeMaxVF(bool OptForSize);

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
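  /// For example, with MaxVF = 8 the candidate factors 1, 2, 4 and 8 are each
  /// costed and, roughly speaking, the one with the lowest estimated cost per
  /// scalar iteration is chosen (description for illustration only).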
  VectorizationFactor selectVectorizationFactor(unsigned MaxVF);

  /// Setup cost-based decisions for user vectorization factor.
  void selectUserVectorizationFactor(unsigned UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
                                 unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way, and
  /// the form of the instruction after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(unsigned VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    unsigned LoopInvariantRegs;

    /// Holds the maximum number of concurrent live intervals in the loop.
    unsigned MaxLocalUsers;
  };

  /// \return Returns information about the register usage of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
    assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.find(I) != UniformsPerVF->second.end();
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.find(I) != ScalarsPerVF->second.end();
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
    return VF > 1 && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
                           unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF,
                           InstWidening W, unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
    /// But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  unsigned getWideningCost(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");
    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(unsigned VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(unsigned VF) {
    // Do the analysis once.
    if (VF == 1 || Uniforms.find(VF) != Uniforms.end())
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
    return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedStore(DataType);
  }

  /// Returns true if the target machine supports masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
    return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedLoad(DataType);
  }

  /// Returns true if the target machine supports masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType) {
    return TTI.isLegalMaskedScatter(DataType);
  }

  /// Returns true if the target machine supports masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType) {
    return TTI.isLegalMaskedGather(DataType);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getMemInstValueType(V);
    return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
  }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
  bool isScalarWithPredication(Instruction *I, unsigned VF = 1);

  // Returns true if \p I is an instruction that will be predicated either
  // through scalar predication or masked load/store or masked gather/scatter.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I) {
    if (!blockNeedsPredication(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1);

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if an interleaved group requires a scalar iteration
  /// to handle accesses with gaps, and there is nothing preventing us from
  /// creating a scalar epilogue.
  bool requiresScalarEpilogue() const {
    return IsScalarEpilogueAllowed && InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is allowed; it may be disallowed, e.g.,
  /// under optsize.
  bool isScalarEpilogueAllowed() const { return IsScalarEpilogueAllowed; }

  /// Returns true if all loop blocks should be masked to fold tail loop.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  bool blockNeedsPredication(BasicBlock *BB) {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factor, larger than zero.
  /// One is returned if vectorization should best be avoided due to cost.
  unsigned computeFeasibleMaxVF(bool OptForSize, unsigned ConstTripCount);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e.,
  /// no vectorization has actually taken place).
  using VectorizationCostTy = std::pair<unsigned, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);

  /// Calculate vectorization cost of memory instruction \p I.
  unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);

  /// The cost computation for scalarized memory instruction.
  unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);

  /// The cost computation for interleaving group of memory instructions.
  unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);

  /// The cost computation for Gather/Scatter instruction.
  unsigned getGatherScatterCost(Instruction *I, unsigned VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);

  /// The cost calculation for Load/Store instruction \p I with uniform pointer -
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  /// element)
  unsigned getUniformMemOpCost(Instruction *I, unsigned VF);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I);

  /// Create an analysis remark that explains why vectorization failed.
  ///
  /// \p RemarkName is the identifier for the remark. \return the remark object
  /// that can be streamed to.
  OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
    return createLVMissedAnalysis(Hints->vectorizeAnalysisPassName(),
                                  RemarkName, TheLoop);
  }

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, unsigned>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as a predicated block.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once.
  /// This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or doesn't divide by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  bool IsScalarEpilogueAllowed = true;

  /// All blocks of loop are to be masked to fold tail of scalar iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                              unsigned VF);

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopUniforms(unsigned VF);

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization. Non-uniform scalarized instructions will be
  /// represented by VF values in the vectorized loop, each corresponding to an
  /// iteration of the original scalar loop.
  void collectLoopScalars(unsigned VF);

  /// Keeps cost model vectorization decision and cost for instructions.
  /// Right now it is used for memory instructions only.
  using DecisionList = DenseMap<std::pair<Instruction *, unsigned>,
                                std::pair<InstWidening, unsigned>>;

  DecisionList WideningDecisions;

public:
  /// The loop that we evaluate.
  Loop *TheLoop;

  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// Vectorization legality.
  LoopVectorizationLegality *Legal;

  /// Vector target information.
  const TargetTransformInfo &TTI;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Demanded bits analysis.
  DemandedBits *DB;

  /// Assumption cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  const Function *TheFunction;

  /// Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;

  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride and close to each other.
  InterleavedAccessInfo &InterleaveInfo;

  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;

  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;
};

} // end namespace llvm

// Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
// vector length information is not provided, vectorization is not considered
// explicit. Interleave hints are not allowed either. These limitations will be
// relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
// vectorize' semantics. This pragma provides *auto-vectorization hints*
// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
// provides *explicit vectorization hints* (LV can bypass legality checks and
// assume that vectorization is legal). However, both hints are implemented
// using the same metadata (llvm.loop.vectorize, processed by
// LoopVectorizeHints). This will be fixed in the future when the native IR
// representation for pragma 'omp simd' is introduced.
static bool isExplicitVecOuterLoop(Loop *OuterLp,
                                   OptimizationRemarkEmitter *ORE) {
  assert(!OuterLp->empty() && "This is not an outer loop");
  LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);

  // Only outer loops with an explicit vectorization hint are supported.
  // Unannotated outer loops are ignored.
  if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
    return false;

  Function *Fn = OuterLp->getHeader()->getParent();
  if (!Hints.allowVectorization(Fn, OuterLp, false /*AlwaysVectorize*/)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
    return false;
  }

  if (!Hints.getWidth()) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: No user vector width.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  if (Hints.getInterleave() > 1) {
    // TODO: Interleave support is future work.
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
                         "outer loops.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  return true;
}

static void collectSupportedLoops(Loop &L, LoopInfo *LI,
                                  OptimizationRemarkEmitter *ORE,
                                  SmallVectorImpl<Loop *> &V) {
  // Collect inner loops and outer loops without irreducible control flow. For
For 1388 // now, only collect outer loops that have explicit vectorization hints. If we 1389 // are stress testing the VPlan H-CFG construction, we collect the outermost 1390 // loop of every loop nest. 1391 if (L.empty() || VPlanBuildStressTest || 1392 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 1393 LoopBlocksRPO RPOT(&L); 1394 RPOT.perform(LI); 1395 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 1396 V.push_back(&L); 1397 // TODO: Collect inner loops inside marked outer loops in case 1398 // vectorization fails for the outer loop. Do not invoke 1399 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 1400 // already known to be reducible. We can use an inherited attribute for 1401 // that. 1402 return; 1403 } 1404 } 1405 for (Loop *InnerL : L) 1406 collectSupportedLoops(*InnerL, LI, ORE, V); 1407 } 1408 1409 namespace { 1410 1411 /// The LoopVectorize Pass. 1412 struct LoopVectorize : public FunctionPass { 1413 /// Pass identification, replacement for typeid 1414 static char ID; 1415 1416 LoopVectorizePass Impl; 1417 1418 explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true) 1419 : FunctionPass(ID) { 1420 Impl.DisableUnrolling = NoUnrolling; 1421 Impl.AlwaysVectorize = AlwaysVectorize; 1422 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1423 } 1424 1425 bool runOnFunction(Function &F) override { 1426 if (skipFunction(F)) 1427 return false; 1428 1429 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1430 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1431 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1432 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1433 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1434 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1435 auto *TLI = TLIP ? &TLIP->getTLI() : nullptr; 1436 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1437 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1438 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1439 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1440 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1441 1442 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1443 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1444 1445 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1446 GetLAA, *ORE); 1447 } 1448 1449 void getAnalysisUsage(AnalysisUsage &AU) const override { 1450 AU.addRequired<AssumptionCacheTracker>(); 1451 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1452 AU.addRequired<DominatorTreeWrapperPass>(); 1453 AU.addRequired<LoopInfoWrapperPass>(); 1454 AU.addRequired<ScalarEvolutionWrapperPass>(); 1455 AU.addRequired<TargetTransformInfoWrapperPass>(); 1456 AU.addRequired<AAResultsWrapperPass>(); 1457 AU.addRequired<LoopAccessLegacyAnalysis>(); 1458 AU.addRequired<DemandedBitsWrapperPass>(); 1459 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1460 1461 // We currently do not preserve loopinfo/dominator analyses with outer loop 1462 // vectorization. Until this is addressed, mark these analyses as preserved 1463 // only for non-VPlan-native path. 1464 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 
1465 if (!EnableVPlanNativePath) { 1466 AU.addPreserved<LoopInfoWrapperPass>(); 1467 AU.addPreserved<DominatorTreeWrapperPass>(); 1468 } 1469 1470 AU.addPreserved<BasicAAWrapperPass>(); 1471 AU.addPreserved<GlobalsAAWrapperPass>(); 1472 } 1473 }; 1474 1475 } // end anonymous namespace 1476 1477 //===----------------------------------------------------------------------===// 1478 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 1479 // LoopVectorizationCostModel and LoopVectorizationPlanner. 1480 //===----------------------------------------------------------------------===// 1481 1482 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 1483 // We need to place the broadcast of invariant variables outside the loop, 1484 // but only if it's proven safe to do so. Else, broadcast will be inside 1485 // vector loop body. 1486 Instruction *Instr = dyn_cast<Instruction>(V); 1487 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 1488 (!Instr || 1489 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 1490 // Place the code for broadcasting invariant variables in the new preheader. 1491 IRBuilder<>::InsertPointGuard Guard(Builder); 1492 if (SafeToHoist) 1493 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1494 1495 // Broadcast the scalar into all locations in the vector. 1496 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 1497 1498 return Shuf; 1499 } 1500 1501 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 1502 const InductionDescriptor &II, Value *Step, Instruction *EntryVal) { 1503 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1504 "Expected either an induction phi-node or a truncate of it!"); 1505 Value *Start = II.getStartValue(); 1506 1507 // Construct the initial value of the vector IV in the vector loop preheader 1508 auto CurrIP = Builder.saveIP(); 1509 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1510 if (isa<TruncInst>(EntryVal)) { 1511 assert(Start->getType()->isIntegerTy() && 1512 "Truncation requires an integer type"); 1513 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 1514 Step = Builder.CreateTrunc(Step, TruncType); 1515 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 1516 } 1517 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 1518 Value *SteppedStart = 1519 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 1520 1521 // We create vector phi nodes for both integer and floating-point induction 1522 // variables. Here, we determine the kind of arithmetic we will perform. 1523 Instruction::BinaryOps AddOp; 1524 Instruction::BinaryOps MulOp; 1525 if (Step->getType()->isIntegerTy()) { 1526 AddOp = Instruction::Add; 1527 MulOp = Instruction::Mul; 1528 } else { 1529 AddOp = II.getInductionOpcode(); 1530 MulOp = Instruction::FMul; 1531 } 1532 1533 // Multiply the vectorization factor by the step using integer or 1534 // floating-point arithmetic as appropriate. 1535 Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF); 1536 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 1537 1538 // Create a vector splat to use in the induction update. 1539 // 1540 // FIXME: If the step is non-constant, we create the vector splat with 1541 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 1542 // handle a constant vector splat. 1543 Value *SplatVF = isa<Constant>(Mul) 1544 ? 
ConstantVector::getSplat(VF, cast<Constant>(Mul))
1545 : Builder.CreateVectorSplat(VF, Mul);
1546 Builder.restoreIP(CurrIP);
1547
1548 // We may need to add the step a number of times, depending on the unroll
1549 // factor. The last of those goes into the PHI.
1550 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
1551 &*LoopVectorBody->getFirstInsertionPt());
1552 VecInd->setDebugLoc(EntryVal->getDebugLoc());
1553 Instruction *LastInduction = VecInd;
1554 for (unsigned Part = 0; Part < UF; ++Part) {
1555 VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);
1556
1557 if (isa<TruncInst>(EntryVal))
1558 addMetadata(LastInduction, EntryVal);
1559 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part);
1560
1561 LastInduction = cast<Instruction>(addFastMathFlag(
1562 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
1563 LastInduction->setDebugLoc(EntryVal->getDebugLoc());
1564 }
1565
1566 // Move the last step to the end of the latch block. This ensures consistent
1567 // placement of all induction updates.
1568 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
1569 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
1570 auto *ICmp = cast<Instruction>(Br->getCondition());
1571 LastInduction->moveBefore(ICmp);
1572 LastInduction->setName("vec.ind.next");
1573
1574 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
1575 VecInd->addIncoming(LastInduction, LoopVectorLatch);
1576 }
1577
1578 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
1579 return Cost->isScalarAfterVectorization(I, VF) ||
1580 Cost->isProfitableToScalarize(I, VF);
1581 }
1582
1583 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
1584 if (shouldScalarizeInstruction(IV))
1585 return true;
1586 auto isScalarInst = [&](User *U) -> bool {
1587 auto *I = cast<Instruction>(U);
1588 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
1589 };
1590 return llvm::any_of(IV->users(), isScalarInst);
1591 }
1592
1593 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
1594 const InductionDescriptor &ID, const Instruction *EntryVal,
1595 Value *VectorLoopVal, unsigned Part, unsigned Lane) {
1596 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1597 "Expected either an induction phi-node or a truncate of it!");
1598
1599 // This induction variable is not the phi from the original loop but the
1600 // newly-created IV based on the proof that the casted Phi is equal to the
1601 // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
1602 // reuses the same InductionDescriptor that the original IV uses, but we
1603 // don't have to do any recording in this case - that is done when the
1604 // original IV is processed.
1605 if (isa<TruncInst>(EntryVal))
1606 return;
1607
1608 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
1609 if (Casts.empty())
1610 return;
1611 // Only the first Cast instruction in the Casts vector is of interest.
1612 // The rest of the Casts (if any exist) have no uses outside the
1613 // induction update chain itself.
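// (Illustrative sketch, not tied to a specific input: the recorded cast is
// typically a sext/zext/trunc of the induction phi that SCEV proved redundant
// under a runtime predicate, e.g.
//   %iv      = phi i32 [ 0, %ph ], [ %iv.next, %latch ]
//   %iv.cast = sext i32 %iv to i64
// Mapping the new induction value to that cast below lets the cast's users
// pick up the vectorized/scalarized value directly.)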
1614 Instruction *CastInst = *Casts.begin(); 1615 if (Lane < UINT_MAX) 1616 VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal); 1617 else 1618 VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal); 1619 } 1620 1621 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) { 1622 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 1623 "Primary induction variable must have an integer type"); 1624 1625 auto II = Legal->getInductionVars()->find(IV); 1626 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 1627 1628 auto ID = II->second; 1629 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 1630 1631 // The scalar value to broadcast. This will be derived from the canonical 1632 // induction variable. 1633 Value *ScalarIV = nullptr; 1634 1635 // The value from the original loop to which we are mapping the new induction 1636 // variable. 1637 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 1638 1639 // True if we have vectorized the induction variable. 1640 auto VectorizedIV = false; 1641 1642 // Determine if we want a scalar version of the induction variable. This is 1643 // true if the induction variable itself is not widened, or if it has at 1644 // least one user in the loop that is not widened. 1645 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal); 1646 1647 // Generate code for the induction step. Note that induction steps are 1648 // required to be loop-invariant 1649 assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) && 1650 "Induction step should be loop invariant"); 1651 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 1652 Value *Step = nullptr; 1653 if (PSE.getSE()->isSCEVable(IV->getType())) { 1654 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 1655 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 1656 LoopVectorPreHeader->getTerminator()); 1657 } else { 1658 Step = cast<SCEVUnknown>(ID.getStep())->getValue(); 1659 } 1660 1661 // Try to create a new independent vector induction variable. If we can't 1662 // create the phi node, we will splat the scalar induction variable in each 1663 // loop iteration. 1664 if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) { 1665 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 1666 VectorizedIV = true; 1667 } 1668 1669 // If we haven't yet vectorized the induction variable, or if we will create 1670 // a scalar one, we need to define the scalar induction variable and step 1671 // values. If we were given a truncation type, truncate the canonical 1672 // induction variable and step. Otherwise, derive these values from the 1673 // induction descriptor. 1674 if (!VectorizedIV || NeedsScalarIV) { 1675 ScalarIV = Induction; 1676 if (IV != OldInduction) { 1677 ScalarIV = IV->getType()->isIntegerTy() 1678 ? 
Builder.CreateSExtOrTrunc(Induction, IV->getType()) 1679 : Builder.CreateCast(Instruction::SIToFP, Induction, 1680 IV->getType()); 1681 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 1682 ScalarIV->setName("offset.idx"); 1683 } 1684 if (Trunc) { 1685 auto *TruncType = cast<IntegerType>(Trunc->getType()); 1686 assert(Step->getType()->isIntegerTy() && 1687 "Truncation requires an integer step"); 1688 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 1689 Step = Builder.CreateTrunc(Step, TruncType); 1690 } 1691 } 1692 1693 // If we haven't yet vectorized the induction variable, splat the scalar 1694 // induction variable, and build the necessary step vectors. 1695 // TODO: Don't do it unless the vectorized IV is really required. 1696 if (!VectorizedIV) { 1697 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 1698 for (unsigned Part = 0; Part < UF; ++Part) { 1699 Value *EntryPart = 1700 getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode()); 1701 VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart); 1702 if (Trunc) 1703 addMetadata(EntryPart, Trunc); 1704 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part); 1705 } 1706 } 1707 1708 // If an induction variable is only used for counting loop iterations or 1709 // calculating addresses, it doesn't need to be widened. Create scalar steps 1710 // that can be used by instructions we will later scalarize. Note that the 1711 // addition of the scalar steps will not increase the number of instructions 1712 // in the loop in the common case prior to InstCombine. We will be trading 1713 // one vector extract for each scalar step. 1714 if (NeedsScalarIV) 1715 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 1716 } 1717 1718 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 1719 Instruction::BinaryOps BinOp) { 1720 // Create and check the types. 1721 assert(Val->getType()->isVectorTy() && "Must be a vector"); 1722 int VLen = Val->getType()->getVectorNumElements(); 1723 1724 Type *STy = Val->getType()->getScalarType(); 1725 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 1726 "Induction Step must be an integer or FP"); 1727 assert(Step->getType() == STy && "Step has wrong type"); 1728 1729 SmallVector<Constant *, 8> Indices; 1730 1731 if (STy->isIntegerTy()) { 1732 // Create a vector of consecutive numbers from zero to VF. 1733 for (int i = 0; i < VLen; ++i) 1734 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 1735 1736 // Add the consecutive indices to the vector value. 1737 Constant *Cv = ConstantVector::get(Indices); 1738 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 1739 Step = Builder.CreateVectorSplat(VLen, Step); 1740 assert(Step->getType() == Val->getType() && "Invalid step vec"); 1741 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 1742 // which can be found from the original scalar operations. 1743 Step = Builder.CreateMul(Cv, Step); 1744 return Builder.CreateAdd(Val, Step, "induction"); 1745 } 1746 1747 // Floating point induction. 1748 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 1749 "Binary Opcode should be specified for FP induction"); 1750 // Create a vector of consecutive numbers from zero to VF. 1751 for (int i = 0; i < VLen; ++i) 1752 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 1753 1754 // Add the consecutive indices to the vector value. 
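// (For illustration: with VLen = 4, StartIdx = 0 and a scalar step of 2.0, Cv
// below is <0.0, 1.0, 2.0, 3.0>, the splatted step is <2.0, 2.0, 2.0, 2.0>,
// and the result is Val combined with <0.0, 2.0, 4.0, 6.0> using the requested
// FAdd/FSub opcode.)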
1755 Constant *Cv = ConstantVector::get(Indices); 1756 1757 Step = Builder.CreateVectorSplat(VLen, Step); 1758 1759 // Floating point operations had to be 'fast' to enable the induction. 1760 FastMathFlags Flags; 1761 Flags.setFast(); 1762 1763 Value *MulOp = Builder.CreateFMul(Cv, Step); 1764 if (isa<Instruction>(MulOp)) 1765 // Have to check, MulOp may be a constant 1766 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 1767 1768 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 1769 if (isa<Instruction>(BOp)) 1770 cast<Instruction>(BOp)->setFastMathFlags(Flags); 1771 return BOp; 1772 } 1773 1774 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 1775 Instruction *EntryVal, 1776 const InductionDescriptor &ID) { 1777 // We shouldn't have to build scalar steps if we aren't vectorizing. 1778 assert(VF > 1 && "VF should be greater than one"); 1779 1780 // Get the value type and ensure it and the step have the same integer type. 1781 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 1782 assert(ScalarIVTy == Step->getType() && 1783 "Val and Step should have the same type"); 1784 1785 // We build scalar steps for both integer and floating-point induction 1786 // variables. Here, we determine the kind of arithmetic we will perform. 1787 Instruction::BinaryOps AddOp; 1788 Instruction::BinaryOps MulOp; 1789 if (ScalarIVTy->isIntegerTy()) { 1790 AddOp = Instruction::Add; 1791 MulOp = Instruction::Mul; 1792 } else { 1793 AddOp = ID.getInductionOpcode(); 1794 MulOp = Instruction::FMul; 1795 } 1796 1797 // Determine the number of scalars we need to generate for each unroll 1798 // iteration. If EntryVal is uniform, we only need to generate the first 1799 // lane. Otherwise, we generate all VF values. 1800 unsigned Lanes = 1801 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1 1802 : VF; 1803 // Compute the scalar steps and save the results in VectorLoopValueMap. 1804 for (unsigned Part = 0; Part < UF; ++Part) { 1805 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 1806 auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane); 1807 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 1808 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 1809 VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add); 1810 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane); 1811 } 1812 } 1813 } 1814 1815 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) { 1816 assert(V != Induction && "The new induction variable should not be used."); 1817 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 1818 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 1819 1820 // If we have a stride that is replaced by one, do it here. Defer this for 1821 // the VPlan-native path until we start running Legal checks in that path. 1822 if (!EnableVPlanNativePath && Legal->hasStride(V)) 1823 V = ConstantInt::get(V->getType(), 1); 1824 1825 // If we have a vector mapped to this value, return it. 1826 if (VectorLoopValueMap.hasVectorValue(V, Part)) 1827 return VectorLoopValueMap.getVectorValue(V, Part); 1828 1829 // If the value has not been vectorized, check if it has been scalarized 1830 // instead. If it has been scalarized, and we actually need the value in 1831 // vector form, we will construct the vector values on demand. 
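// (Sketch of what follows: with VF = 4, a value that was scalarized into four
// per-lane definitions is either broadcast from lane zero when it is uniform
// after vectorization, or re-packed with a chain of insertelement instructions
// emitted right after its last scalar definition.)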
1832 if (VectorLoopValueMap.hasAnyScalarValue(V)) { 1833 Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0}); 1834 1835 // If we've scalarized a value, that value should be an instruction. 1836 auto *I = cast<Instruction>(V); 1837 1838 // If we aren't vectorizing, we can just copy the scalar map values over to 1839 // the vector map. 1840 if (VF == 1) { 1841 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue); 1842 return ScalarValue; 1843 } 1844 1845 // Get the last scalar instruction we generated for V and Part. If the value 1846 // is known to be uniform after vectorization, this corresponds to lane zero 1847 // of the Part unroll iteration. Otherwise, the last instruction is the one 1848 // we created for the last vector lane of the Part unroll iteration. 1849 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1; 1850 auto *LastInst = cast<Instruction>( 1851 VectorLoopValueMap.getScalarValue(V, {Part, LastLane})); 1852 1853 // Set the insert point after the last scalarized instruction. This ensures 1854 // the insertelement sequence will directly follow the scalar definitions. 1855 auto OldIP = Builder.saveIP(); 1856 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 1857 Builder.SetInsertPoint(&*NewIP); 1858 1859 // However, if we are vectorizing, we need to construct the vector values. 1860 // If the value is known to be uniform after vectorization, we can just 1861 // broadcast the scalar value corresponding to lane zero for each unroll 1862 // iteration. Otherwise, we construct the vector values using insertelement 1863 // instructions. Since the resulting vectors are stored in 1864 // VectorLoopValueMap, we will only generate the insertelements once. 1865 Value *VectorValue = nullptr; 1866 if (Cost->isUniformAfterVectorization(I, VF)) { 1867 VectorValue = getBroadcastInstrs(ScalarValue); 1868 VectorLoopValueMap.setVectorValue(V, Part, VectorValue); 1869 } else { 1870 // Initialize packing with insertelements to start from undef. 1871 Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF)); 1872 VectorLoopValueMap.setVectorValue(V, Part, Undef); 1873 for (unsigned Lane = 0; Lane < VF; ++Lane) 1874 packScalarIntoVectorValue(V, {Part, Lane}); 1875 VectorValue = VectorLoopValueMap.getVectorValue(V, Part); 1876 } 1877 Builder.restoreIP(OldIP); 1878 return VectorValue; 1879 } 1880 1881 // If this scalar is unknown, assume that it is a constant or that it is 1882 // loop invariant. Broadcast V and save the value for future uses. 1883 Value *B = getBroadcastInstrs(V); 1884 VectorLoopValueMap.setVectorValue(V, Part, B); 1885 return B; 1886 } 1887 1888 Value * 1889 InnerLoopVectorizer::getOrCreateScalarValue(Value *V, 1890 const VPIteration &Instance) { 1891 // If the value is not an instruction contained in the loop, it should 1892 // already be scalar. 1893 if (OrigLoop->isLoopInvariant(V)) 1894 return V; 1895 1896 assert(Instance.Lane > 0 1897 ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF) 1898 : true && "Uniform values only have lane zero"); 1899 1900 // If the value from the original loop has not been vectorized, it is 1901 // represented by UF x VF scalar values in the new loop. Return the requested 1902 // scalar value. 1903 if (VectorLoopValueMap.hasScalarValue(V, Instance)) 1904 return VectorLoopValueMap.getScalarValue(V, Instance); 1905 1906 // If the value has not been scalarized, get its entry in VectorLoopValueMap 1907 // for the given unroll part. 
If this entry is not a vector type (i.e., the 1908 // vectorization factor is one), there is no need to generate an 1909 // extractelement instruction. 1910 auto *U = getOrCreateVectorValue(V, Instance.Part); 1911 if (!U->getType()->isVectorTy()) { 1912 assert(VF == 1 && "Value not scalarized has non-vector type"); 1913 return U; 1914 } 1915 1916 // Otherwise, the value from the original loop has been vectorized and is 1917 // represented by UF vector values. Extract and return the requested scalar 1918 // value from the appropriate vector lane. 1919 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane)); 1920 } 1921 1922 void InnerLoopVectorizer::packScalarIntoVectorValue( 1923 Value *V, const VPIteration &Instance) { 1924 assert(V != Induction && "The new induction variable should not be used."); 1925 assert(!V->getType()->isVectorTy() && "Can't pack a vector"); 1926 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 1927 1928 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance); 1929 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part); 1930 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst, 1931 Builder.getInt32(Instance.Lane)); 1932 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue); 1933 } 1934 1935 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 1936 assert(Vec->getType()->isVectorTy() && "Invalid type"); 1937 SmallVector<Constant *, 8> ShuffleMask; 1938 for (unsigned i = 0; i < VF; ++i) 1939 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 1940 1941 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 1942 ConstantVector::get(ShuffleMask), 1943 "reverse"); 1944 } 1945 1946 // Return whether we allow using masked interleave-groups (for dealing with 1947 // strided loads/stores that reside in predicated blocks, or for dealing 1948 // with gaps). 1949 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 1950 // If an override option has been passed in for interleaved accesses, use it. 1951 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 1952 return EnableMaskedInterleavedMemAccesses; 1953 1954 return TTI.enableMaskedInterleavedAccessVectorization(); 1955 } 1956 1957 // Try to vectorize the interleave group that \p Instr belongs to. 1958 // 1959 // E.g. Translate following interleaved load group (factor = 3): 1960 // for (i = 0; i < N; i+=3) { 1961 // R = Pic[i]; // Member of index 0 1962 // G = Pic[i+1]; // Member of index 1 1963 // B = Pic[i+2]; // Member of index 2 1964 // ... // do something to R, G, B 1965 // } 1966 // To: 1967 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 1968 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 1969 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 1970 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 1971 // 1972 // Or translate following interleaved store group (factor = 3): 1973 // for (i = 0; i < N; i+=3) { 1974 // ... 
do something to R, G, B 1975 // Pic[i] = R; // Member of index 0 1976 // Pic[i+1] = G; // Member of index 1 1977 // Pic[i+2] = B; // Member of index 2 1978 // } 1979 // To: 1980 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 1981 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 1982 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 1983 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 1984 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 1985 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr, 1986 VectorParts *BlockInMask) { 1987 const InterleaveGroup<Instruction> *Group = 1988 Cost->getInterleavedAccessGroup(Instr); 1989 assert(Group && "Fail to get an interleaved access group."); 1990 1991 // Skip if current instruction is not the insert position. 1992 if (Instr != Group->getInsertPos()) 1993 return; 1994 1995 const DataLayout &DL = Instr->getModule()->getDataLayout(); 1996 Value *Ptr = getLoadStorePointerOperand(Instr); 1997 1998 // Prepare for the vector type of the interleaved load/store. 1999 Type *ScalarTy = getMemInstValueType(Instr); 2000 unsigned InterleaveFactor = Group->getFactor(); 2001 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF); 2002 Type *PtrTy = VecTy->getPointerTo(getLoadStoreAddressSpace(Instr)); 2003 2004 // Prepare for the new pointers. 2005 setDebugLocFromInst(Builder, Ptr); 2006 SmallVector<Value *, 2> NewPtrs; 2007 unsigned Index = Group->getIndex(Instr); 2008 2009 VectorParts Mask; 2010 bool IsMaskForCondRequired = BlockInMask; 2011 if (IsMaskForCondRequired) { 2012 Mask = *BlockInMask; 2013 // TODO: extend the masked interleaved-group support to reversed access. 2014 assert(!Group->isReverse() && "Reversed masked interleave-group " 2015 "not supported."); 2016 } 2017 2018 // If the group is reverse, adjust the index to refer to the last vector lane 2019 // instead of the first. We adjust the index from the first vector lane, 2020 // rather than directly getting the pointer for lane VF - 1, because the 2021 // pointer operand of the interleaved access is supposed to be uniform. For 2022 // uniform instructions, we're only required to generate a value for the 2023 // first vector lane in each unroll iteration. 2024 if (Group->isReverse()) 2025 Index += (VF - 1) * Group->getFactor(); 2026 2027 bool InBounds = false; 2028 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2029 InBounds = gep->isInBounds(); 2030 2031 for (unsigned Part = 0; Part < UF; Part++) { 2032 Value *NewPtr = getOrCreateScalarValue(Ptr, {Part, 0}); 2033 2034 // Notice current instruction could be any index. Need to adjust the address 2035 // to the member of index 0. 2036 // 2037 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2038 // b = A[i]; // Member of index 0 2039 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2040 // 2041 // E.g. A[i+1] = a; // Member of index 1 2042 // A[i] = b; // Member of index 0 2043 // A[i+2] = c; // Member of index 2 (Current instruction) 2044 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2045 NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index)); 2046 if (InBounds) 2047 cast<GetElementPtrInst>(NewPtr)->setIsInBounds(true); 2048 2049 // Cast to the vector pointer type. 
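// (e.g. for an i32 group with factor 3 and VF = 4, VecTy is <12 x i32>, so the
// member pointer of type i32* is bitcast to <12 x i32>* in the access's address
// space before the wide load or store is emitted.)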
2050 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy)); 2051 } 2052 2053 setDebugLocFromInst(Builder, Instr); 2054 Value *UndefVec = UndefValue::get(VecTy); 2055 2056 Value *MaskForGaps = nullptr; 2057 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2058 MaskForGaps = createBitMaskForGaps(Builder, VF, *Group); 2059 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2060 } 2061 2062 // Vectorize the interleaved load group. 2063 if (isa<LoadInst>(Instr)) { 2064 // For each unroll part, create a wide load for the group. 2065 SmallVector<Value *, 2> NewLoads; 2066 for (unsigned Part = 0; Part < UF; Part++) { 2067 Instruction *NewLoad; 2068 if (IsMaskForCondRequired || MaskForGaps) { 2069 assert(useMaskedInterleavedAccesses(*TTI) && 2070 "masked interleaved groups are not allowed."); 2071 Value *GroupMask = MaskForGaps; 2072 if (IsMaskForCondRequired) { 2073 auto *Undefs = UndefValue::get(Mask[Part]->getType()); 2074 auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF); 2075 Value *ShuffledMask = Builder.CreateShuffleVector( 2076 Mask[Part], Undefs, RepMask, "interleaved.mask"); 2077 GroupMask = MaskForGaps 2078 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2079 MaskForGaps) 2080 : ShuffledMask; 2081 } 2082 NewLoad = 2083 Builder.CreateMaskedLoad(NewPtrs[Part], Group->getAlignment(), 2084 GroupMask, UndefVec, "wide.masked.vec"); 2085 } 2086 else 2087 NewLoad = Builder.CreateAlignedLoad(NewPtrs[Part], 2088 Group->getAlignment(), "wide.vec"); 2089 Group->addMetadata(NewLoad); 2090 NewLoads.push_back(NewLoad); 2091 } 2092 2093 // For each member in the group, shuffle out the appropriate data from the 2094 // wide loads. 2095 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2096 Instruction *Member = Group->getMember(I); 2097 2098 // Skip the gaps in the group. 2099 if (!Member) 2100 continue; 2101 2102 Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF); 2103 for (unsigned Part = 0; Part < UF; Part++) { 2104 Value *StridedVec = Builder.CreateShuffleVector( 2105 NewLoads[Part], UndefVec, StrideMask, "strided.vec"); 2106 2107 // If this member has different type, cast the result type. 2108 if (Member->getType() != ScalarTy) { 2109 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2110 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2111 } 2112 2113 if (Group->isReverse()) 2114 StridedVec = reverseVector(StridedVec); 2115 2116 VectorLoopValueMap.setVectorValue(Member, Part, StridedVec); 2117 } 2118 } 2119 return; 2120 } 2121 2122 // The sub vector type for current instruction. 2123 VectorType *SubVT = VectorType::get(ScalarTy, VF); 2124 2125 // Vectorize the interleaved store group. 2126 for (unsigned Part = 0; Part < UF; Part++) { 2127 // Collect the stored vector from each member. 2128 SmallVector<Value *, 4> StoredVecs; 2129 for (unsigned i = 0; i < InterleaveFactor; i++) { 2130 // Interleaved store group doesn't allow a gap, so each index has a member 2131 Instruction *Member = Group->getMember(i); 2132 assert(Member && "Fail to get a member from an interleaved store group"); 2133 2134 Value *StoredVec = getOrCreateVectorValue( 2135 cast<StoreInst>(Member)->getValueOperand(), Part); 2136 if (Group->isReverse()) 2137 StoredVec = reverseVector(StoredVec); 2138 2139 // If this member has different type, cast it to a unified type. 
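// (e.g. a member that stores float next to i32 members would have its
// <VF x float> operand bitcast to <VF x i32> here so that all members share
// SubVT before being concatenated and interleaved below.)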
2140 2141 if (StoredVec->getType() != SubVT) 2142 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2143 2144 StoredVecs.push_back(StoredVec); 2145 } 2146 2147 // Concatenate all vectors into a wide vector. 2148 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2149 2150 // Interleave the elements in the wide vector. 2151 Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor); 2152 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask, 2153 "interleaved.vec"); 2154 2155 Instruction *NewStoreInstr; 2156 if (IsMaskForCondRequired) { 2157 auto *Undefs = UndefValue::get(Mask[Part]->getType()); 2158 auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF); 2159 Value *ShuffledMask = Builder.CreateShuffleVector( 2160 Mask[Part], Undefs, RepMask, "interleaved.mask"); 2161 NewStoreInstr = Builder.CreateMaskedStore( 2162 IVec, NewPtrs[Part], Group->getAlignment(), ShuffledMask); 2163 } 2164 else 2165 NewStoreInstr = Builder.CreateAlignedStore(IVec, NewPtrs[Part], 2166 Group->getAlignment()); 2167 2168 Group->addMetadata(NewStoreInstr); 2169 } 2170 } 2171 2172 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr, 2173 VectorParts *BlockInMask) { 2174 // Attempt to issue a wide load. 2175 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2176 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2177 2178 assert((LI || SI) && "Invalid Load/Store instruction"); 2179 2180 LoopVectorizationCostModel::InstWidening Decision = 2181 Cost->getWideningDecision(Instr, VF); 2182 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 2183 "CM decision should be taken at this point"); 2184 if (Decision == LoopVectorizationCostModel::CM_Interleave) 2185 return vectorizeInterleaveGroup(Instr); 2186 2187 Type *ScalarDataTy = getMemInstValueType(Instr); 2188 Type *DataTy = VectorType::get(ScalarDataTy, VF); 2189 Value *Ptr = getLoadStorePointerOperand(Instr); 2190 unsigned Alignment = getLoadStoreAlignment(Instr); 2191 // An alignment of 0 means target abi alignment. We need to use the scalar's 2192 // target abi alignment in such a case. 2193 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2194 if (!Alignment) 2195 Alignment = DL.getABITypeAlignment(ScalarDataTy); 2196 unsigned AddressSpace = getLoadStoreAddressSpace(Instr); 2197 2198 // Determine if the pointer operand of the access is either consecutive or 2199 // reverse consecutive. 2200 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2201 bool ConsecutiveStride = 2202 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2203 bool CreateGatherScatter = 2204 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2205 2206 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2207 // gather/scatter. Otherwise Decision should have been to Scalarize. 2208 assert((ConsecutiveStride || CreateGatherScatter) && 2209 "The instruction should be scalarized"); 2210 2211 // Handle consecutive loads/stores. 2212 if (ConsecutiveStride) 2213 Ptr = getOrCreateScalarValue(Ptr, {0, 0}); 2214 2215 VectorParts Mask; 2216 bool isMaskRequired = BlockInMask; 2217 if (isMaskRequired) 2218 Mask = *BlockInMask; 2219 2220 bool InBounds = false; 2221 if (auto *gep = dyn_cast<GetElementPtrInst>( 2222 getLoadStorePointerOperand(Instr)->stripPointerCasts())) 2223 InBounds = gep->isInBounds(); 2224 2225 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2226 // Calculate the pointer for the specific unroll-part. 
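// (e.g. with VF = 4: Part 0 starts at Ptr + 0 and Part 1 at Ptr + 4 for a
// forward access; for a reverse access Part 0 starts at Ptr + (1 - 4) = Ptr - 3
// and Part 1 at Ptr - 4 + (1 - 4) = Ptr - 7, so each wide access covers the
// preceding VF elements.)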
2227 GetElementPtrInst *PartPtr = nullptr; 2228 2229 if (Reverse) { 2230 // If the address is consecutive but reversed, then the 2231 // wide store needs to start at the last vector element. 2232 PartPtr = cast<GetElementPtrInst>( 2233 Builder.CreateGEP(Ptr, Builder.getInt32(-Part * VF))); 2234 PartPtr->setIsInBounds(InBounds); 2235 PartPtr = cast<GetElementPtrInst>( 2236 Builder.CreateGEP(PartPtr, Builder.getInt32(1 - VF))); 2237 PartPtr->setIsInBounds(InBounds); 2238 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2239 Mask[Part] = reverseVector(Mask[Part]); 2240 } else { 2241 PartPtr = cast<GetElementPtrInst>( 2242 Builder.CreateGEP(Ptr, Builder.getInt32(Part * VF))); 2243 PartPtr->setIsInBounds(InBounds); 2244 } 2245 2246 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2247 }; 2248 2249 // Handle Stores: 2250 if (SI) { 2251 setDebugLocFromInst(Builder, SI); 2252 2253 for (unsigned Part = 0; Part < UF; ++Part) { 2254 Instruction *NewSI = nullptr; 2255 Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part); 2256 if (CreateGatherScatter) { 2257 Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr; 2258 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 2259 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2260 MaskPart); 2261 } else { 2262 if (Reverse) { 2263 // If we store to reverse consecutive memory locations, then we need 2264 // to reverse the order of elements in the stored value. 2265 StoredVal = reverseVector(StoredVal); 2266 // We don't want to update the value in the map as it might be used in 2267 // another expression. So don't call resetVectorValue(StoredVal). 2268 } 2269 auto *VecPtr = CreateVecPtr(Part, Ptr); 2270 if (isMaskRequired) 2271 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2272 Mask[Part]); 2273 else 2274 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2275 } 2276 addMetadata(NewSI, SI); 2277 } 2278 return; 2279 } 2280 2281 // Handle loads. 2282 assert(LI && "Must have a load instruction"); 2283 setDebugLocFromInst(Builder, LI); 2284 for (unsigned Part = 0; Part < UF; ++Part) { 2285 Value *NewLI; 2286 if (CreateGatherScatter) { 2287 Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr; 2288 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 2289 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 2290 nullptr, "wide.masked.gather"); 2291 addMetadata(NewLI, LI); 2292 } else { 2293 auto *VecPtr = CreateVecPtr(Part, Ptr); 2294 if (isMaskRequired) 2295 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 2296 UndefValue::get(DataTy), 2297 "wide.masked.load"); 2298 else 2299 NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load"); 2300 2301 // Add metadata to the load, but setVectorValue to the reverse shuffle. 2302 addMetadata(NewLI, LI); 2303 if (Reverse) 2304 NewLI = reverseVector(NewLI); 2305 } 2306 VectorLoopValueMap.setVectorValue(Instr, Part, NewLI); 2307 } 2308 } 2309 2310 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2311 const VPIteration &Instance, 2312 bool IfPredicateInstr) { 2313 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2314 2315 setDebugLocFromInst(Builder, Instr); 2316 2317 // Does this instruction return a value ? 
2318 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2319 2320 Instruction *Cloned = Instr->clone(); 2321 if (!IsVoidRetTy) 2322 Cloned->setName(Instr->getName() + ".cloned"); 2323 2324 // Replace the operands of the cloned instructions with their scalar 2325 // equivalents in the new loop. 2326 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 2327 auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance); 2328 Cloned->setOperand(op, NewOp); 2329 } 2330 addNewMetadata(Cloned, Instr); 2331 2332 // Place the cloned scalar in the new loop. 2333 Builder.Insert(Cloned); 2334 2335 // Add the cloned scalar to the scalar map entry. 2336 VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned); 2337 2338 // If we just cloned a new assumption, add it the assumption cache. 2339 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 2340 if (II->getIntrinsicID() == Intrinsic::assume) 2341 AC->registerAssumption(II); 2342 2343 // End if-block. 2344 if (IfPredicateInstr) 2345 PredicatedInstructions.push_back(Cloned); 2346 } 2347 2348 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 2349 Value *End, Value *Step, 2350 Instruction *DL) { 2351 BasicBlock *Header = L->getHeader(); 2352 BasicBlock *Latch = L->getLoopLatch(); 2353 // As we're just creating this loop, it's possible no latch exists 2354 // yet. If so, use the header as this will be a single block loop. 2355 if (!Latch) 2356 Latch = Header; 2357 2358 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 2359 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 2360 setDebugLocFromInst(Builder, OldInst); 2361 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 2362 2363 Builder.SetInsertPoint(Latch->getTerminator()); 2364 setDebugLocFromInst(Builder, OldInst); 2365 2366 // Create i+1 and fill the PHINode. 2367 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 2368 Induction->addIncoming(Start, L->getLoopPreheader()); 2369 Induction->addIncoming(Next, Latch); 2370 // Create the compare. 2371 Value *ICmp = Builder.CreateICmpEQ(Next, End); 2372 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 2373 2374 // Now we have two terminators. Remove the old one from the block. 2375 Latch->getTerminator()->eraseFromParent(); 2376 2377 return Induction; 2378 } 2379 2380 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 2381 if (TripCount) 2382 return TripCount; 2383 2384 assert(L && "Create Trip Count for null loop."); 2385 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2386 // Find the loop boundaries. 2387 ScalarEvolution *SE = PSE.getSE(); 2388 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2389 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 2390 "Invalid loop count"); 2391 2392 Type *IdxTy = Legal->getWidestInductionType(); 2393 assert(IdxTy && "No type for induction"); 2394 2395 // The exit count might have the type of i64 while the phi is i32. This can 2396 // happen if we have an induction variable that is sign extended before the 2397 // compare. The only way that we get a backedge taken count is that the 2398 // induction variable was signed and as such will not overflow. In such a case 2399 // truncation is legal. 
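// For example, an i32 induction variable that is sign-extended to i64 for the
// exit compare yields an i64 backedge-taken count; because the narrow IV was
// proven not to overflow, truncating that count back to the widest induction
// type below loses no information.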
2400 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() > 2401 IdxTy->getPrimitiveSizeInBits()) 2402 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2403 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2404 2405 // Get the total trip count from the count by adding 1. 2406 const SCEV *ExitCount = SE->getAddExpr( 2407 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2408 2409 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2410 2411 // Expand the trip count and place the new instructions in the preheader. 2412 // Notice that the pre-header does not change, only the loop body. 2413 SCEVExpander Exp(*SE, DL, "induction"); 2414 2415 // Count holds the overall loop count (N). 2416 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2417 L->getLoopPreheader()->getTerminator()); 2418 2419 if (TripCount->getType()->isPointerTy()) 2420 TripCount = 2421 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2422 L->getLoopPreheader()->getTerminator()); 2423 2424 return TripCount; 2425 } 2426 2427 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 2428 if (VectorTripCount) 2429 return VectorTripCount; 2430 2431 Value *TC = getOrCreateTripCount(L); 2432 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2433 2434 Type *Ty = TC->getType(); 2435 Constant *Step = ConstantInt::get(Ty, VF * UF); 2436 2437 // If the tail is to be folded by masking, round the number of iterations N 2438 // up to a multiple of Step instead of rounding down. This is done by first 2439 // adding Step-1 and then rounding down. Note that it's ok if this addition 2440 // overflows: the vector induction variable will eventually wrap to zero given 2441 // that it starts at zero and its Step is a power of two; the loop will then 2442 // exit, with the last early-exit vector comparison also producing all-true. 2443 if (Cost->foldTailByMasking()) { 2444 assert(isPowerOf2_32(VF * UF) && 2445 "VF*UF must be a power of 2 when folding tail by masking"); 2446 TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up"); 2447 } 2448 2449 // Now we need to generate the expression for the part of the loop that the 2450 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2451 // iterations are not required for correctness, or N - Step, otherwise. Step 2452 // is equal to the vectorization factor (number of SIMD elements) times the 2453 // unroll factor (number of SIMD instructions). 2454 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2455 2456 // If there is a non-reversed interleaved group that may speculatively access 2457 // memory out-of-bounds, we need to ensure that there will be at least one 2458 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 2459 // the trip count, we set the remainder to be equal to the step. If the step 2460 // does not evenly divide the trip count, no adjustment is necessary since 2461 // there will already be scalar iterations. Note that the minimum iterations 2462 // check ensures that N >= Step. 
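// (For example, with a trip count of 16, VF = 4, UF = 2 (Step = 8) and a
// required scalar epilogue: 16 urem 8 is 0, so R is bumped to 8 and n.vec
// becomes 8, leaving eight iterations for the scalar loop. With a trip count
// of 20, R is already 4 and no adjustment is needed.)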
2463 if (VF > 1 && Cost->requiresScalarEpilogue()) {
2464 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2465 R = Builder.CreateSelect(IsZero, Step, R);
2466 }
2467
2468 VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2469
2470 return VectorTripCount;
2471 }
2472
2473 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2474 const DataLayout &DL) {
2475 // Verify that V is a vector type with the same number of elements as DstVTy.
2476 unsigned VF = DstVTy->getNumElements();
2477 VectorType *SrcVecTy = cast<VectorType>(V->getType());
2478 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
2479 Type *SrcElemTy = SrcVecTy->getElementType();
2480 Type *DstElemTy = DstVTy->getElementType();
2481 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2482 "Vector elements must have same size");
2483
2484 // Do a direct cast if element types are castable.
2485 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2486 return Builder.CreateBitOrPointerCast(V, DstVTy);
2487 }
2488 // V cannot be cast directly to the desired vector type.
2489 // This may happen when V is a floating-point vector but DstVTy is a vector of
2490 // pointers, or vice-versa. Handle this with a two-step bitcast through an
2491 // intermediate integer type, i.e. Ptr <-> Int <-> Float.
2492 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2493 "Only one type should be a pointer type");
2494 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2495 "Only one type should be a floating point type");
2496 Type *IntTy =
2497 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2498 VectorType *VecIntTy = VectorType::get(IntTy, VF);
2499 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2500 return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
2501 }
2502
2503 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2504 BasicBlock *Bypass) {
2505 Value *Count = getOrCreateTripCount(L);
2506 BasicBlock *BB = L->getLoopPreheader();
2507 IRBuilder<> Builder(BB->getTerminator());
2508
2509 // Generate code to check if the loop's trip count is less than VF * UF, or
2510 // equal to it in case a scalar epilogue is required; this implies that the
2511 // vector trip count is zero. This check also covers the case where adding one
2512 // to the backedge-taken count overflowed, leading to an incorrect trip count
2513 // of zero. In this case we will also jump to the scalar loop.
2514 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
2515 : ICmpInst::ICMP_ULT;
2516
2517 // If the tail is to be folded, the vector loop takes care of all iterations.
2518 Value *CheckMinIters = Builder.getFalse();
2519 if (!Cost->foldTailByMasking())
2520 CheckMinIters = Builder.CreateICmp(
2521 P, Count, ConstantInt::get(Count->getType(), VF * UF),
2522 "min.iters.check");
2523
2524 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2525 // Update dominator tree immediately if the generated block is a
2526 // LoopBypassBlock because SCEV expansions to generate loop bypass
2527 // checks may query it before the current function is finished.
2528 DT->addNewBlock(NewBB, BB); 2529 if (L->getParentLoop()) 2530 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2531 ReplaceInstWithInst(BB->getTerminator(), 2532 BranchInst::Create(Bypass, NewBB, CheckMinIters)); 2533 LoopBypassBlocks.push_back(BB); 2534 } 2535 2536 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 2537 BasicBlock *BB = L->getLoopPreheader(); 2538 2539 // Generate the code to check that the SCEV assumptions that we made. 2540 // We want the new basic block to start at the first instruction in a 2541 // sequence of instructions that form a check. 2542 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(), 2543 "scev.check"); 2544 Value *SCEVCheck = 2545 Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator()); 2546 2547 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck)) 2548 if (C->isZero()) 2549 return; 2550 2551 assert(!Cost->foldTailByMasking() && 2552 "Cannot SCEV check stride or overflow when folding tail"); 2553 // Create a new block containing the stride check. 2554 BB->setName("vector.scevcheck"); 2555 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2556 // Update dominator tree immediately if the generated block is a 2557 // LoopBypassBlock because SCEV expansions to generate loop bypass 2558 // checks may query it before the current function is finished. 2559 DT->addNewBlock(NewBB, BB); 2560 if (L->getParentLoop()) 2561 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2562 ReplaceInstWithInst(BB->getTerminator(), 2563 BranchInst::Create(Bypass, NewBB, SCEVCheck)); 2564 LoopBypassBlocks.push_back(BB); 2565 AddedSafetyChecks = true; 2566 } 2567 2568 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) { 2569 // VPlan-native path does not do any analysis for runtime checks currently. 2570 if (EnableVPlanNativePath) 2571 return; 2572 2573 BasicBlock *BB = L->getLoopPreheader(); 2574 2575 // Generate the code that checks in runtime if arrays overlap. We put the 2576 // checks into a separate block to make the more common case of few elements 2577 // faster. 2578 Instruction *FirstCheckInst; 2579 Instruction *MemRuntimeCheck; 2580 std::tie(FirstCheckInst, MemRuntimeCheck) = 2581 Legal->getLAI()->addRuntimeChecks(BB->getTerminator()); 2582 if (!MemRuntimeCheck) 2583 return; 2584 2585 assert(!Cost->foldTailByMasking() && "Cannot check memory when folding tail"); 2586 // Create a new block containing the memory check. 2587 BB->setName("vector.memcheck"); 2588 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2589 // Update dominator tree immediately if the generated block is a 2590 // LoopBypassBlock because SCEV expansions to generate loop bypass 2591 // checks may query it before the current function is finished. 2592 DT->addNewBlock(NewBB, BB); 2593 if (L->getParentLoop()) 2594 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2595 ReplaceInstWithInst(BB->getTerminator(), 2596 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck)); 2597 LoopBypassBlocks.push_back(BB); 2598 AddedSafetyChecks = true; 2599 2600 // We currently don't use LoopVersioning for the actual loop cloning but we 2601 // still use it to add the noalias metadata. 
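// (Roughly: prepareNoAliasMetadata annotates the memory instructions of the
// loop being vectorized with !alias.scope / !noalias metadata derived from the
// runtime pointer-check groups, so later passes may assume the checked
// pointers do not alias.)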
2602 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 2603 PSE.getSE()); 2604 LVer->prepareNoAliasMetadata(); 2605 } 2606 2607 Value *InnerLoopVectorizer::emitTransformedIndex( 2608 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 2609 const InductionDescriptor &ID) const { 2610 2611 SCEVExpander Exp(*SE, DL, "induction"); 2612 auto Step = ID.getStep(); 2613 auto StartValue = ID.getStartValue(); 2614 assert(Index->getType() == Step->getType() && 2615 "Index type does not match StepValue type"); 2616 2617 // Note: the IR at this point is broken. We cannot use SE to create any new 2618 // SCEV and then expand it, hoping that SCEV's simplification will give us 2619 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 2620 // lead to various SCEV crashes. So all we can do is to use builder and rely 2621 // on InstCombine for future simplifications. Here we handle some trivial 2622 // cases only. 2623 auto CreateAdd = [&B](Value *X, Value *Y) { 2624 assert(X->getType() == Y->getType() && "Types don't match!"); 2625 if (auto *CX = dyn_cast<ConstantInt>(X)) 2626 if (CX->isZero()) 2627 return Y; 2628 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2629 if (CY->isZero()) 2630 return X; 2631 return B.CreateAdd(X, Y); 2632 }; 2633 2634 auto CreateMul = [&B](Value *X, Value *Y) { 2635 assert(X->getType() == Y->getType() && "Types don't match!"); 2636 if (auto *CX = dyn_cast<ConstantInt>(X)) 2637 if (CX->isOne()) 2638 return Y; 2639 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2640 if (CY->isOne()) 2641 return X; 2642 return B.CreateMul(X, Y); 2643 }; 2644 2645 switch (ID.getKind()) { 2646 case InductionDescriptor::IK_IntInduction: { 2647 assert(Index->getType() == StartValue->getType() && 2648 "Index type does not match StartValue type"); 2649 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 2650 return B.CreateSub(StartValue, Index); 2651 auto *Offset = CreateMul( 2652 Index, Exp.expandCodeFor(Step, Index->getType(), &*B.GetInsertPoint())); 2653 return CreateAdd(StartValue, Offset); 2654 } 2655 case InductionDescriptor::IK_PtrInduction: { 2656 assert(isa<SCEVConstant>(Step) && 2657 "Expected constant step for pointer induction"); 2658 return B.CreateGEP( 2659 nullptr, StartValue, 2660 CreateMul(Index, Exp.expandCodeFor(Step, Index->getType(), 2661 &*B.GetInsertPoint()))); 2662 } 2663 case InductionDescriptor::IK_FpInduction: { 2664 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2665 auto InductionBinOp = ID.getInductionBinOp(); 2666 assert(InductionBinOp && 2667 (InductionBinOp->getOpcode() == Instruction::FAdd || 2668 InductionBinOp->getOpcode() == Instruction::FSub) && 2669 "Original bin op should be defined for FP induction"); 2670 2671 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 2672 2673 // Floating point operations had to be 'fast' to enable the induction. 2674 FastMathFlags Flags; 2675 Flags.setFast(); 2676 2677 Value *MulExp = B.CreateFMul(StepValue, Index); 2678 if (isa<Instruction>(MulExp)) 2679 // We have to check, the MulExp may be a constant. 
2680 cast<Instruction>(MulExp)->setFastMathFlags(Flags); 2681 2682 Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 2683 "induction"); 2684 if (isa<Instruction>(BOp)) 2685 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2686 2687 return BOp; 2688 } 2689 case InductionDescriptor::IK_NoInduction: 2690 return nullptr; 2691 } 2692 llvm_unreachable("invalid enum"); 2693 } 2694 2695 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 2696 /* 2697 In this function we generate a new loop. The new loop will contain 2698 the vectorized instructions while the old loop will continue to run the 2699 scalar remainder. 2700 2701 [ ] <-- loop iteration number check. 2702 / | 2703 / v 2704 | [ ] <-- vector loop bypass (may consist of multiple blocks). 2705 | / | 2706 | / v 2707 || [ ] <-- vector pre header. 2708 |/ | 2709 | v 2710 | [ ] \ 2711 | [ ]_| <-- vector loop. 2712 | | 2713 | v 2714 | -[ ] <--- middle-block. 2715 | / | 2716 | / v 2717 -|- >[ ] <--- new preheader. 2718 | | 2719 | v 2720 | [ ] \ 2721 | [ ]_| <-- old scalar loop to handle remainder. 2722 \ | 2723 \ v 2724 >[ ] <-- exit block. 2725 ... 2726 */ 2727 2728 BasicBlock *OldBasicBlock = OrigLoop->getHeader(); 2729 BasicBlock *VectorPH = OrigLoop->getLoopPreheader(); 2730 BasicBlock *ExitBlock = OrigLoop->getExitBlock(); 2731 MDNode *OrigLoopID = OrigLoop->getLoopID(); 2732 assert(VectorPH && "Invalid loop structure"); 2733 assert(ExitBlock && "Must have an exit block"); 2734 2735 // Some loops have a single integer induction variable, while other loops 2736 // don't. One example is c++ iterators that often have multiple pointer 2737 // induction variables. In the code below we also support a case where we 2738 // don't have a single induction variable. 2739 // 2740 // We try to obtain an induction variable from the original loop as hard 2741 // as possible. However if we don't find one that: 2742 // - is an integer 2743 // - counts from zero, stepping by one 2744 // - is the size of the widest induction variable type 2745 // then we create a new one. 2746 OldInduction = Legal->getPrimaryInduction(); 2747 Type *IdxTy = Legal->getWidestInductionType(); 2748 2749 // Split the single block loop into the two loop structure described above. 2750 BasicBlock *VecBody = 2751 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body"); 2752 BasicBlock *MiddleBlock = 2753 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block"); 2754 BasicBlock *ScalarPH = 2755 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph"); 2756 2757 // Create and register the new vector loop. 2758 Loop *Lp = LI->AllocateLoop(); 2759 Loop *ParentLoop = OrigLoop->getParentLoop(); 2760 2761 // Insert the new loop into the loop nest and register the new basic blocks 2762 // before calling any utilities such as SCEV that require valid LoopInfo. 2763 if (ParentLoop) { 2764 ParentLoop->addChildLoop(Lp); 2765 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI); 2766 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI); 2767 } else { 2768 LI->addTopLevelLoop(Lp); 2769 } 2770 Lp->addBasicBlockToLoop(VecBody, *LI); 2771 2772 // Find the loop boundaries. 2773 Value *Count = getOrCreateTripCount(Lp); 2774 2775 Value *StartIdx = ConstantInt::get(IdxTy, 0); 2776 2777 // Now, compare the new count to zero. If it is zero skip the vector loop and 2778 // jump to the scalar loop. 
This check also covers the case where the 2779 // backedge-taken count is uint##_max: adding one to it will overflow leading 2780 // to an incorrect trip count of zero. In this (rare) case we will also jump 2781 // to the scalar loop. 2782 emitMinimumIterationCountCheck(Lp, ScalarPH); 2783 2784 // Generate the code to check any assumptions that we've made for SCEV 2785 // expressions. 2786 emitSCEVChecks(Lp, ScalarPH); 2787 2788 // Generate the code that checks in runtime if arrays overlap. We put the 2789 // checks into a separate block to make the more common case of few elements 2790 // faster. 2791 emitMemRuntimeChecks(Lp, ScalarPH); 2792 2793 // Generate the induction variable. 2794 // The loop step is equal to the vectorization factor (num of SIMD elements) 2795 // times the unroll factor (num of SIMD instructions). 2796 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 2797 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 2798 Induction = 2799 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 2800 getDebugLocFromInstOrOperands(OldInduction)); 2801 2802 // We are going to resume the execution of the scalar loop. 2803 // Go over all of the induction variables that we found and fix the 2804 // PHIs that are left in the scalar version of the loop. 2805 // The starting values of PHI nodes depend on the counter of the last 2806 // iteration in the vectorized loop. 2807 // If we come from a bypass edge then we need to start from the original 2808 // start value. 2809 2810 // This variable saves the new starting index for the scalar loop. It is used 2811 // to test if there are any tail iterations left once the vector loop has 2812 // completed. 2813 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars(); 2814 for (auto &InductionEntry : *List) { 2815 PHINode *OrigPhi = InductionEntry.first; 2816 InductionDescriptor II = InductionEntry.second; 2817 2818 // Create phi nodes to merge from the backedge-taken check block. 2819 PHINode *BCResumeVal = PHINode::Create( 2820 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator()); 2821 // Copy original phi DL over to the new one. 2822 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 2823 Value *&EndValue = IVEndValues[OrigPhi]; 2824 if (OrigPhi == OldInduction) { 2825 // We know what the end value is. 2826 EndValue = CountRoundDown; 2827 } else { 2828 IRBuilder<> B(Lp->getLoopPreheader()->getTerminator()); 2829 Type *StepType = II.getStep()->getType(); 2830 Instruction::CastOps CastOp = 2831 CastInst::getCastOpcode(CountRoundDown, true, StepType, true); 2832 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd"); 2833 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2834 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 2835 EndValue->setName("ind.end"); 2836 } 2837 2838 // The new PHI merges the original incoming value, in case of a bypass, 2839 // or the value at the end of the vectorized loop. 2840 BCResumeVal->addIncoming(EndValue, MiddleBlock); 2841 2842 // Fix the scalar body counter (PHI node). 2843 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH); 2844 2845 // The old induction's phi node in the scalar body needs the truncated 2846 // value. 2847 for (BasicBlock *BB : LoopBypassBlocks) 2848 BCResumeVal->addIncoming(II.getStartValue(), BB); 2849 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal); 2850 } 2851 2852 // Add a check in the middle block to see if we have completed 2853 // all of the iterations in the first vector loop. 
  // If (N - N%VF) == N, then we *don't* need to run the remainder.
  // If the tail is to be folded, we know we don't need to run the remainder.
  Value *CmpN = Builder.getTrue();
  if (!Cost->foldTailByMasking())
    CmpN =
        CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
                        CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
  ReplaceInstWithInst(MiddleBlock->getTerminator(),
                      BranchInst::Create(ExitBlock, ScalarPH, CmpN));

  // Get ready to start creating new instructions into the vectorized body.
  Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());

  // Save the state.
  LoopVectorPreHeader = Lp->getLoopPreheader();
  LoopScalarPreHeader = ScalarPH;
  LoopMiddleBlock = MiddleBlock;
  LoopExitBlock = ExitBlock;
  LoopVectorBody = VecBody;
  LoopScalarBody = OldBasicBlock;

  Optional<MDNode *> VectorizedLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupVectorized});
  if (VectorizedLoopID.hasValue()) {
    Lp->setLoopID(VectorizedLoopID.getValue());

    // Do not setAlreadyVectorized if loop attributes have been defined
    // explicitly.
    return LoopVectorPreHeader;
  }

  // Keep all loop hints from the original loop on the vector loop (we'll
  // replace the vectorizer-specific hints below).
  if (MDNode *LID = OrigLoop->getLoopID())
    Lp->setLoopID(LID);

  LoopVectorizeHints Hints(Lp, true, *ORE);
  Hints.setAlreadyVectorized();

  return LoopVectorPreHeader;
}

// Fix up external users of the induction variable. At this point, we are in
// LCSSA form, with all external PHIs that use the IV having one input value,
// coming from the remainder loop. We need those PHIs to also have a correct
// value for the IV when arriving directly from the middle block.
void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
                                       const InductionDescriptor &II,
                                       Value *CountRoundDown, Value *EndValue,
                                       BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the
  // penultimate value (the value that feeds into the phi from the loop latch).
  // We allow both, but they, obviously, have different values.

  assert(OrigLoop->getExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
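  // As an illustrative sketch (the numbers are hypothetical, not taken from
  // the source): for an induction that starts at 10 and steps by 2, with a
  // vector trip count CRD of 8, a user of the phi (the penultimate value)
  // must see 10 + 2 * (8 - 1) = 24, while a user of the latch value sees the
  // end value 10 + 2 * 8 = 26.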
2928 for (User *U : OrigPhi->users()) { 2929 auto *UI = cast<Instruction>(U); 2930 if (!OrigLoop->contains(UI)) { 2931 const DataLayout &DL = 2932 OrigLoop->getHeader()->getModule()->getDataLayout(); 2933 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 2934 2935 IRBuilder<> B(MiddleBlock->getTerminator()); 2936 Value *CountMinusOne = B.CreateSub( 2937 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 2938 Value *CMO = 2939 !II.getStep()->getType()->isIntegerTy() 2940 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 2941 II.getStep()->getType()) 2942 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 2943 CMO->setName("cast.cmo"); 2944 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); 2945 Escape->setName("ind.escape"); 2946 MissingVals[UI] = Escape; 2947 } 2948 } 2949 2950 for (auto &I : MissingVals) { 2951 PHINode *PHI = cast<PHINode>(I.first); 2952 // One corner case we have to handle is two IVs "chasing" each-other, 2953 // that is %IV2 = phi [...], [ %IV1, %latch ] 2954 // In this case, if IV1 has an external use, we need to avoid adding both 2955 // "last value of IV1" and "penultimate value of IV2". So, verify that we 2956 // don't already have an incoming value for the middle block. 2957 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 2958 PHI->addIncoming(I.second, MiddleBlock); 2959 } 2960 } 2961 2962 namespace { 2963 2964 struct CSEDenseMapInfo { 2965 static bool canHandle(const Instruction *I) { 2966 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 2967 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 2968 } 2969 2970 static inline Instruction *getEmptyKey() { 2971 return DenseMapInfo<Instruction *>::getEmptyKey(); 2972 } 2973 2974 static inline Instruction *getTombstoneKey() { 2975 return DenseMapInfo<Instruction *>::getTombstoneKey(); 2976 } 2977 2978 static unsigned getHashValue(const Instruction *I) { 2979 assert(canHandle(I) && "Unknown instruction!"); 2980 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 2981 I->value_op_end())); 2982 } 2983 2984 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 2985 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 2986 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 2987 return LHS == RHS; 2988 return LHS->isIdenticalTo(RHS); 2989 } 2990 }; 2991 2992 } // end anonymous namespace 2993 2994 ///Perform cse of induction variable instructions. 2995 static void cse(BasicBlock *BB) { 2996 // Perform simple cse. 2997 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 2998 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 2999 Instruction *In = &*I++; 3000 3001 if (!CSEDenseMapInfo::canHandle(In)) 3002 continue; 3003 3004 // Check if we can replace this instruction with any of the 3005 // visited instructions. 3006 if (Instruction *V = CSEMap.lookup(In)) { 3007 In->replaceAllUsesWith(V); 3008 In->eraseFromParent(); 3009 continue; 3010 } 3011 3012 CSEMap[In] = In; 3013 } 3014 } 3015 3016 /// Estimate the overhead of scalarizing an instruction. This is a 3017 /// convenience wrapper for the type-based getScalarizationOverhead API. 
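/// As a rough sketch of what this accounts for (assuming a target without
/// efficient vector element load/store support): extracting the operand lanes
/// from vector values and inserting the VF scalar results back into a vector
/// result, i.e. roughly VF extracts plus VF inserts per scalarized value. The
/// exact numbers come from TTI and are target-dependent.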
3018 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF, 3019 const TargetTransformInfo &TTI) { 3020 if (VF == 1) 3021 return 0; 3022 3023 unsigned Cost = 0; 3024 Type *RetTy = ToVectorTy(I->getType(), VF); 3025 if (!RetTy->isVoidTy() && 3026 (!isa<LoadInst>(I) || 3027 !TTI.supportsEfficientVectorElementLoadStore())) 3028 Cost += TTI.getScalarizationOverhead(RetTy, true, false); 3029 3030 // Some targets keep addresses scalar. 3031 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 3032 return Cost; 3033 3034 if (CallInst *CI = dyn_cast<CallInst>(I)) { 3035 SmallVector<const Value *, 4> Operands(CI->arg_operands()); 3036 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF); 3037 } 3038 else if (!isa<StoreInst>(I) || 3039 !TTI.supportsEfficientVectorElementLoadStore()) { 3040 SmallVector<const Value *, 4> Operands(I->operand_values()); 3041 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF); 3042 } 3043 3044 return Cost; 3045 } 3046 3047 // Estimate cost of a call instruction CI if it were vectorized with factor VF. 3048 // Return the cost of the instruction, including scalarization overhead if it's 3049 // needed. The flag NeedToScalarize shows if the call needs to be scalarized - 3050 // i.e. either vector version isn't available, or is too expensive. 3051 static unsigned getVectorCallCost(CallInst *CI, unsigned VF, 3052 const TargetTransformInfo &TTI, 3053 const TargetLibraryInfo *TLI, 3054 bool &NeedToScalarize) { 3055 Function *F = CI->getCalledFunction(); 3056 StringRef FnName = CI->getCalledFunction()->getName(); 3057 Type *ScalarRetTy = CI->getType(); 3058 SmallVector<Type *, 4> Tys, ScalarTys; 3059 for (auto &ArgOp : CI->arg_operands()) 3060 ScalarTys.push_back(ArgOp->getType()); 3061 3062 // Estimate cost of scalarized vector call. The source operands are assumed 3063 // to be vectors, so we need to extract individual elements from there, 3064 // execute VF scalar calls, and then gather the result into the vector return 3065 // value. 3066 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3067 if (VF == 1) 3068 return ScalarCallCost; 3069 3070 // Compute corresponding vector type for return value and arguments. 3071 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3072 for (Type *ScalarTy : ScalarTys) 3073 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3074 3075 // Compute costs of unpacking argument values for the scalar calls and 3076 // packing the return values to a vector. 3077 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI); 3078 3079 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3080 3081 // If we can't emit a vector call for this function, then the currently found 3082 // cost is the cost we need to return. 3083 NeedToScalarize = true; 3084 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3085 return Cost; 3086 3087 // If the corresponding vector cost is cheaper, return its cost. 3088 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3089 if (VectorCallCost < Cost) { 3090 NeedToScalarize = false; 3091 return VectorCallCost; 3092 } 3093 return Cost; 3094 } 3095 3096 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3097 // factor VF. Return the cost of the instruction, including scalarization 3098 // overhead if it's needed. 
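// For example (a sketch; the actual numbers come from TTI): a call that maps
// to llvm.sqrt.f32 considered at VF = 4 is costed as llvm.sqrt.v4f32, and the
// caller (see widenInstruction) only uses the intrinsic form if this cost is
// no greater than the call cost computed by getVectorCallCost above.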
3099 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3100 const TargetTransformInfo &TTI, 3101 const TargetLibraryInfo *TLI) { 3102 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3103 assert(ID && "Expected intrinsic call!"); 3104 3105 FastMathFlags FMF; 3106 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3107 FMF = FPMO->getFastMathFlags(); 3108 3109 SmallVector<Value *, 4> Operands(CI->arg_operands()); 3110 return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF); 3111 } 3112 3113 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3114 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3115 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3116 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3117 } 3118 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3119 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3120 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3121 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3122 } 3123 3124 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3125 // For every instruction `I` in MinBWs, truncate the operands, create a 3126 // truncated version of `I` and reextend its result. InstCombine runs 3127 // later and will remove any ext/trunc pairs. 3128 SmallPtrSet<Value *, 4> Erased; 3129 for (const auto &KV : Cost->getMinimalBitwidths()) { 3130 // If the value wasn't vectorized, we must maintain the original scalar 3131 // type. The absence of the value from VectorLoopValueMap indicates that it 3132 // wasn't vectorized. 3133 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3134 continue; 3135 for (unsigned Part = 0; Part < UF; ++Part) { 3136 Value *I = getOrCreateVectorValue(KV.first, Part); 3137 if (Erased.find(I) != Erased.end() || I->use_empty() || 3138 !isa<Instruction>(I)) 3139 continue; 3140 Type *OriginalTy = I->getType(); 3141 Type *ScalarTruncatedTy = 3142 IntegerType::get(OriginalTy->getContext(), KV.second); 3143 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3144 OriginalTy->getVectorNumElements()); 3145 if (TruncatedTy == OriginalTy) 3146 continue; 3147 3148 IRBuilder<> B(cast<Instruction>(I)); 3149 auto ShrinkOperand = [&](Value *V) -> Value * { 3150 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3151 if (ZI->getSrcTy() == TruncatedTy) 3152 return ZI->getOperand(0); 3153 return B.CreateZExtOrTrunc(V, TruncatedTy); 3154 }; 3155 3156 // The actual instruction modification depends on the instruction type, 3157 // unfortunately. 3158 Value *NewI = nullptr; 3159 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3160 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3161 ShrinkOperand(BO->getOperand(1))); 3162 3163 // Any wrapping introduced by shrinking this operation shouldn't be 3164 // considered undefined behavior. So, we can't unconditionally copy 3165 // arithmetic wrapping flags to NewI. 
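        // For example (illustrative): 100 + 50 cannot signed-overflow as an
        // i32 'add nsw', but the same addition does signed-overflow once it
        // is narrowed to i8. The low bits are still the ones we need, so the
        // narrowing itself is fine; only the nsw/nuw flags must be dropped.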
3166 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3167 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3168 NewI = 3169 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3170 ShrinkOperand(CI->getOperand(1))); 3171 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3172 NewI = B.CreateSelect(SI->getCondition(), 3173 ShrinkOperand(SI->getTrueValue()), 3174 ShrinkOperand(SI->getFalseValue())); 3175 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3176 switch (CI->getOpcode()) { 3177 default: 3178 llvm_unreachable("Unhandled cast!"); 3179 case Instruction::Trunc: 3180 NewI = ShrinkOperand(CI->getOperand(0)); 3181 break; 3182 case Instruction::SExt: 3183 NewI = B.CreateSExtOrTrunc( 3184 CI->getOperand(0), 3185 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3186 break; 3187 case Instruction::ZExt: 3188 NewI = B.CreateZExtOrTrunc( 3189 CI->getOperand(0), 3190 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3191 break; 3192 } 3193 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3194 auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements(); 3195 auto *O0 = B.CreateZExtOrTrunc( 3196 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3197 auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements(); 3198 auto *O1 = B.CreateZExtOrTrunc( 3199 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3200 3201 NewI = B.CreateShuffleVector(O0, O1, SI->getMask()); 3202 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3203 // Don't do anything with the operands, just extend the result. 3204 continue; 3205 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3206 auto Elements = IE->getOperand(0)->getType()->getVectorNumElements(); 3207 auto *O0 = B.CreateZExtOrTrunc( 3208 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3209 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3210 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3211 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3212 auto Elements = EE->getOperand(0)->getType()->getVectorNumElements(); 3213 auto *O0 = B.CreateZExtOrTrunc( 3214 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3215 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3216 } else { 3217 // If we don't know what to do, be conservative and don't do anything. 3218 continue; 3219 } 3220 3221 // Lastly, extend the result. 3222 NewI->takeName(cast<Instruction>(I)); 3223 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3224 I->replaceAllUsesWith(Res); 3225 cast<Instruction>(I)->eraseFromParent(); 3226 Erased.insert(I); 3227 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res); 3228 } 3229 } 3230 3231 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3232 for (const auto &KV : Cost->getMinimalBitwidths()) { 3233 // If the value wasn't vectorized, we must maintain the original scalar 3234 // type. The absence of the value from VectorLoopValueMap indicates that it 3235 // wasn't vectorized. 
3236 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3237 continue; 3238 for (unsigned Part = 0; Part < UF; ++Part) { 3239 Value *I = getOrCreateVectorValue(KV.first, Part); 3240 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3241 if (Inst && Inst->use_empty()) { 3242 Value *NewI = Inst->getOperand(0); 3243 Inst->eraseFromParent(); 3244 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI); 3245 } 3246 } 3247 } 3248 } 3249 3250 void InnerLoopVectorizer::fixVectorizedLoop() { 3251 // Insert truncates and extends for any truncated instructions as hints to 3252 // InstCombine. 3253 if (VF > 1) 3254 truncateToMinimalBitwidths(); 3255 3256 // Fix widened non-induction PHIs by setting up the PHI operands. 3257 if (OrigPHIsToFix.size()) { 3258 assert(EnableVPlanNativePath && 3259 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 3260 fixNonInductionPHIs(); 3261 } 3262 3263 // At this point every instruction in the original loop is widened to a 3264 // vector form. Now we need to fix the recurrences in the loop. These PHI 3265 // nodes are currently empty because we did not want to introduce cycles. 3266 // This is the second stage of vectorizing recurrences. 3267 fixCrossIterationPHIs(); 3268 3269 // Update the dominator tree. 3270 // 3271 // FIXME: After creating the structure of the new loop, the dominator tree is 3272 // no longer up-to-date, and it remains that way until we update it 3273 // here. An out-of-date dominator tree is problematic for SCEV, 3274 // because SCEVExpander uses it to guide code generation. The 3275 // vectorizer use SCEVExpanders in several places. Instead, we should 3276 // keep the dominator tree up-to-date as we go. 3277 updateAnalysis(); 3278 3279 // Fix-up external users of the induction variables. 3280 for (auto &Entry : *Legal->getInductionVars()) 3281 fixupIVUsers(Entry.first, Entry.second, 3282 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 3283 IVEndValues[Entry.first], LoopMiddleBlock); 3284 3285 fixLCSSAPHIs(); 3286 for (Instruction *PI : PredicatedInstructions) 3287 sinkScalarOperands(&*PI); 3288 3289 // Remove redundant induction instructions. 3290 cse(LoopVectorBody); 3291 } 3292 3293 void InnerLoopVectorizer::fixCrossIterationPHIs() { 3294 // In order to support recurrences we need to be able to vectorize Phi nodes. 3295 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3296 // stage #2: We now need to fix the recurrences by adding incoming edges to 3297 // the currently empty PHI nodes. At this point every instruction in the 3298 // original loop is widened to a vector form so we can use them to construct 3299 // the incoming edges. 3300 for (PHINode &Phi : OrigLoop->getHeader()->phis()) { 3301 // Handle first-order recurrences and reductions that need to be fixed. 3302 if (Legal->isFirstOrderRecurrence(&Phi)) 3303 fixFirstOrderRecurrence(&Phi); 3304 else if (Legal->isReductionVariable(&Phi)) 3305 fixReduction(&Phi); 3306 } 3307 } 3308 3309 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) { 3310 // This is the second phase of vectorizing first-order recurrences. An 3311 // overview of the transformation is described below. Suppose we have the 3312 // following loop. 3313 // 3314 // for (int i = 0; i < n; ++i) 3315 // b[i] = a[i] - a[i - 1]; 3316 // 3317 // There is a first-order recurrence on "a". 
  // For this loop, the shorthand scalar IR looks like:
  //
  //   scalar.ph:
  //     s_init = a[-1]
  //     br scalar.body
  //
  //   scalar.body:
  //     i = phi [0, scalar.ph], [i+1, scalar.body]
  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //     s2 = a[i]
  //     b[i] = s2 - s1
  //     br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // temporary value for s1. We now complete the vectorization and produce the
  // shorthand vector IR shown below (for VF = 4, UF = 1).
  //
  //   vector.ph:
  //     v_init = vector(..., ..., ..., a[-1])
  //     br vector.body
  //
  //   vector.body
  //     i = phi [0, vector.ph], [i+4, vector.body]
  //     v1 = phi [v_init, vector.ph], [v2, vector.body]
  //     v2 = a[i, i+1, i+2, i+3];
  //     v3 = vector(v1(3), v2(0, 1, 2))
  //     b[i, i+1, i+2, i+3] = v2 - v3
  //     br cond, vector.body, middle.block
  //
  //   middle.block:
  //     x = v2(3)
  //     br scalar.ph
  //
  //   scalar.ph:
  //     s_init = phi [x, middle.block], [a[-1], otherwise]
  //     br scalar.body
  //
  // After the vector loop completes execution, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.

  // Get the original loop preheader and single loop latch.
  auto *Preheader = OrigLoop->getLoopPreheader();
  auto *Latch = OrigLoop->getLoopLatch();

  // Get the initial and previous values of the scalar recurrence.
  auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
  auto *Previous = Phi->getIncomingValueForBlock(Latch);

  // Create a vector from the initial value.
  auto *VectorInit = ScalarInit;
  if (VF > 1) {
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
    VectorInit = Builder.CreateInsertElement(
        UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
        Builder.getInt32(VF - 1), "vector.recur.init");
  }

  // We constructed a temporary phi node in the first phase of vectorization.
  // This phi node will eventually be deleted.
  Builder.SetInsertPoint(
      cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));

  // Create a phi node for the new recurrence. The current value will either be
  // the initial value inserted into a vector or a loop-varying vector value.
  auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
  VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);

  // Get the vectorized previous value of the last part UF - 1. It appears last
  // among all unrolled iterations, due to the order of their construction.
  Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);

  // Set the insertion point after the previous value if it is an instruction.
  // Note that the previous value may have been constant-folded so it is not
  // guaranteed to be an instruction in the vector loop. Also, if the previous
  // value is a phi node, we should insert after all the phi nodes to avoid
  // breaking basic block verification.
3395 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) || 3396 isa<PHINode>(PreviousLastPart)) 3397 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3398 else 3399 Builder.SetInsertPoint( 3400 &*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart))); 3401 3402 // We will construct a vector for the recurrence by combining the values for 3403 // the current and previous iterations. This is the required shuffle mask. 3404 SmallVector<Constant *, 8> ShuffleMask(VF); 3405 ShuffleMask[0] = Builder.getInt32(VF - 1); 3406 for (unsigned I = 1; I < VF; ++I) 3407 ShuffleMask[I] = Builder.getInt32(I + VF - 1); 3408 3409 // The vector from which to take the initial value for the current iteration 3410 // (actual or unrolled). Initially, this is the vector phi node. 3411 Value *Incoming = VecPhi; 3412 3413 // Shuffle the current and previous vector and update the vector parts. 3414 for (unsigned Part = 0; Part < UF; ++Part) { 3415 Value *PreviousPart = getOrCreateVectorValue(Previous, Part); 3416 Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part); 3417 auto *Shuffle = 3418 VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart, 3419 ConstantVector::get(ShuffleMask)) 3420 : Incoming; 3421 PhiPart->replaceAllUsesWith(Shuffle); 3422 cast<Instruction>(PhiPart)->eraseFromParent(); 3423 VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle); 3424 Incoming = PreviousPart; 3425 } 3426 3427 // Fix the latch value of the new recurrence in the vector loop. 3428 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 3429 3430 // Extract the last vector element in the middle block. This will be the 3431 // initial value for the recurrence when jumping to the scalar loop. 3432 auto *ExtractForScalar = Incoming; 3433 if (VF > 1) { 3434 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3435 ExtractForScalar = Builder.CreateExtractElement( 3436 ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract"); 3437 } 3438 // Extract the second last element in the middle block if the 3439 // Phi is used outside the loop. We need to extract the phi itself 3440 // and not the last element (the phi update in the current iteration). This 3441 // will be the value when jumping to the exit block from the LoopMiddleBlock, 3442 // when the scalar loop is not run at all. 3443 Value *ExtractForPhiUsedOutsideLoop = nullptr; 3444 if (VF > 1) 3445 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 3446 Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi"); 3447 // When loop is unrolled without vectorizing, initialize 3448 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value of 3449 // `Incoming`. This is analogous to the vectorized case above: extracting the 3450 // second last element when VF > 1. 3451 else if (UF > 1) 3452 ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2); 3453 3454 // Fix the initial value of the original recurrence in the scalar loop. 3455 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 3456 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 3457 for (auto *BB : predecessors(LoopScalarPreHeader)) { 3458 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 3459 Start->addIncoming(Incoming, BB); 3460 } 3461 3462 Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start); 3463 Phi->setName("scalar.recur"); 3464 3465 // Finally, fix users of the recurrence outside the loop. 
  // The users will need either the last value of the scalar recurrence or the
  // last value of the vector recurrence we extracted in the middle block.
  // Since the loop is in LCSSA form, we just need to find all the phi nodes
  // for the original scalar recurrence in the exit block, and then add an edge
  // for the middle block.
  for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
    if (LCSSAPhi.getIncomingValue(0) == Phi) {
      LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
    }
  }
}

void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
  Constant *Zero = Builder.getInt32(0);

  // Get its reduction variable descriptor.
  assert(Legal->isReductionVariable(Phi) &&
         "Unable to find the reduction variable");
  RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];

  RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
  TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
  Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
  RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
      RdxDesc.getMinMaxRecurrenceKind();
  setDebugLocFromInst(Builder, ReductionStartValue);

  // We need to generate a reduction vector from the incoming scalar.
  // To do so, we need to generate the 'identity' vector and override
  // one of the elements with the incoming scalar reduction. We need
  // to do it in the vector-loop preheader.
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // This is the vector-clone of the value that leaves the loop.
  Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();

  // Find the reduction identity variable. Zero for addition, or and xor; one
  // for multiplication; -1 for and.
  Value *Identity;
  Value *VectorStart;
  if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
      RK == RecurrenceDescriptor::RK_FloatMinMax) {
    // MinMax reductions have the start value as their identity.
    if (VF == 1) {
      VectorStart = Identity = ReductionStartValue;
    } else {
      VectorStart = Identity =
          Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
    }
  } else {
    // Handle other reduction kinds:
    Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
        RK, VecTy->getScalarType());
    if (VF == 1) {
      Identity = Iden;
      // This vector is the Identity vector where the first element is the
      // incoming scalar reduction.
      VectorStart = ReductionStartValue;
    } else {
      Identity = ConstantVector::getSplat(VF, Iden);

      // This vector is the Identity vector where the first element is the
      // incoming scalar reduction.
      VectorStart =
          Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
    }
  }

  // Fix the vector-loop phi.

  // Reductions do not have to start at zero. They can start with
  // any loop invariant values.
  BasicBlock *Latch = OrigLoop->getLoopLatch();
  Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
    Value *Val = getOrCreateVectorValue(LoopVal, Part);
    // Make sure to add the reduction start value only to the
    // first unroll part.
    Value *StartVal = (Part == 0) ?
VectorStart : Identity; 3545 cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader); 3546 cast<PHINode>(VecRdxPhi) 3547 ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 3548 } 3549 3550 // Before each round, move the insertion point right between 3551 // the PHIs and the values we are going to write. 3552 // This allows us to write both PHINodes and the extractelement 3553 // instructions. 3554 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3555 3556 setDebugLocFromInst(Builder, LoopExitInst); 3557 3558 // If the vector reduction can be performed in a smaller type, we truncate 3559 // then extend the loop exit value to enable InstCombine to evaluate the 3560 // entire expression in the smaller type. 3561 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) { 3562 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 3563 Builder.SetInsertPoint( 3564 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 3565 VectorParts RdxParts(UF); 3566 for (unsigned Part = 0; Part < UF; ++Part) { 3567 RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3568 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3569 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 3570 : Builder.CreateZExt(Trunc, VecTy); 3571 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 3572 UI != RdxParts[Part]->user_end();) 3573 if (*UI != Trunc) { 3574 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 3575 RdxParts[Part] = Extnd; 3576 } else { 3577 ++UI; 3578 } 3579 } 3580 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3581 for (unsigned Part = 0; Part < UF; ++Part) { 3582 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3583 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]); 3584 } 3585 } 3586 3587 // Reduce all of the unrolled parts into a single vector. 3588 Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0); 3589 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK); 3590 setDebugLocFromInst(Builder, ReducedPartRdx); 3591 for (unsigned Part = 1; Part < UF; ++Part) { 3592 Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3593 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3594 // Floating point operations had to be 'fast' to enable the reduction. 3595 ReducedPartRdx = addFastMathFlag( 3596 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart, 3597 ReducedPartRdx, "bin.rdx")); 3598 else 3599 ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx, 3600 RdxPart); 3601 } 3602 3603 if (VF > 1) { 3604 bool NoNaN = Legal->hasFunNoNaNAttr(); 3605 ReducedPartRdx = 3606 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN); 3607 // If the reduction can be performed in a smaller type, we need to extend 3608 // the reduction to the wider type before we branch to the original loop. 3609 if (Phi->getType() != RdxDesc.getRecurrenceType()) 3610 ReducedPartRdx = 3611 RdxDesc.isSigned() 3612 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 3613 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 3614 } 3615 3616 // Create a phi node that merges control-flow from the backedge-taken check 3617 // block and the middle block. 
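  // In shorthand IR this looks roughly like (a sketch; the value names are
  // illustrative, not taken from the source):
  //   scalar.ph:
  //     %bc.merge.rdx = phi [ %rdx.start, %bypass ], [ %rdx.final, %middle.block ]
  // so the scalar remainder resumes the reduction from whichever path reached
  // it.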
3618 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 3619 LoopScalarPreHeader->getTerminator()); 3620 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 3621 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 3622 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3623 3624 // Now, we need to fix the users of the reduction variable 3625 // inside and outside of the scalar remainder loop. 3626 // We know that the loop is in LCSSA form. We need to update the 3627 // PHI nodes in the exit blocks. 3628 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3629 // All PHINodes need to have a single entry edge, or two if 3630 // we already fixed them. 3631 assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 3632 3633 // We found a reduction value exit-PHI. Update it with the 3634 // incoming bypass edge. 3635 if (LCSSAPhi.getIncomingValue(0) == LoopExitInst) 3636 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 3637 } // end of the LCSSA phi scan. 3638 3639 // Fix the scalar loop reduction variable with the incoming reduction sum 3640 // from the vector body and from the backedge value. 3641 int IncomingEdgeBlockIdx = 3642 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 3643 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 3644 // Pick the other block. 3645 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 3646 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 3647 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 3648 } 3649 3650 void InnerLoopVectorizer::fixLCSSAPHIs() { 3651 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3652 if (LCSSAPhi.getNumIncomingValues() == 1) { 3653 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 3654 // Non-instruction incoming values will have only one value. 3655 unsigned LastLane = 0; 3656 if (isa<Instruction>(IncomingValue)) 3657 LastLane = Cost->isUniformAfterVectorization( 3658 cast<Instruction>(IncomingValue), VF) 3659 ? 0 3660 : VF - 1; 3661 // Can be a loop invariant incoming value or the last scalar value to be 3662 // extracted from the vectorized loop. 3663 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3664 Value *lastIncomingValue = 3665 getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane }); 3666 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 3667 } 3668 } 3669 } 3670 3671 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 3672 // The basic block and loop containing the predicated instruction. 3673 auto *PredBB = PredInst->getParent(); 3674 auto *VectorLoop = LI->getLoopFor(PredBB); 3675 3676 // Initialize a worklist with the operands of the predicated instruction. 3677 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 3678 3679 // Holds instructions that we need to analyze again. An instruction may be 3680 // reanalyzed if we don't yet know if we can sink it or not. 3681 SmallVector<Instruction *, 8> InstsToReanalyze; 3682 3683 // Returns true if a given use occurs in the predicated block. Phi nodes use 3684 // their operands in their corresponding predecessor blocks. 
  auto isBlockOfUsePredicated = [&](Use &U) -> bool {
    auto *I = cast<Instruction>(U.getUser());
    BasicBlock *BB = I->getParent();
    if (auto *Phi = dyn_cast<PHINode>(I))
      BB = Phi->getIncomingBlock(
          PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
    return BB == PredBB;
  };

  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when one pass
  // through the worklist doesn't sink a single instruction.
  bool Changed;
  do {
    // Add the instructions that need to be reanalyzed to the worklist, and
    // reset the changed indicator.
    Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
    InstsToReanalyze.clear();
    Changed = false;

    while (!Worklist.empty()) {
      auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());

      // We can't sink an instruction if it is a phi node, is already in the
      // predicated block, is not in the loop, or may have side effects.
      if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
          !VectorLoop->contains(I) || I->mayHaveSideEffects())
        continue;

      // It's legal to sink the instruction if all its uses occur in the
      // predicated block. Otherwise, there's nothing to do yet, and we may
      // need to reanalyze the instruction.
      if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
        InstsToReanalyze.push_back(I);
        continue;
      }

      // Move the instruction to the beginning of the predicated block, and
      // add its operands to the worklist.
      I->moveBefore(&*PredBB->getFirstInsertionPt());
      Worklist.insert(I->op_begin(), I->op_end());

      // The sinking may have enabled other instructions to be sunk, so we
      // will need to iterate.
      Changed = true;
    }
  } while (Changed);
}

void InnerLoopVectorizer::fixNonInductionPHIs() {
  for (PHINode *OrigPhi : OrigPHIsToFix) {
    PHINode *NewPhi =
        cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0));
    unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();

    SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
        predecessors(OrigPhi->getParent()));
    SmallVector<BasicBlock *, 2> VectorBBPredecessors(
        predecessors(NewPhi->getParent()));
    assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
           "Scalar and Vector BB should have the same number of predecessors");

    // The insertion point in Builder may be invalidated by the time we get
    // here. Force the Builder insertion point to something valid so that we do
    // not run into issues during insertion point restore in
    // getOrCreateVectorValue calls below.
    Builder.SetInsertPoint(NewPhi);

    // The predecessor order is preserved and we can rely on mapping between
    // scalar and vector block predecessors.
    for (unsigned i = 0; i < NumIncomingValues; ++i) {
      BasicBlock *NewPredBB = VectorBBPredecessors[i];

      // When looking up the new scalar/vector values to fix up, use incoming
      // values from the original phi.
3761 Value *ScIncV = 3762 OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]); 3763 3764 // Scalar incoming value may need a broadcast 3765 Value *NewIncV = getOrCreateVectorValue(ScIncV, 0); 3766 NewPhi->addIncoming(NewIncV, NewPredBB); 3767 } 3768 } 3769 } 3770 3771 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF, 3772 unsigned VF) { 3773 PHINode *P = cast<PHINode>(PN); 3774 if (EnableVPlanNativePath) { 3775 // Currently we enter here in the VPlan-native path for non-induction 3776 // PHIs where all control flow is uniform. We simply widen these PHIs. 3777 // Create a vector phi with no operands - the vector phi operands will be 3778 // set at the end of vector code generation. 3779 Type *VecTy = 3780 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF); 3781 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 3782 VectorLoopValueMap.setVectorValue(P, 0, VecPhi); 3783 OrigPHIsToFix.push_back(P); 3784 3785 return; 3786 } 3787 3788 assert(PN->getParent() == OrigLoop->getHeader() && 3789 "Non-header phis should have been handled elsewhere"); 3790 3791 // In order to support recurrences we need to be able to vectorize Phi nodes. 3792 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3793 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 3794 // this value when we vectorize all of the instructions that use the PHI. 3795 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) { 3796 for (unsigned Part = 0; Part < UF; ++Part) { 3797 // This is phase one of vectorizing PHIs. 3798 Type *VecTy = 3799 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF); 3800 Value *EntryPart = PHINode::Create( 3801 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 3802 VectorLoopValueMap.setVectorValue(P, Part, EntryPart); 3803 } 3804 return; 3805 } 3806 3807 setDebugLocFromInst(Builder, P); 3808 3809 // This PHINode must be an induction variable. 3810 // Make sure that we know about it. 3811 assert(Legal->getInductionVars()->count(P) && "Not an induction variable"); 3812 3813 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 3814 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 3815 3816 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 3817 // which can be found from the original scalar operations. 3818 switch (II.getKind()) { 3819 case InductionDescriptor::IK_NoInduction: 3820 llvm_unreachable("Unknown induction"); 3821 case InductionDescriptor::IK_IntInduction: 3822 case InductionDescriptor::IK_FpInduction: 3823 llvm_unreachable("Integer/fp induction is handled elsewhere."); 3824 case InductionDescriptor::IK_PtrInduction: { 3825 // Handle the pointer induction variable case. 3826 assert(P->getType()->isPointerTy() && "Unexpected type."); 3827 // This is the normalized GEP that starts counting at zero. 3828 Value *PtrInd = Induction; 3829 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType()); 3830 // Determine the number of scalars we need to generate for each unroll 3831 // iteration. If the instruction is uniform, we only need to generate the 3832 // first lane. Otherwise, we generate all VF values. 3833 unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF; 3834 // These are the scalar results. Notice that we don't generate vector GEPs 3835 // because scalar GEPs result in better code. 
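    // For instance (a sketch for VF = 4, UF = 1 with a non-uniform pointer
    // IV): each of lanes 0..3 gets its own scalar 'next.gep' computed from
    // the normalized index Part * VF + Lane, instead of a single
    // vector-of-pointers GEP.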
3836 for (unsigned Part = 0; Part < UF; ++Part) { 3837 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 3838 Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF); 3839 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 3840 Value *SclrGep = 3841 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 3842 SclrGep->setName("next.gep"); 3843 VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep); 3844 } 3845 } 3846 return; 3847 } 3848 } 3849 } 3850 3851 /// A helper function for checking whether an integer division-related 3852 /// instruction may divide by zero (in which case it must be predicated if 3853 /// executed conditionally in the scalar code). 3854 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 3855 /// Non-zero divisors that are non compile-time constants will not be 3856 /// converted into multiplication, so we will still end up scalarizing 3857 /// the division, but can do so w/o predication. 3858 static bool mayDivideByZero(Instruction &I) { 3859 assert((I.getOpcode() == Instruction::UDiv || 3860 I.getOpcode() == Instruction::SDiv || 3861 I.getOpcode() == Instruction::URem || 3862 I.getOpcode() == Instruction::SRem) && 3863 "Unexpected instruction"); 3864 Value *Divisor = I.getOperand(1); 3865 auto *CInt = dyn_cast<ConstantInt>(Divisor); 3866 return !CInt || CInt->isZero(); 3867 } 3868 3869 void InnerLoopVectorizer::widenInstruction(Instruction &I) { 3870 switch (I.getOpcode()) { 3871 case Instruction::Br: 3872 case Instruction::PHI: 3873 llvm_unreachable("This instruction is handled by a different recipe."); 3874 case Instruction::GetElementPtr: { 3875 // Construct a vector GEP by widening the operands of the scalar GEP as 3876 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 3877 // results in a vector of pointers when at least one operand of the GEP 3878 // is vector-typed. Thus, to keep the representation compact, we only use 3879 // vector-typed operands for loop-varying values. 3880 auto *GEP = cast<GetElementPtrInst>(&I); 3881 3882 if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) { 3883 // If we are vectorizing, but the GEP has only loop-invariant operands, 3884 // the GEP we build (by only using vector-typed operands for 3885 // loop-varying values) would be a scalar pointer. Thus, to ensure we 3886 // produce a vector of pointers, we need to either arbitrarily pick an 3887 // operand to broadcast, or broadcast a clone of the original GEP. 3888 // Here, we broadcast a clone of the original. 3889 // 3890 // TODO: If at some point we decide to scalarize instructions having 3891 // loop-invariant operands, this special case will no longer be 3892 // required. We would add the scalarization decision to 3893 // collectLoopScalars() and teach getVectorValue() to broadcast 3894 // the lane-zero scalar value. 3895 auto *Clone = Builder.Insert(GEP->clone()); 3896 for (unsigned Part = 0; Part < UF; ++Part) { 3897 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 3898 VectorLoopValueMap.setVectorValue(&I, Part, EntryPart); 3899 addMetadata(EntryPart, GEP); 3900 } 3901 } else { 3902 // If the GEP has at least one loop-varying operand, we are sure to 3903 // produce a vector of pointers. But if we are only unrolling, we want 3904 // to produce a scalar GEP for each unroll part. Thus, the GEP we 3905 // produce with the code below will be scalar (if VF == 1) or vector 3906 // (otherwise). 
Note that for the unroll-only case, we still maintain 3907 // values in the vector mapping with initVector, as we do for other 3908 // instructions. 3909 for (unsigned Part = 0; Part < UF; ++Part) { 3910 // The pointer operand of the new GEP. If it's loop-invariant, we 3911 // won't broadcast it. 3912 auto *Ptr = 3913 OrigLoop->isLoopInvariant(GEP->getPointerOperand()) 3914 ? GEP->getPointerOperand() 3915 : getOrCreateVectorValue(GEP->getPointerOperand(), Part); 3916 3917 // Collect all the indices for the new GEP. If any index is 3918 // loop-invariant, we won't broadcast it. 3919 SmallVector<Value *, 4> Indices; 3920 for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) { 3921 if (OrigLoop->isLoopInvariant(U.get())) 3922 Indices.push_back(U.get()); 3923 else 3924 Indices.push_back(getOrCreateVectorValue(U.get(), Part)); 3925 } 3926 3927 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 3928 // but it should be a vector, otherwise. 3929 auto *NewGEP = GEP->isInBounds() 3930 ? Builder.CreateInBoundsGEP(Ptr, Indices) 3931 : Builder.CreateGEP(Ptr, Indices); 3932 assert((VF == 1 || NewGEP->getType()->isVectorTy()) && 3933 "NewGEP is not a pointer vector"); 3934 VectorLoopValueMap.setVectorValue(&I, Part, NewGEP); 3935 addMetadata(NewGEP, GEP); 3936 } 3937 } 3938 3939 break; 3940 } 3941 case Instruction::UDiv: 3942 case Instruction::SDiv: 3943 case Instruction::SRem: 3944 case Instruction::URem: 3945 case Instruction::Add: 3946 case Instruction::FAdd: 3947 case Instruction::Sub: 3948 case Instruction::FSub: 3949 case Instruction::Mul: 3950 case Instruction::FMul: 3951 case Instruction::FDiv: 3952 case Instruction::FRem: 3953 case Instruction::Shl: 3954 case Instruction::LShr: 3955 case Instruction::AShr: 3956 case Instruction::And: 3957 case Instruction::Or: 3958 case Instruction::Xor: { 3959 // Just widen binops. 3960 auto *BinOp = cast<BinaryOperator>(&I); 3961 setDebugLocFromInst(Builder, BinOp); 3962 3963 for (unsigned Part = 0; Part < UF; ++Part) { 3964 Value *A = getOrCreateVectorValue(BinOp->getOperand(0), Part); 3965 Value *B = getOrCreateVectorValue(BinOp->getOperand(1), Part); 3966 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B); 3967 3968 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V)) 3969 VecOp->copyIRFlags(BinOp); 3970 3971 // Use this vector value for all users of the original instruction. 3972 VectorLoopValueMap.setVectorValue(&I, Part, V); 3973 addMetadata(V, BinOp); 3974 } 3975 3976 break; 3977 } 3978 case Instruction::Select: { 3979 // Widen selects. 3980 // If the selector is loop invariant we can create a select 3981 // instruction with a scalar condition. Otherwise, use vector-select. 3982 auto *SE = PSE.getSE(); 3983 bool InvariantCond = 3984 SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop); 3985 setDebugLocFromInst(Builder, &I); 3986 3987 // The condition can be loop invariant but still defined inside the 3988 // loop. This means that we can't just use the original 'cond' value. 3989 // We have to take the 'vectorized' value and pick the first lane. 3990 // Instcombine will make this a no-op. 3991 3992 auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0}); 3993 3994 for (unsigned Part = 0; Part < UF; ++Part) { 3995 Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part); 3996 Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part); 3997 Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part); 3998 Value *Sel = 3999 Builder.CreateSelect(InvariantCond ? 
ScalarCond : Cond, Op0, Op1); 4000 VectorLoopValueMap.setVectorValue(&I, Part, Sel); 4001 addMetadata(Sel, &I); 4002 } 4003 4004 break; 4005 } 4006 4007 case Instruction::ICmp: 4008 case Instruction::FCmp: { 4009 // Widen compares. Generate vector compares. 4010 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4011 auto *Cmp = dyn_cast<CmpInst>(&I); 4012 setDebugLocFromInst(Builder, Cmp); 4013 for (unsigned Part = 0; Part < UF; ++Part) { 4014 Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part); 4015 Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part); 4016 Value *C = nullptr; 4017 if (FCmp) { 4018 // Propagate fast math flags. 4019 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4020 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4021 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4022 } else { 4023 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4024 } 4025 VectorLoopValueMap.setVectorValue(&I, Part, C); 4026 addMetadata(C, &I); 4027 } 4028 4029 break; 4030 } 4031 4032 case Instruction::ZExt: 4033 case Instruction::SExt: 4034 case Instruction::FPToUI: 4035 case Instruction::FPToSI: 4036 case Instruction::FPExt: 4037 case Instruction::PtrToInt: 4038 case Instruction::IntToPtr: 4039 case Instruction::SIToFP: 4040 case Instruction::UIToFP: 4041 case Instruction::Trunc: 4042 case Instruction::FPTrunc: 4043 case Instruction::BitCast: { 4044 auto *CI = dyn_cast<CastInst>(&I); 4045 setDebugLocFromInst(Builder, CI); 4046 4047 /// Vectorize casts. 4048 Type *DestTy = 4049 (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF); 4050 4051 for (unsigned Part = 0; Part < UF; ++Part) { 4052 Value *A = getOrCreateVectorValue(CI->getOperand(0), Part); 4053 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4054 VectorLoopValueMap.setVectorValue(&I, Part, Cast); 4055 addMetadata(Cast, &I); 4056 } 4057 break; 4058 } 4059 4060 case Instruction::Call: { 4061 // Ignore dbg intrinsics. 4062 if (isa<DbgInfoIntrinsic>(I)) 4063 break; 4064 setDebugLocFromInst(Builder, &I); 4065 4066 Module *M = I.getParent()->getParent()->getParent(); 4067 auto *CI = cast<CallInst>(&I); 4068 4069 StringRef FnName = CI->getCalledFunction()->getName(); 4070 Function *F = CI->getCalledFunction(); 4071 Type *RetTy = ToVectorTy(CI->getType(), VF); 4072 SmallVector<Type *, 4> Tys; 4073 for (Value *ArgOperand : CI->arg_operands()) 4074 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 4075 4076 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4077 4078 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4079 // version of the instruction. 4080 // Is it beneficial to perform intrinsic call compared to lib call? 4081 bool NeedToScalarize; 4082 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 4083 bool UseVectorIntrinsic = 4084 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 4085 assert((UseVectorIntrinsic || !NeedToScalarize) && 4086 "Instruction should be scalarized elsewhere."); 4087 4088 for (unsigned Part = 0; Part < UF; ++Part) { 4089 SmallVector<Value *, 4> Args; 4090 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) { 4091 Value *Arg = CI->getArgOperand(i); 4092 // Some intrinsics have a scalar argument - don't replace it with a 4093 // vector. 
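        // (For instance, the integer exponent operand of llvm.powi stays
        // scalar even when the base operand is widened.)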
4094 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) 4095 Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part); 4096 Args.push_back(Arg); 4097 } 4098 4099 Function *VectorF; 4100 if (UseVectorIntrinsic) { 4101 // Use vector version of the intrinsic. 4102 Type *TysForDecl[] = {CI->getType()}; 4103 if (VF > 1) 4104 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4105 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4106 } else { 4107 // Use vector version of the library call. 4108 StringRef VFnName = TLI->getVectorizedFunction(FnName, VF); 4109 assert(!VFnName.empty() && "Vector function name is empty."); 4110 VectorF = M->getFunction(VFnName); 4111 if (!VectorF) { 4112 // Generate a declaration 4113 FunctionType *FTy = FunctionType::get(RetTy, Tys, false); 4114 VectorF = 4115 Function::Create(FTy, Function::ExternalLinkage, VFnName, M); 4116 VectorF->copyAttributesFrom(F); 4117 } 4118 } 4119 assert(VectorF && "Can't create vector function."); 4120 4121 SmallVector<OperandBundleDef, 1> OpBundles; 4122 CI->getOperandBundlesAsDefs(OpBundles); 4123 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4124 4125 if (isa<FPMathOperator>(V)) 4126 V->copyFastMathFlags(CI); 4127 4128 VectorLoopValueMap.setVectorValue(&I, Part, V); 4129 addMetadata(V, &I); 4130 } 4131 4132 break; 4133 } 4134 4135 default: 4136 // This instruction is not vectorized by simple widening. 4137 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4138 llvm_unreachable("Unhandled instruction!"); 4139 } // end of switch. 4140 } 4141 4142 void InnerLoopVectorizer::updateAnalysis() { 4143 // Forget the original basic block. 4144 PSE.getSE()->forgetLoop(OrigLoop); 4145 4146 // DT is not kept up-to-date for outer loop vectorization 4147 if (EnableVPlanNativePath) 4148 return; 4149 4150 // Update the dominator tree information. 4151 assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) && 4152 "Entry does not dominate exit."); 4153 4154 DT->addNewBlock(LoopMiddleBlock, 4155 LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4156 DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]); 4157 DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader); 4158 DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]); 4159 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 4160 } 4161 4162 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) { 4163 // We should not collect Scalars more than once per VF. Right now, this 4164 // function is called from collectUniformsAndScalars(), which already does 4165 // this check. Collecting Scalars for VF=1 does not make any sense. 4166 assert(VF >= 2 && Scalars.find(VF) == Scalars.end() && 4167 "This function should not be visited twice for the same VF"); 4168 4169 SmallSetVector<Instruction *, 8> Worklist; 4170 4171 // These sets are used to seed the analysis with pointers used by memory 4172 // accesses that will remain scalar. 4173 SmallSetVector<Instruction *, 8> ScalarPtrs; 4174 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4175 4176 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4177 // The pointer operands of loads and stores will be scalar as long as the 4178 // memory access is not a gather or scatter operation. The value operand of a 4179 // store will remain scalar if the store is scalarized. 
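  // For example (illustrative): if a store is decided to be scalarized, both
  // its address and its stored value count as scalar uses here, whereas the
  // address of a load that becomes a gather does not.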
4180 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4181 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4182 assert(WideningDecision != CM_Unknown && 4183 "Widening decision should be ready at this moment"); 4184 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4185 if (Ptr == Store->getValueOperand()) 4186 return WideningDecision == CM_Scalarize; 4187 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4188 "Ptr is neither a value nor a pointer operand"); 4189 return WideningDecision != CM_GatherScatter; 4190 }; 4191 4192 // A helper that returns true if the given value is a bitcast or 4193 // getelementptr instruction contained in the loop. 4194 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4195 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4196 isa<GetElementPtrInst>(V)) && 4197 !TheLoop->isLoopInvariant(V); 4198 }; 4199 4200 // A helper that evaluates a memory access's use of a pointer. If the use 4201 // will be a scalar use, and the pointer is only used by memory accesses, we 4202 // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in 4203 // PossibleNonScalarPtrs. 4204 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4205 // We only care about bitcast and getelementptr instructions contained in 4206 // the loop. 4207 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4208 return; 4209 4210 // If the pointer has already been identified as scalar (e.g., if it was 4211 // also identified as uniform), there's nothing to do. 4212 auto *I = cast<Instruction>(Ptr); 4213 if (Worklist.count(I)) 4214 return; 4215 4216 // If the use of the pointer will be a scalar use, and all users of the 4217 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 4218 // place the pointer in PossibleNonScalarPtrs. 4219 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 4220 return isa<LoadInst>(U) || isa<StoreInst>(U); 4221 })) 4222 ScalarPtrs.insert(I); 4223 else 4224 PossibleNonScalarPtrs.insert(I); 4225 }; 4226 4227 // We seed the scalars analysis with three classes of instructions: (1) 4228 // instructions marked uniform-after-vectorization, (2) bitcast and 4229 // getelementptr instructions used by memory accesses requiring a scalar use, 4230 // and (3) pointer induction variables and their update instructions (we 4231 // currently only scalarize these). 4232 // 4233 // (1) Add to the worklist all instructions that have been identified as 4234 // uniform-after-vectorization. 4235 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4236 4237 // (2) Add to the worklist all bitcast and getelementptr instructions used by 4238 // memory accesses requiring a scalar use. The pointer operands of loads and 4239 // stores will be scalar as long as the memory access is not a gather or 4240 // scatter operation. The value operand of a store will remain scalar if the 4241 // store is scalarized.
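// For instance, a getelementptr whose only users are loads and stores that
// use it as a (non-gather/scatter) pointer operand ends up in ScalarPtrs,
// whereas a getelementptr that is additionally used by, say, a pointer
// comparison is placed in PossibleNonScalarPtrs and is not seeded into the
// worklist below.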
4242 for (auto *BB : TheLoop->blocks()) 4243 for (auto &I : *BB) { 4244 if (auto *Load = dyn_cast<LoadInst>(&I)) { 4245 evaluatePtrUse(Load, Load->getPointerOperand()); 4246 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 4247 evaluatePtrUse(Store, Store->getPointerOperand()); 4248 evaluatePtrUse(Store, Store->getValueOperand()); 4249 } 4250 } 4251 for (auto *I : ScalarPtrs) 4252 if (PossibleNonScalarPtrs.find(I) == PossibleNonScalarPtrs.end()) { 4253 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 4254 Worklist.insert(I); 4255 } 4256 4257 // (3) Add to the worklist all pointer induction variables and their update 4258 // instructions. 4259 // 4260 // TODO: Once we are able to vectorize pointer induction variables we should 4261 // no longer insert them into the worklist here. 4262 auto *Latch = TheLoop->getLoopLatch(); 4263 for (auto &Induction : *Legal->getInductionVars()) { 4264 auto *Ind = Induction.first; 4265 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4266 if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction) 4267 continue; 4268 Worklist.insert(Ind); 4269 Worklist.insert(IndUpdate); 4270 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4271 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4272 << "\n"); 4273 } 4274 4275 // Insert the forced scalars. 4276 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4277 // induction variable when the PHI user is scalarized. 4278 auto ForcedScalar = ForcedScalars.find(VF); 4279 if (ForcedScalar != ForcedScalars.end()) 4280 for (auto *I : ForcedScalar->second) 4281 Worklist.insert(I); 4282 4283 // Expand the worklist by looking through any bitcasts and getelementptr 4284 // instructions we've already identified as scalar. This is similar to the 4285 // expansion step in collectLoopUniforms(); however, here we're only 4286 // expanding to include additional bitcasts and getelementptr instructions. 4287 unsigned Idx = 0; 4288 while (Idx != Worklist.size()) { 4289 Instruction *Dst = Worklist[Idx++]; 4290 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4291 continue; 4292 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4293 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4294 auto *J = cast<Instruction>(U); 4295 return !TheLoop->contains(J) || Worklist.count(J) || 4296 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4297 isScalarUse(J, Src)); 4298 })) { 4299 Worklist.insert(Src); 4300 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4301 } 4302 } 4303 4304 // An induction variable will remain scalar if all users of the induction 4305 // variable and induction variable update remain scalar. 4306 for (auto &Induction : *Legal->getInductionVars()) { 4307 auto *Ind = Induction.first; 4308 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4309 4310 // We already considered pointer induction variables, so there's no reason 4311 // to look at their users again. 4312 // 4313 // TODO: Once we are able to vectorize pointer induction variables we 4314 // should no longer skip over them here. 4315 if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction) 4316 continue; 4317 4318 // Determine if all users of the induction variable are scalar after 4319 // vectorization. 
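// For example, an induction variable used only by its own update and by
// getelementptr instructions already in the worklist remains scalar, whereas
// one whose value is also stored to memory as data will typically need a
// vector form and is left out here.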
4320 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4321 auto *I = cast<Instruction>(U); 4322 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 4323 }); 4324 if (!ScalarInd) 4325 continue; 4326 4327 // Determine if all users of the induction variable update instruction are 4328 // scalar after vectorization. 4329 auto ScalarIndUpdate = 4330 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4331 auto *I = cast<Instruction>(U); 4332 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 4333 }); 4334 if (!ScalarIndUpdate) 4335 continue; 4336 4337 // The induction variable and its update instruction will remain scalar. 4338 Worklist.insert(Ind); 4339 Worklist.insert(IndUpdate); 4340 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4341 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4342 << "\n"); 4343 } 4344 4345 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4346 } 4347 4348 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, unsigned VF) { 4349 if (!blockNeedsPredication(I->getParent())) 4350 return false; 4351 switch(I->getOpcode()) { 4352 default: 4353 break; 4354 case Instruction::Load: 4355 case Instruction::Store: { 4356 if (!Legal->isMaskRequired(I)) 4357 return false; 4358 auto *Ptr = getLoadStorePointerOperand(I); 4359 auto *Ty = getMemInstValueType(I); 4360 // We have already decided how to vectorize this instruction, get that 4361 // result. 4362 if (VF > 1) { 4363 InstWidening WideningDecision = getWideningDecision(I, VF); 4364 assert(WideningDecision != CM_Unknown && 4365 "Widening decision should be ready at this moment"); 4366 return WideningDecision == CM_Scalarize; 4367 } 4368 return isa<LoadInst>(I) ? 4369 !(isLegalMaskedLoad(Ty, Ptr) || isLegalMaskedGather(Ty)) 4370 : !(isLegalMaskedStore(Ty, Ptr) || isLegalMaskedScatter(Ty)); 4371 } 4372 case Instruction::UDiv: 4373 case Instruction::SDiv: 4374 case Instruction::SRem: 4375 case Instruction::URem: 4376 return mayDivideByZero(*I); 4377 } 4378 return false; 4379 } 4380 4381 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I, 4382 unsigned VF) { 4383 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4384 assert(getWideningDecision(I, VF) == CM_Unknown && 4385 "Decision should not be set yet."); 4386 auto *Group = getInterleavedAccessGroup(I); 4387 assert(Group && "Must have a group."); 4388 4389 // Check if masking is required. 4390 // A Group may need masking for one of two reasons: it resides in a block that 4391 // needs predication, or it was decided to use masking to deal with gaps. 4392 bool PredicatedAccessRequiresMasking = 4393 Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); 4394 bool AccessWithGapsRequiresMasking = 4395 Group->requiresScalarEpilogue() && !IsScalarEpilogueAllowed; 4396 if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking) 4397 return true; 4398 4399 // If masked interleaving is required, we expect that the user/target had 4400 // enabled it, because otherwise it either wouldn't have been created or 4401 // it should have been invalidated by the CostModel. 4402 assert(useMaskedInterleavedAccesses(TTI) && 4403 "Masked interleave-groups for predicated accesses are not enabled."); 4404 4405 auto *Ty = getMemInstValueType(I); 4406 return isa<LoadInst>(I) ? 
TTI.isLegalMaskedLoad(Ty) 4407 : TTI.isLegalMaskedStore(Ty); 4408 } 4409 4410 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I, 4411 unsigned VF) { 4412 // Get and ensure we have a valid memory instruction. 4413 LoadInst *LI = dyn_cast<LoadInst>(I); 4414 StoreInst *SI = dyn_cast<StoreInst>(I); 4415 assert((LI || SI) && "Invalid memory instruction"); 4416 4417 auto *Ptr = getLoadStorePointerOperand(I); 4418 4419 // In order to be widened, the pointer should be consecutive, first of all. 4420 if (!Legal->isConsecutivePtr(Ptr)) 4421 return false; 4422 4423 // If the instruction is a store located in a predicated block, it will be 4424 // scalarized. 4425 if (isScalarWithPredication(I)) 4426 return false; 4427 4428 // If the instruction's allocated size doesn't equal it's type size, it 4429 // requires padding and will be scalarized. 4430 auto &DL = I->getModule()->getDataLayout(); 4431 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 4432 if (hasIrregularType(ScalarTy, DL, VF)) 4433 return false; 4434 4435 return true; 4436 } 4437 4438 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) { 4439 // We should not collect Uniforms more than once per VF. Right now, 4440 // this function is called from collectUniformsAndScalars(), which 4441 // already does this check. Collecting Uniforms for VF=1 does not make any 4442 // sense. 4443 4444 assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() && 4445 "This function should not be visited twice for the same VF"); 4446 4447 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 4448 // not analyze again. Uniforms.count(VF) will return 1. 4449 Uniforms[VF].clear(); 4450 4451 // We now know that the loop is vectorizable! 4452 // Collect instructions inside the loop that will remain uniform after 4453 // vectorization. 4454 4455 // Global values, params and instructions outside of current loop are out of 4456 // scope. 4457 auto isOutOfScope = [&](Value *V) -> bool { 4458 Instruction *I = dyn_cast<Instruction>(V); 4459 return (!I || !TheLoop->contains(I)); 4460 }; 4461 4462 SetVector<Instruction *> Worklist; 4463 BasicBlock *Latch = TheLoop->getLoopLatch(); 4464 4465 // Start with the conditional branch. If the branch condition is an 4466 // instruction contained in the loop that is only used by the branch, it is 4467 // uniform. 4468 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4469 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) { 4470 Worklist.insert(Cmp); 4471 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n"); 4472 } 4473 4474 // Holds consecutive and consecutive-like pointers. Consecutive-like pointers 4475 // are pointers that are treated like consecutive pointers during 4476 // vectorization. The pointer operands of interleaved accesses are an 4477 // example. 4478 SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs; 4479 4480 // Holds pointer operands of instructions that are possibly non-uniform. 
4481 SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs; 4482 4483 auto isUniformDecision = [&](Instruction *I, unsigned VF) { 4484 InstWidening WideningDecision = getWideningDecision(I, VF); 4485 assert(WideningDecision != CM_Unknown && 4486 "Widening decision should be ready at this moment"); 4487 4488 return (WideningDecision == CM_Widen || 4489 WideningDecision == CM_Widen_Reverse || 4490 WideningDecision == CM_Interleave); 4491 }; 4492 // Iterate over the instructions in the loop, and collect all 4493 // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible 4494 // that a consecutive-like pointer operand will be scalarized, we collect it 4495 // in PossibleNonUniformPtrs instead. We use two sets here because a single 4496 // getelementptr instruction can be used by both vectorized and scalarized 4497 // memory instructions. For example, if a loop loads and stores from the same 4498 // location, but the store is conditional, the store will be scalarized, and 4499 // the getelementptr won't remain uniform. 4500 for (auto *BB : TheLoop->blocks()) 4501 for (auto &I : *BB) { 4502 // If there's no pointer operand, there's nothing to do. 4503 auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 4504 if (!Ptr) 4505 continue; 4506 4507 // True if all users of Ptr are memory accesses that have Ptr as their 4508 // pointer operand. 4509 auto UsersAreMemAccesses = 4510 llvm::all_of(Ptr->users(), [&](User *U) -> bool { 4511 return getLoadStorePointerOperand(U) == Ptr; 4512 }); 4513 4514 // Ensure the memory instruction will not be scalarized or used by 4515 // gather/scatter, making its pointer operand non-uniform. If the pointer 4516 // operand is used by any instruction other than a memory access, we 4517 // conservatively assume the pointer operand may be non-uniform. 4518 if (!UsersAreMemAccesses || !isUniformDecision(&I, VF)) 4519 PossibleNonUniformPtrs.insert(Ptr); 4520 4521 // If the memory instruction will be vectorized and its pointer operand 4522 // is consecutive-like, or interleaving - the pointer operand should 4523 // remain uniform. 4524 else 4525 ConsecutiveLikePtrs.insert(Ptr); 4526 } 4527 4528 // Add to the Worklist all consecutive and consecutive-like pointers that 4529 // aren't also identified as possibly non-uniform. 4530 for (auto *V : ConsecutiveLikePtrs) 4531 if (PossibleNonUniformPtrs.find(V) == PossibleNonUniformPtrs.end()) { 4532 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n"); 4533 Worklist.insert(V); 4534 } 4535 4536 // Expand Worklist in topological order: whenever a new instruction 4537 // is added , its users should be already inside Worklist. It ensures 4538 // a uniform instruction will only be used by uniform instructions. 4539 unsigned idx = 0; 4540 while (idx != Worklist.size()) { 4541 Instruction *I = Worklist[idx++]; 4542 4543 for (auto OV : I->operand_values()) { 4544 // isOutOfScope operands cannot be uniform instructions. 4545 if (isOutOfScope(OV)) 4546 continue; 4547 // First order recurrence Phi's should typically be considered 4548 // non-uniform. 4549 auto *OP = dyn_cast<PHINode>(OV); 4550 if (OP && Legal->isFirstOrderRecurrence(OP)) 4551 continue; 4552 // If all the users of the operand are uniform, then add the 4553 // operand into the uniform worklist. 
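// E.g., a getelementptr operand whose only users are memory instructions
// that keep it as a uniform pointer operand, or instructions already in the
// worklist, is itself added as uniform here.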
4554 auto *OI = cast<Instruction>(OV); 4555 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 4556 auto *J = cast<Instruction>(U); 4557 return Worklist.count(J) || 4558 (OI == getLoadStorePointerOperand(J) && 4559 isUniformDecision(J, VF)); 4560 })) { 4561 Worklist.insert(OI); 4562 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n"); 4563 } 4564 } 4565 } 4566 4567 // Returns true if Ptr is the pointer operand of a memory access instruction 4568 // I, and I is known to not require scalarization. 4569 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 4570 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 4571 }; 4572 4573 // For an instruction to be added into Worklist above, all its users inside 4574 // the loop should also be in Worklist. However, this condition cannot be 4575 // true for phi nodes that form a cyclic dependence. We must process phi 4576 // nodes separately. An induction variable will remain uniform if all users 4577 // of the induction variable and induction variable update remain uniform. 4578 // The code below handles both pointer and non-pointer induction variables. 4579 for (auto &Induction : *Legal->getInductionVars()) { 4580 auto *Ind = Induction.first; 4581 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4582 4583 // Determine if all users of the induction variable are uniform after 4584 // vectorization. 4585 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4586 auto *I = cast<Instruction>(U); 4587 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4588 isVectorizedMemAccessUse(I, Ind); 4589 }); 4590 if (!UniformInd) 4591 continue; 4592 4593 // Determine if all users of the induction variable update instruction are 4594 // uniform after vectorization. 4595 auto UniformIndUpdate = 4596 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4597 auto *I = cast<Instruction>(U); 4598 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4599 isVectorizedMemAccessUse(I, IndUpdate); 4600 }); 4601 if (!UniformIndUpdate) 4602 continue; 4603 4604 // The induction variable and its update instruction will remain uniform. 4605 Worklist.insert(Ind); 4606 Worklist.insert(IndUpdate); 4607 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n"); 4608 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate 4609 << "\n"); 4610 } 4611 4612 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 4613 } 4614 4615 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) { 4616 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 4617 // TODO: It may by useful to do since it's still likely to be dynamically 4618 // uniform if the target can skip. 4619 LLVM_DEBUG( 4620 dbgs() << "LV: Not inserting runtime ptr check for divergent target"); 4621 4622 ORE->emit( 4623 createMissedAnalysis("CantVersionLoopWithDivergentTarget") 4624 << "runtime pointer checks needed. Not enabled for divergent target"); 4625 4626 return None; 4627 } 4628 4629 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 4630 if (!OptForSize) // Remaining checks deal with scalar loop when OptForSize. 4631 return computeFeasibleMaxVF(OptForSize, TC); 4632 4633 if (Legal->getRuntimePointerChecking()->Need) { 4634 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4635 << "runtime pointer checks needed. 
Enable vectorization of this " 4636 "loop with '#pragma clang loop vectorize(enable)' when " 4637 "compiling with -Os/-Oz"); 4638 LLVM_DEBUG( 4639 dbgs() 4640 << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); 4641 return None; 4642 } 4643 4644 if (!PSE.getUnionPredicate().getPredicates().empty()) { 4645 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4646 << "runtime SCEV checks needed. Enable vectorization of this " 4647 "loop with '#pragma clang loop vectorize(enable)' when " 4648 "compiling with -Os/-Oz"); 4649 LLVM_DEBUG( 4650 dbgs() 4651 << "LV: Aborting. Runtime SCEV check is required with -Os/-Oz.\n"); 4652 return None; 4653 } 4654 4655 // FIXME: Avoid specializing for stride==1 instead of bailing out. 4656 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 4657 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4658 << "runtime stride == 1 checks needed. Enable vectorization of " 4659 "this loop with '#pragma clang loop vectorize(enable)' when " 4660 "compiling with -Os/-Oz"); 4661 LLVM_DEBUG( 4662 dbgs() 4663 << "LV: Aborting. Runtime stride check is required with -Os/-Oz.\n"); 4664 return None; 4665 } 4666 4667 // If we optimize the program for size, avoid creating the tail loop. 4668 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 4669 4670 if (TC == 1) { 4671 ORE->emit(createMissedAnalysis("SingleIterationLoop") 4672 << "loop trip count is one, irrelevant for vectorization"); 4673 LLVM_DEBUG(dbgs() << "LV: Aborting, single iteration (non) loop.\n"); 4674 return None; 4675 } 4676 4677 // Record that scalar epilogue is not allowed. 4678 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 4679 4680 IsScalarEpilogueAllowed = !OptForSize; 4681 4682 // We don't create an epilogue when optimizing for size. 4683 // Invalidate interleave groups that require an epilogue if we can't mask 4684 // the interleave-group. 4685 if (!useMaskedInterleavedAccesses(TTI)) 4686 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 4687 4688 unsigned MaxVF = computeFeasibleMaxVF(OptForSize, TC); 4689 4690 if (TC > 0 && TC % MaxVF == 0) { 4691 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 4692 return MaxVF; 4693 } 4694 4695 // If we don't know the precise trip count, or if the trip count that we 4696 // found modulo the vectorization factor is not zero, try to fold the tail 4697 // by masking. 4698 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 4699 if (Legal->canFoldTailByMasking()) { 4700 FoldTailByMasking = true; 4701 return MaxVF; 4702 } 4703 4704 if (TC == 0) { 4705 ORE->emit( 4706 createMissedAnalysis("UnknownLoopCountComplexCFG") 4707 << "unable to calculate the loop count due to complex control flow"); 4708 return None; 4709 } 4710 4711 ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize") 4712 << "cannot optimize for size and vectorize at the same time. " 4713 "Enable vectorization of this loop with '#pragma clang loop " 4714 "vectorize(enable)' when compiling with -Os/-Oz"); 4715 return None; 4716 } 4717 4718 unsigned 4719 LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize, 4720 unsigned ConstTripCount) { 4721 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 4722 unsigned SmallestType, WidestType; 4723 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 4724 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 4725 4726 // Get the maximum safe dependence distance in bits computed by LAA. 
4727 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 4728 // the memory access that is most restrictive (involved in the smallest 4729 // dependence distance). 4730 unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth(); 4731 4732 WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth); 4733 4734 unsigned MaxVectorSize = WidestRegister / WidestType; 4735 4736 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 4737 << " / " << WidestType << " bits.\n"); 4738 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 4739 << WidestRegister << " bits.\n"); 4740 4741 assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements" 4742 " into one vector!"); 4743 if (MaxVectorSize == 0) { 4744 LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 4745 MaxVectorSize = 1; 4746 return MaxVectorSize; 4747 } else if (ConstTripCount && ConstTripCount < MaxVectorSize && 4748 isPowerOf2_32(ConstTripCount)) { 4749 // We need to clamp the VF to the ConstTripCount. There is no point in 4750 // choosing a higher viable VF as done in the loop below. 4751 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 4752 << ConstTripCount << "\n"); 4753 MaxVectorSize = ConstTripCount; 4754 return MaxVectorSize; 4755 } 4756 4757 unsigned MaxVF = MaxVectorSize; 4758 if (TTI.shouldMaximizeVectorBandwidth(OptForSize) || 4759 (MaximizeBandwidth && !OptForSize)) { 4760 // Collect all viable vectorization factors larger than the default MaxVF 4761 // (i.e. MaxVectorSize). 4762 SmallVector<unsigned, 8> VFs; 4763 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 4764 for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2) 4765 VFs.push_back(VS); 4766 4767 // For each VF calculate its register usage. 4768 auto RUs = calculateRegisterUsage(VFs); 4769 4770 // Select the largest VF which doesn't require more registers than existing 4771 // ones. 4772 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true); 4773 for (int i = RUs.size() - 1; i >= 0; --i) { 4774 if (RUs[i].MaxLocalUsers <= TargetNumRegisters) { 4775 MaxVF = VFs[i]; 4776 break; 4777 } 4778 } 4779 if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) { 4780 if (MaxVF < MinVF) { 4781 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 4782 << ") with target's minimum: " << MinVF << '\n'); 4783 MaxVF = MinVF; 4784 } 4785 } 4786 } 4787 return MaxVF; 4788 } 4789 4790 VectorizationFactor 4791 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) { 4792 float Cost = expectedCost(1).first; 4793 const float ScalarCost = Cost; 4794 unsigned Width = 1; 4795 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 4796 4797 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 4798 if (ForceVectorization && MaxVF > 1) { 4799 // Ignore scalar width, because the user explicitly wants vectorization. 4800 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 4801 // evaluation. 4802 Cost = std::numeric_limits<float>::max(); 4803 } 4804 4805 for (unsigned i = 2; i <= MaxVF; i *= 2) { 4806 // Notice that the vector loop needs to be executed fewer times, so 4807 // we need to divide the cost of the vector loop by the width of 4808 // the vector elements.
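// As a hypothetical example: if the scalar loop costs 8 per iteration and
// the VF=4 loop body costs 20, the per-lane vector cost is 20 / 4 = 5, so
// VF=4 beats the scalar cost of 8 and becomes the current best width.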
4809 VectorizationCostTy C = expectedCost(i); 4810 float VectorCost = C.first / (float)i; 4811 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 4812 << " costs: " << (int)VectorCost << ".\n"); 4813 if (!C.second && !ForceVectorization) { 4814 LLVM_DEBUG( 4815 dbgs() << "LV: Not considering vector loop of width " << i 4816 << " because it will not generate any vector instructions.\n"); 4817 continue; 4818 } 4819 if (VectorCost < Cost) { 4820 Cost = VectorCost; 4821 Width = i; 4822 } 4823 } 4824 4825 if (!EnableCondStoresVectorization && NumPredStores) { 4826 ORE->emit(createMissedAnalysis("ConditionalStore") 4827 << "store that is conditionally executed prevents vectorization"); 4828 LLVM_DEBUG( 4829 dbgs() << "LV: No vectorization. There are conditional stores.\n"); 4830 Width = 1; 4831 Cost = ScalarCost; 4832 } 4833 4834 LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 4835 << "LV: Vectorization seems to be not beneficial, " 4836 << "but was forced by a user.\n"); 4837 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 4838 VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)}; 4839 return Factor; 4840 } 4841 4842 std::pair<unsigned, unsigned> 4843 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 4844 unsigned MinWidth = -1U; 4845 unsigned MaxWidth = 8; 4846 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 4847 4848 // For each block. 4849 for (BasicBlock *BB : TheLoop->blocks()) { 4850 // For each instruction in the loop. 4851 for (Instruction &I : BB->instructionsWithoutDebug()) { 4852 Type *T = I.getType(); 4853 4854 // Skip ignored values. 4855 if (ValuesToIgnore.find(&I) != ValuesToIgnore.end()) 4856 continue; 4857 4858 // Only examine Loads, Stores and PHINodes. 4859 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 4860 continue; 4861 4862 // Examine PHI nodes that are reduction variables. Update the type to 4863 // account for the recurrence type. 4864 if (auto *PN = dyn_cast<PHINode>(&I)) { 4865 if (!Legal->isReductionVariable(PN)) 4866 continue; 4867 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN]; 4868 T = RdxDesc.getRecurrenceType(); 4869 } 4870 4871 // Examine the stored values. 4872 if (auto *ST = dyn_cast<StoreInst>(&I)) 4873 T = ST->getValueOperand()->getType(); 4874 4875 // Ignore loaded pointer types and stored pointer types that are not 4876 // vectorizable. 4877 // 4878 // FIXME: The check here attempts to predict whether a load or store will 4879 // be vectorized. We only know this for certain after a VF has 4880 // been selected. Here, we assume that if an access can be 4881 // vectorized, it will be. We should also look at extending this 4882 // optimization to non-pointer types. 4883 // 4884 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 4885 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 4886 continue; 4887 4888 MinWidth = std::min(MinWidth, 4889 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 4890 MaxWidth = std::max(MaxWidth, 4891 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 4892 } 4893 } 4894 4895 return {MinWidth, MaxWidth}; 4896 } 4897 4898 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, 4899 unsigned VF, 4900 unsigned LoopCost) { 4901 // -- The interleave heuristics -- 4902 // We interleave the loop in order to expose ILP and reduce the loop overhead. 4903 // There are many micro-architectural considerations that we can't predict 4904 // at this level. 
For example, frontend pressure (on decode or fetch) due to 4905 // code size, or the number and capabilities of the execution ports. 4906 // 4907 // We use the following heuristics to select the interleave count: 4908 // 1. If the code has reductions, then we interleave to break the cross 4909 // iteration dependency. 4910 // 2. If the loop is really small, then we interleave to reduce the loop 4911 // overhead. 4912 // 3. We don't interleave if we think that we will spill registers to memory 4913 // due to the increased register pressure. 4914 4915 // When we optimize for size, we don't interleave. 4916 if (OptForSize) 4917 return 1; 4918 4919 // We used the distance for the interleave count. 4920 if (Legal->getMaxSafeDepDistBytes() != -1U) 4921 return 1; 4922 4923 // Do not interleave loops with a relatively small trip count. 4924 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 4925 if (TC > 1 && TC < TinyTripCountInterleaveThreshold) 4926 return 1; 4927 4928 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1); 4929 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 4930 << " registers\n"); 4931 4932 if (VF == 1) { 4933 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 4934 TargetNumRegisters = ForceTargetNumScalarRegs; 4935 } else { 4936 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 4937 TargetNumRegisters = ForceTargetNumVectorRegs; 4938 } 4939 4940 RegisterUsage R = calculateRegisterUsage({VF})[0]; 4941 // We divide by these constants so assume that we have at least one 4942 // instruction that uses at least one register. 4943 R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U); 4944 4945 // We calculate the interleave count using the following formula. 4946 // Subtract the number of loop invariants from the number of available 4947 // registers. These registers are used by all of the interleaved instances. 4948 // Next, divide the remaining registers by the number of registers that is 4949 // required by the loop, in order to estimate how many parallel instances 4950 // fit without causing spills. All of this is rounded down if necessary to be 4951 // a power of two. We want power of two interleave count to simplify any 4952 // addressing operations or alignment considerations. 4953 // We also want power of two interleave counts to ensure that the induction 4954 // variable of the vector loop wraps to zero, when tail is folded by masking; 4955 // this currently happens when OptForSize, in which case IC is set to 1 above. 4956 unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) / 4957 R.MaxLocalUsers); 4958 4959 // Don't count the induction variable as interleaved. 4960 if (EnableIndVarRegisterHeur) 4961 IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) / 4962 std::max(1U, (R.MaxLocalUsers - 1))); 4963 4964 // Clamp the interleave ranges to reasonable counts. 4965 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF); 4966 4967 // Check if the user has overridden the max. 4968 if (VF == 1) { 4969 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 4970 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 4971 } else { 4972 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 4973 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 4974 } 4975 4976 // If we did not calculate the cost for VF (because the user selected the VF) 4977 // then we calculate the cost of VF here. 
4978 if (LoopCost == 0) 4979 LoopCost = expectedCost(VF).first; 4980 4981 // Clamp the calculated IC to be between the 1 and the max interleave count 4982 // that the target allows. 4983 if (IC > MaxInterleaveCount) 4984 IC = MaxInterleaveCount; 4985 else if (IC < 1) 4986 IC = 1; 4987 4988 // Interleave if we vectorized this loop and there is a reduction that could 4989 // benefit from interleaving. 4990 if (VF > 1 && !Legal->getReductionVars()->empty()) { 4991 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 4992 return IC; 4993 } 4994 4995 // Note that if we've already vectorized the loop we will have done the 4996 // runtime check and so interleaving won't require further checks. 4997 bool InterleavingRequiresRuntimePointerCheck = 4998 (VF == 1 && Legal->getRuntimePointerChecking()->Need); 4999 5000 // We want to interleave small loops in order to reduce the loop overhead and 5001 // potentially expose ILP opportunities. 5002 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'); 5003 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 5004 // We assume that the cost overhead is 1 and we use the cost model 5005 // to estimate the cost of the loop and interleave until the cost of the 5006 // loop overhead is about 5% of the cost of the loop. 5007 unsigned SmallIC = 5008 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5009 5010 // Interleave until store/load ports (estimated by max interleave count) are 5011 // saturated. 5012 unsigned NumStores = Legal->getNumStores(); 5013 unsigned NumLoads = Legal->getNumLoads(); 5014 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5015 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5016 5017 // If we have a scalar reduction (vector reductions are already dealt with 5018 // by this point), we can increase the critical path length if the loop 5019 // we're interleaving is inside another loop. Limit, by default to 2, so the 5020 // critical path only gets increased by one reduction operation. 5021 if (!Legal->getReductionVars()->empty() && TheLoop->getLoopDepth() > 1) { 5022 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 5023 SmallIC = std::min(SmallIC, F); 5024 StoresIC = std::min(StoresIC, F); 5025 LoadsIC = std::min(LoadsIC, F); 5026 } 5027 5028 if (EnableLoadStoreRuntimeInterleave && 5029 std::max(StoresIC, LoadsIC) > SmallIC) { 5030 LLVM_DEBUG( 5031 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 5032 return std::max(StoresIC, LoadsIC); 5033 } 5034 5035 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 5036 return SmallIC; 5037 } 5038 5039 // Interleave if this is a large loop (small loops are already dealt with by 5040 // this point) that could benefit from interleaving. 5041 bool HasReductions = !Legal->getReductionVars()->empty(); 5042 if (TTI.enableAggressiveInterleaving(HasReductions)) { 5043 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 5044 return IC; 5045 } 5046 5047 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 5048 return 1; 5049 } 5050 5051 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 5052 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) { 5053 // This function calculates the register usage by measuring the highest number 5054 // of values that are alive at a single location. Obviously, this is a very 5055 // rough estimation. We scan the loop in a topological order in order and 5056 // assign a number to each instruction. 
We use RPO to ensure that defs are 5057 // met before their users. We assume that each instruction that has in-loop 5058 // users starts an interval. We record every time that an in-loop value is 5059 // used, so we have a list of the first and last occurrences of each 5060 // instruction. Next, we transpose this data structure into a multi map that 5061 // holds the list of intervals that *end* at a specific location. This multi 5062 // map allows us to perform a linear search. We scan the instructions linearly 5063 // and record each time that a new interval starts, by placing it in a set. 5064 // If we find this value in the multi-map then we remove it from the set. 5065 // The max register usage is the maximum size of the set. 5066 // We also search for instructions that are defined outside the loop, but are 5067 // used inside the loop. We need this number separately from the max-interval 5068 // usage number because when we unroll, loop-invariant values do not take 5069 // more register. 5070 LoopBlocksDFS DFS(TheLoop); 5071 DFS.perform(LI); 5072 5073 RegisterUsage RU; 5074 5075 // Each 'key' in the map opens a new interval. The values 5076 // of the map are the index of the 'last seen' usage of the 5077 // instruction that is the key. 5078 using IntervalMap = DenseMap<Instruction *, unsigned>; 5079 5080 // Maps instruction to its index. 5081 SmallVector<Instruction *, 64> IdxToInstr; 5082 // Marks the end of each interval. 5083 IntervalMap EndPoint; 5084 // Saves the list of instruction indices that are used in the loop. 5085 SmallPtrSet<Instruction *, 8> Ends; 5086 // Saves the list of values that are used in the loop but are 5087 // defined outside the loop, such as arguments and constants. 5088 SmallPtrSet<Value *, 8> LoopInvariants; 5089 5090 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 5091 for (Instruction &I : BB->instructionsWithoutDebug()) { 5092 IdxToInstr.push_back(&I); 5093 5094 // Save the end location of each USE. 5095 for (Value *U : I.operands()) { 5096 auto *Instr = dyn_cast<Instruction>(U); 5097 5098 // Ignore non-instruction values such as arguments, constants, etc. 5099 if (!Instr) 5100 continue; 5101 5102 // If this instruction is outside the loop then record it and continue. 5103 if (!TheLoop->contains(Instr)) { 5104 LoopInvariants.insert(Instr); 5105 continue; 5106 } 5107 5108 // Overwrite previous end points. 5109 EndPoint[Instr] = IdxToInstr.size(); 5110 Ends.insert(Instr); 5111 } 5112 } 5113 } 5114 5115 // Saves the list of intervals that end with the index in 'key'. 5116 using InstrList = SmallVector<Instruction *, 2>; 5117 DenseMap<unsigned, InstrList> TransposeEnds; 5118 5119 // Transpose the EndPoints to a list of values that end at each index. 5120 for (auto &Interval : EndPoint) 5121 TransposeEnds[Interval.second].push_back(Interval.first); 5122 5123 SmallPtrSet<Instruction *, 8> OpenIntervals; 5124 5125 // Get the size of the widest register. 5126 unsigned MaxSafeDepDist = -1U; 5127 if (Legal->getMaxSafeDepDistBytes() != -1U) 5128 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 5129 unsigned WidestRegister = 5130 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist); 5131 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5132 5133 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5134 SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0); 5135 5136 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5137 5138 // A lambda that gets the register usage for the given type and VF. 
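// For instance, assuming a 128-bit widest register, a value of type i32 at
// VF = 8 occupies max(1, 8 * 32 / 128) = 2 registers, while token-typed
// values are counted as zero.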
5139 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 5140 if (Ty->isTokenTy()) 5141 return 0U; 5142 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 5143 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 5144 }; 5145 5146 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 5147 Instruction *I = IdxToInstr[i]; 5148 5149 // Remove all of the instructions that end at this location. 5150 InstrList &List = TransposeEnds[i]; 5151 for (Instruction *ToRemove : List) 5152 OpenIntervals.erase(ToRemove); 5153 5154 // Ignore instructions that are never used within the loop. 5155 if (Ends.find(I) == Ends.end()) 5156 continue; 5157 5158 // Skip ignored values. 5159 if (ValuesToIgnore.find(I) != ValuesToIgnore.end()) 5160 continue; 5161 5162 // For each VF find the maximum usage of registers. 5163 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 5164 if (VFs[j] == 1) { 5165 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 5166 continue; 5167 } 5168 collectUniformsAndScalars(VFs[j]); 5169 // Count the number of live intervals. 5170 unsigned RegUsage = 0; 5171 for (auto Inst : OpenIntervals) { 5172 // Skip ignored values for VF > 1. 5173 if (VecValuesToIgnore.find(Inst) != VecValuesToIgnore.end() || 5174 isScalarAfterVectorization(Inst, VFs[j])) 5175 continue; 5176 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 5177 } 5178 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 5179 } 5180 5181 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 5182 << OpenIntervals.size() << '\n'); 5183 5184 // Add the current instruction to the list of open intervals. 5185 OpenIntervals.insert(I); 5186 } 5187 5188 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 5189 unsigned Invariant = 0; 5190 if (VFs[i] == 1) 5191 Invariant = LoopInvariants.size(); 5192 else { 5193 for (auto Inst : LoopInvariants) 5194 Invariant += GetRegUsage(Inst->getType(), VFs[i]); 5195 } 5196 5197 LLVM_DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); 5198 LLVM_DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); 5199 LLVM_DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant 5200 << '\n'); 5201 5202 RU.LoopInvariantRegs = Invariant; 5203 RU.MaxLocalUsers = MaxUsages[i]; 5204 RUs[i] = RU; 5205 } 5206 5207 return RUs; 5208 } 5209 5210 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 5211 // TODO: Cost model for emulated masked load/store is completely 5212 // broken. This hack guides the cost model to use an artificially 5213 // high enough value to practically disable vectorization with such 5214 // operations, except where previously deployed legality hack allowed 5215 // using very low cost values. This is to avoid regressions coming simply 5216 // from moving "masked load/store" check from legality to cost model. 5217 // Masked Load/Gather emulation was previously never allowed. 5218 // Limited number of Masked Store/Scatter emulation was allowed. 5219 assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction"); 5220 return isa<LoadInst>(I) || 5221 (isa<StoreInst>(I) && 5222 NumPredStores > NumberOfStoresToPredicate); 5223 } 5224 5225 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) { 5226 // If we aren't vectorizing the loop, or if we've already collected the 5227 // instructions to scalarize, there's nothing to do. Collection may already 5228 // have occurred if we have a user-selected VF and are now computing the 5229 // expected cost for interleaving. 
5230 if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end()) 5231 return; 5232 5233 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 5234 // not profitable to scalarize any instructions, the presence of VF in the 5235 // map will indicate that we've analyzed it already. 5236 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 5237 5238 // Find all the instructions that are scalar with predication in the loop and 5239 // determine if it would be better to not if-convert the blocks they are in. 5240 // If so, we also record the instructions to scalarize. 5241 for (BasicBlock *BB : TheLoop->blocks()) { 5242 if (!blockNeedsPredication(BB)) 5243 continue; 5244 for (Instruction &I : *BB) 5245 if (isScalarWithPredication(&I)) { 5246 ScalarCostsTy ScalarCosts; 5247 // Do not apply discount logic if hacked cost is needed 5248 // for emulated masked memrefs. 5249 if (!useEmulatedMaskMemRefHack(&I) && 5250 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 5251 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 5252 // Remember that BB will remain after vectorization. 5253 PredicatedBBsAfterVectorization.insert(BB); 5254 } 5255 } 5256 } 5257 5258 int LoopVectorizationCostModel::computePredInstDiscount( 5259 Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts, 5260 unsigned VF) { 5261 assert(!isUniformAfterVectorization(PredInst, VF) && 5262 "Instruction marked uniform-after-vectorization will be predicated"); 5263 5264 // Initialize the discount to zero, meaning that the scalar version and the 5265 // vector version cost the same. 5266 int Discount = 0; 5267 5268 // Holds instructions to analyze. The instructions we visit are mapped in 5269 // ScalarCosts. Those instructions are the ones that would be scalarized if 5270 // we find that the scalar version costs less. 5271 SmallVector<Instruction *, 8> Worklist; 5272 5273 // Returns true if the given instruction can be scalarized. 5274 auto canBeScalarized = [&](Instruction *I) -> bool { 5275 // We only attempt to scalarize instructions forming a single-use chain 5276 // from the original predicated block that would otherwise be vectorized. 5277 // Although not strictly necessary, we give up on instructions we know will 5278 // already be scalar to avoid traversing chains that are unlikely to be 5279 // beneficial. 5280 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 5281 isScalarAfterVectorization(I, VF)) 5282 return false; 5283 5284 // If the instruction is scalar with predication, it will be analyzed 5285 // separately. We ignore it within the context of PredInst. 5286 if (isScalarWithPredication(I)) 5287 return false; 5288 5289 // If any of the instruction's operands are uniform after vectorization, 5290 // the instruction cannot be scalarized. This prevents, for example, a 5291 // masked load from being scalarized. 5292 // 5293 // We assume we will only emit a value for lane zero of an instruction 5294 // marked uniform after vectorization, rather than VF identical values. 5295 // Thus, if we scalarize an instruction that uses a uniform, we would 5296 // create uses of values corresponding to the lanes we aren't emitting code 5297 // for. This behavior can be changed by allowing getScalarValue to clone 5298 // the lane zero values for uniforms rather than asserting. 
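// Concretely: if an operand is uniform-after-vectorization, only its lane-0
// scalar value will be emitted, so scalarizing a user of it for lanes
// 1..VF-1 would reference per-lane values that never exist.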
5299 for (Use &U : I->operands()) 5300 if (auto *J = dyn_cast<Instruction>(U.get())) 5301 if (isUniformAfterVectorization(J, VF)) 5302 return false; 5303 5304 // Otherwise, we can scalarize the instruction. 5305 return true; 5306 }; 5307 5308 // Returns true if an operand that cannot be scalarized must be extracted 5309 // from a vector. We will account for this scalarization overhead below. Note 5310 // that the non-void predicated instructions are placed in their own blocks, 5311 // and their return values are inserted into vectors. Thus, an extract would 5312 // still be required. 5313 auto needsExtract = [&](Instruction *I) -> bool { 5314 return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF); 5315 }; 5316 5317 // Compute the expected cost discount from scalarizing the entire expression 5318 // feeding the predicated instruction. We currently only consider expressions 5319 // that are single-use instruction chains. 5320 Worklist.push_back(PredInst); 5321 while (!Worklist.empty()) { 5322 Instruction *I = Worklist.pop_back_val(); 5323 5324 // If we've already analyzed the instruction, there's nothing to do. 5325 if (ScalarCosts.find(I) != ScalarCosts.end()) 5326 continue; 5327 5328 // Compute the cost of the vector instruction. Note that this cost already 5329 // includes the scalarization overhead of the predicated instruction. 5330 unsigned VectorCost = getInstructionCost(I, VF).first; 5331 5332 // Compute the cost of the scalarized instruction. This cost is the cost of 5333 // the instruction as if it wasn't if-converted and instead remained in the 5334 // predicated block. We will scale this cost by block probability after 5335 // computing the scalarization overhead. 5336 unsigned ScalarCost = VF * getInstructionCost(I, 1).first; 5337 5338 // Compute the scalarization overhead of needed insertelement instructions 5339 // and phi nodes. 5340 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 5341 ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF), 5342 true, false); 5343 ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI); 5344 } 5345 5346 // Compute the scalarization overhead of needed extractelement 5347 // instructions. For each of the instruction's operands, if the operand can 5348 // be scalarized, add it to the worklist; otherwise, account for the 5349 // overhead. 5350 for (Use &U : I->operands()) 5351 if (auto *J = dyn_cast<Instruction>(U.get())) { 5352 assert(VectorType::isValidElementType(J->getType()) && 5353 "Instruction has non-scalar type"); 5354 if (canBeScalarized(J)) 5355 Worklist.push_back(J); 5356 else if (needsExtract(J)) 5357 ScalarCost += TTI.getScalarizationOverhead( 5358 ToVectorTy(J->getType(),VF), false, true); 5359 } 5360 5361 // Scale the total scalar cost by block probability. 5362 ScalarCost /= getReciprocalPredBlockProb(); 5363 5364 // Compute the discount. A non-negative discount means the vector version 5365 // of the instruction costs more, and scalarizing would be beneficial. 5366 Discount += VectorCost - ScalarCost; 5367 ScalarCosts[I] = ScalarCost; 5368 } 5369 5370 return Discount; 5371 } 5372 5373 LoopVectorizationCostModel::VectorizationCostTy 5374 LoopVectorizationCostModel::expectedCost(unsigned VF) { 5375 VectorizationCostTy Cost; 5376 5377 // For each block. 5378 for (BasicBlock *BB : TheLoop->blocks()) { 5379 VectorizationCostTy BlockCost; 5380 5381 // For each instruction in the old loop. 5382 for (Instruction &I : BB->instructionsWithoutDebug()) { 5383 // Skip ignored values. 
5384 if (ValuesToIgnore.find(&I) != ValuesToIgnore.end() || 5385 (VF > 1 && VecValuesToIgnore.find(&I) != VecValuesToIgnore.end())) 5386 continue; 5387 5388 VectorizationCostTy C = getInstructionCost(&I, VF); 5389 5390 // Check if we should override the cost. 5391 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 5392 C.first = ForceTargetInstructionCost; 5393 5394 BlockCost.first += C.first; 5395 BlockCost.second |= C.second; 5396 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 5397 << " for VF " << VF << " For instruction: " << I 5398 << '\n'); 5399 } 5400 5401 // If we are vectorizing a predicated block, it will have been 5402 // if-converted. This means that the block's instructions (aside from 5403 // stores and instructions that may divide by zero) will now be 5404 // unconditionally executed. For the scalar case, we may not always execute 5405 // the predicated block. Thus, scale the block's cost by the probability of 5406 // executing it. 5407 if (VF == 1 && blockNeedsPredication(BB)) 5408 BlockCost.first /= getReciprocalPredBlockProb(); 5409 5410 Cost.first += BlockCost.first; 5411 Cost.second |= BlockCost.second; 5412 } 5413 5414 return Cost; 5415 } 5416 5417 /// Gets Address Access SCEV after verifying that the access pattern 5418 /// is loop invariant except the induction variable dependence. 5419 /// 5420 /// This SCEV can be sent to the Target in order to estimate the address 5421 /// calculation cost. 5422 static const SCEV *getAddressAccessSCEV( 5423 Value *Ptr, 5424 LoopVectorizationLegality *Legal, 5425 PredicatedScalarEvolution &PSE, 5426 const Loop *TheLoop) { 5427 5428 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 5429 if (!Gep) 5430 return nullptr; 5431 5432 // We are looking for a gep with all loop invariant indices except for one 5433 // which should be an induction variable. 5434 auto SE = PSE.getSE(); 5435 unsigned NumOperands = Gep->getNumOperands(); 5436 for (unsigned i = 1; i < NumOperands; ++i) { 5437 Value *Opd = Gep->getOperand(i); 5438 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 5439 !Legal->isInductionVariable(Opd)) 5440 return nullptr; 5441 } 5442 5443 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 5444 return PSE.getSCEV(Ptr); 5445 } 5446 5447 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 5448 return Legal->hasStride(I->getOperand(0)) || 5449 Legal->hasStride(I->getOperand(1)); 5450 } 5451 5452 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 5453 unsigned VF) { 5454 assert(VF > 1 && "Scalarization cost of instruction implies vectorization."); 5455 Type *ValTy = getMemInstValueType(I); 5456 auto SE = PSE.getSE(); 5457 5458 unsigned Alignment = getLoadStoreAlignment(I); 5459 unsigned AS = getLoadStoreAddressSpace(I); 5460 Value *Ptr = getLoadStorePointerOperand(I); 5461 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 5462 5463 // Figure out whether the access is strided and get the stride value 5464 // if it's known in compile time 5465 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 5466 5467 // Get the cost of the scalar memory instruction and address computation. 5468 unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 5469 5470 // Don't pass *I here, since it is scalar but will actually be part of a 5471 // vectorized loop where the user of it is a vectorized instruction. 
5472 Cost += VF * 5473 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 5474 AS); 5475 5476 // Get the overhead of the extractelement and insertelement instructions 5477 // we might create due to scalarization. 5478 Cost += getScalarizationOverhead(I, VF, TTI); 5479 5480 // If we have a predicated store, it may not be executed for each vector 5481 // lane. Scale the cost by the probability of executing the predicated 5482 // block. 5483 if (isPredicatedInst(I)) { 5484 Cost /= getReciprocalPredBlockProb(); 5485 5486 if (useEmulatedMaskMemRefHack(I)) 5487 // Artificially setting to a high enough value to practically disable 5488 // vectorization with such operations. 5489 Cost = 3000000; 5490 } 5491 5492 return Cost; 5493 } 5494 5495 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 5496 unsigned VF) { 5497 Type *ValTy = getMemInstValueType(I); 5498 Type *VectorTy = ToVectorTy(ValTy, VF); 5499 unsigned Alignment = getLoadStoreAlignment(I); 5500 Value *Ptr = getLoadStorePointerOperand(I); 5501 unsigned AS = getLoadStoreAddressSpace(I); 5502 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 5503 5504 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 5505 "Stride should be 1 or -1 for consecutive memory access"); 5506 unsigned Cost = 0; 5507 if (Legal->isMaskRequired(I)) 5508 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 5509 else 5510 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I); 5511 5512 bool Reverse = ConsecutiveStride < 0; 5513 if (Reverse) 5514 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5515 return Cost; 5516 } 5517 5518 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 5519 unsigned VF) { 5520 Type *ValTy = getMemInstValueType(I); 5521 Type *VectorTy = ToVectorTy(ValTy, VF); 5522 unsigned Alignment = getLoadStoreAlignment(I); 5523 unsigned AS = getLoadStoreAddressSpace(I); 5524 if (isa<LoadInst>(I)) { 5525 return TTI.getAddressComputationCost(ValTy) + 5526 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) + 5527 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 5528 } 5529 StoreInst *SI = cast<StoreInst>(I); 5530 5531 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 5532 return TTI.getAddressComputationCost(ValTy) + 5533 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS) + 5534 (isLoopInvariantStoreValue ? 
0 : TTI.getVectorInstrCost( 5535 Instruction::ExtractElement, 5536 VectorTy, VF - 1)); 5537 } 5538 5539 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 5540 unsigned VF) { 5541 Type *ValTy = getMemInstValueType(I); 5542 Type *VectorTy = ToVectorTy(ValTy, VF); 5543 unsigned Alignment = getLoadStoreAlignment(I); 5544 Value *Ptr = getLoadStorePointerOperand(I); 5545 5546 return TTI.getAddressComputationCost(VectorTy) + 5547 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 5548 Legal->isMaskRequired(I), Alignment); 5549 } 5550 5551 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 5552 unsigned VF) { 5553 Type *ValTy = getMemInstValueType(I); 5554 Type *VectorTy = ToVectorTy(ValTy, VF); 5555 unsigned AS = getLoadStoreAddressSpace(I); 5556 5557 auto Group = getInterleavedAccessGroup(I); 5558 assert(Group && "Fail to get an interleaved access group."); 5559 5560 unsigned InterleaveFactor = Group->getFactor(); 5561 Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 5562 5563 // Holds the indices of existing members in an interleaved load group. 5564 // An interleaved store group doesn't need this as it doesn't allow gaps. 5565 SmallVector<unsigned, 4> Indices; 5566 if (isa<LoadInst>(I)) { 5567 for (unsigned i = 0; i < InterleaveFactor; i++) 5568 if (Group->getMember(i)) 5569 Indices.push_back(i); 5570 } 5571 5572 // Calculate the cost of the whole interleaved group. 5573 bool UseMaskForGaps = 5574 Group->requiresScalarEpilogue() && !IsScalarEpilogueAllowed; 5575 unsigned Cost = TTI.getInterleavedMemoryOpCost( 5576 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, 5577 Group->getAlignment(), AS, Legal->isMaskRequired(I), UseMaskForGaps); 5578 5579 if (Group->isReverse()) { 5580 // TODO: Add support for reversed masked interleaved access. 5581 assert(!Legal->isMaskRequired(I) && 5582 "Reverse masked interleaved access not supported."); 5583 Cost += Group->getNumMembers() * 5584 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5585 } 5586 return Cost; 5587 } 5588 5589 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 5590 unsigned VF) { 5591 // Calculate scalar cost only. Vectorization cost should be ready at this 5592 // moment. 5593 if (VF == 1) { 5594 Type *ValTy = getMemInstValueType(I); 5595 unsigned Alignment = getLoadStoreAlignment(I); 5596 unsigned AS = getLoadStoreAddressSpace(I); 5597 5598 return TTI.getAddressComputationCost(ValTy) + 5599 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I); 5600 } 5601 return getWideningCost(I, VF); 5602 } 5603 5604 LoopVectorizationCostModel::VectorizationCostTy 5605 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 5606 // If we know that this instruction will remain uniform, check the cost of 5607 // the scalar version. 5608 if (isUniformAfterVectorization(I, VF)) 5609 VF = 1; 5610 5611 if (VF > 1 && isProfitableToScalarize(I, VF)) 5612 return VectorizationCostTy(InstsToScalarize[VF][I], false); 5613 5614 // Forced scalars do not have any scalarization overhead. 
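// (Their cost is simply VF times the scalar instruction cost computed below,
// with no extract/insert element overhead added on top.)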
5615 auto ForcedScalar = ForcedScalars.find(VF); 5616 if (VF > 1 && ForcedScalar != ForcedScalars.end()) { 5617 auto InstSet = ForcedScalar->second; 5618 if (InstSet.find(I) != InstSet.end()) 5619 return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false); 5620 } 5621 5622 Type *VectorTy; 5623 unsigned C = getInstructionCost(I, VF, VectorTy); 5624 5625 bool TypeNotScalarized = 5626 VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF; 5627 return VectorizationCostTy(C, TypeNotScalarized); 5628 } 5629 5630 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) { 5631 if (VF == 1) 5632 return; 5633 NumPredStores = 0; 5634 for (BasicBlock *BB : TheLoop->blocks()) { 5635 // For each instruction in the old loop. 5636 for (Instruction &I : *BB) { 5637 Value *Ptr = getLoadStorePointerOperand(&I); 5638 if (!Ptr) 5639 continue; 5640 5641 // TODO: We should generate better code and update the cost model for 5642 // predicated uniform stores. Today they are treated as any other 5643 // predicated store (see added test cases in 5644 // invariant-store-vectorization.ll). 5645 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 5646 NumPredStores++; 5647 5648 if (Legal->isUniform(Ptr) && 5649 // Conditional loads and stores should be scalarized and predicated. 5650 // isScalarWithPredication cannot be used here since masked 5651 // gather/scatters are not considered scalar with predication. 5652 !Legal->blockNeedsPredication(I.getParent())) { 5653 // TODO: Avoid replicating loads and stores instead of 5654 // relying on instcombine to remove them. 5655 // Load: Scalar load + broadcast 5656 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 5657 unsigned Cost = getUniformMemOpCost(&I, VF); 5658 setWideningDecision(&I, VF, CM_Scalarize, Cost); 5659 continue; 5660 } 5661 5662 // We assume that widening is the best solution when possible. 5663 if (memoryInstructionCanBeWidened(&I, VF)) { 5664 unsigned Cost = getConsecutiveMemOpCost(&I, VF); 5665 int ConsecutiveStride = 5666 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 5667 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 5668 "Expected consecutive stride."); 5669 InstWidening Decision = 5670 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 5671 setWideningDecision(&I, VF, Decision, Cost); 5672 continue; 5673 } 5674 5675 // Choose between Interleaving, Gather/Scatter or Scalarization. 5676 unsigned InterleaveCost = std::numeric_limits<unsigned>::max(); 5677 unsigned NumAccesses = 1; 5678 if (isAccessInterleaved(&I)) { 5679 auto Group = getInterleavedAccessGroup(&I); 5680 assert(Group && "Fail to get an interleaved access group."); 5681 5682 // Make one decision for the whole group. 5683 if (getWideningDecision(&I, VF) != CM_Unknown) 5684 continue; 5685 5686 NumAccesses = Group->getNumMembers(); 5687 if (interleavedAccessCanBeWidened(&I, VF)) 5688 InterleaveCost = getInterleaveGroupCost(&I, VF); 5689 } 5690 5691 unsigned GatherScatterCost = 5692 isLegalGatherOrScatter(&I) 5693 ? getGatherScatterCost(&I, VF) * NumAccesses 5694 : std::numeric_limits<unsigned>::max(); 5695 5696 unsigned ScalarizationCost = 5697 getMemInstScalarizationCost(&I, VF) * NumAccesses; 5698 5699 // Choose better solution for the current VF, 5700 // write down this decision and use it during vectorization. 
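// For instance, with hypothetical costs InterleaveCost = 8,
// GatherScatterCost = 12 and ScalarizationCost = 10, interleaving is chosen.
// A tie between interleaving and gather/scatter goes to interleaving
// (provided it also beats scalarization); a tie between gather/scatter and
// scalarization goes to scalarization.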
5701 unsigned Cost; 5702 InstWidening Decision; 5703 if (InterleaveCost <= GatherScatterCost && 5704 InterleaveCost < ScalarizationCost) { 5705 Decision = CM_Interleave; 5706 Cost = InterleaveCost; 5707 } else if (GatherScatterCost < ScalarizationCost) { 5708 Decision = CM_GatherScatter; 5709 Cost = GatherScatterCost; 5710 } else { 5711 Decision = CM_Scalarize; 5712 Cost = ScalarizationCost; 5713 } 5714 // If the instruction belongs to an interleave group, the whole group 5715 // receives the same decision. The whole group receives the cost, but 5716 // the cost will actually be assigned to one instruction. 5717 if (auto Group = getInterleavedAccessGroup(&I)) 5718 setWideningDecision(Group, VF, Decision, Cost); 5719 else 5720 setWideningDecision(&I, VF, Decision, Cost); 5721 } 5722 } 5723 5724 // Make sure that any load of address and any other address computation 5725 // remains scalar unless there is gather/scatter support. This avoids 5726 // inevitable extracts into address registers, and also has the benefit of 5727 // activating LSR more, since that pass can't optimize vectorized 5728 // addresses. 5729 if (TTI.prefersVectorizedAddressing()) 5730 return; 5731 5732 // Start with all scalar pointer uses. 5733 SmallPtrSet<Instruction *, 8> AddrDefs; 5734 for (BasicBlock *BB : TheLoop->blocks()) 5735 for (Instruction &I : *BB) { 5736 Instruction *PtrDef = 5737 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 5738 if (PtrDef && TheLoop->contains(PtrDef) && 5739 getWideningDecision(&I, VF) != CM_GatherScatter) 5740 AddrDefs.insert(PtrDef); 5741 } 5742 5743 // Add all instructions used to generate the addresses. 5744 SmallVector<Instruction *, 4> Worklist; 5745 for (auto *I : AddrDefs) 5746 Worklist.push_back(I); 5747 while (!Worklist.empty()) { 5748 Instruction *I = Worklist.pop_back_val(); 5749 for (auto &Op : I->operands()) 5750 if (auto *InstOp = dyn_cast<Instruction>(Op)) 5751 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 5752 AddrDefs.insert(InstOp).second) 5753 Worklist.push_back(InstOp); 5754 } 5755 5756 for (auto *I : AddrDefs) { 5757 if (isa<LoadInst>(I)) { 5758 // Setting the desired widening decision should ideally be handled 5759 // by the cost functions, but since this involves the task of finding out 5760 // if the loaded register is involved in an address computation, it is 5761 // instead changed here when we know this is the case. 5762 InstWidening Decision = getWideningDecision(I, VF); 5763 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 5764 // Scalarize a widened load of address. 5765 setWideningDecision(I, VF, CM_Scalarize, 5766 (VF * getMemoryInstructionCost(I, 1))); 5767 else if (auto Group = getInterleavedAccessGroup(I)) { 5768 // Scalarize an interleave group of address loads. 5769 for (unsigned I = 0; I < Group->getFactor(); ++I) { 5770 if (Instruction *Member = Group->getMember(I)) 5771 setWideningDecision(Member, VF, CM_Scalarize, 5772 (VF * getMemoryInstructionCost(Member, 1))); 5773 } 5774 } 5775 } else 5776 // Make sure I gets scalarized and a cost estimate without 5777 // scalarization overhead. 5778 ForcedScalars[VF].insert(I); 5779 } 5780 } 5781 5782 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 5783 unsigned VF, 5784 Type *&VectorTy) { 5785 Type *RetTy = I->getType(); 5786 if (canTruncateToMinimalBitwidth(I, VF)) 5787 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 5788 VectorTy = isScalarAfterVectorization(I, VF) ?
RetTy : ToVectorTy(RetTy, VF); 5789 auto SE = PSE.getSE(); 5790 5791 // TODO: We need to estimate the cost of intrinsic calls. 5792 switch (I->getOpcode()) { 5793 case Instruction::GetElementPtr: 5794 // We mark this instruction as zero-cost because the cost of GEPs in 5795 // vectorized code depends on whether the corresponding memory instruction 5796 // is scalarized or not. Therefore, we handle GEPs with the memory 5797 // instruction cost. 5798 return 0; 5799 case Instruction::Br: { 5800 // In cases of scalarized and predicated instructions, there will be VF 5801 // predicated blocks in the vectorized loop. Each branch around these 5802 // blocks requires also an extract of its vector compare i1 element. 5803 bool ScalarPredicatedBB = false; 5804 BranchInst *BI = cast<BranchInst>(I); 5805 if (VF > 1 && BI->isConditional() && 5806 (PredicatedBBsAfterVectorization.find(BI->getSuccessor(0)) != 5807 PredicatedBBsAfterVectorization.end() || 5808 PredicatedBBsAfterVectorization.find(BI->getSuccessor(1)) != 5809 PredicatedBBsAfterVectorization.end())) 5810 ScalarPredicatedBB = true; 5811 5812 if (ScalarPredicatedBB) { 5813 // Return cost for branches around scalarized and predicated blocks. 5814 Type *Vec_i1Ty = 5815 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 5816 return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) + 5817 (TTI.getCFInstrCost(Instruction::Br) * VF)); 5818 } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1) 5819 // The back-edge branch will remain, as will all scalar branches. 5820 return TTI.getCFInstrCost(Instruction::Br); 5821 else 5822 // This branch will be eliminated by if-conversion. 5823 return 0; 5824 // Note: We currently assume zero cost for an unconditional branch inside 5825 // a predicated block since it will become a fall-through, although we 5826 // may decide in the future to call TTI for all branches. 5827 } 5828 case Instruction::PHI: { 5829 auto *Phi = cast<PHINode>(I); 5830 5831 // First-order recurrences are replaced by vector shuffles inside the loop. 5832 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 5833 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 5834 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 5835 VectorTy, VF - 1, VectorType::get(RetTy, 1)); 5836 5837 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 5838 // converted into select instructions. We require N - 1 selects per phi 5839 // node, where N is the number of incoming values. 5840 if (VF > 1 && Phi->getParent() != TheLoop->getHeader()) 5841 return (Phi->getNumIncomingValues() - 1) * 5842 TTI.getCmpSelInstrCost( 5843 Instruction::Select, ToVectorTy(Phi->getType(), VF), 5844 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF)); 5845 5846 return TTI.getCFInstrCost(Instruction::PHI); 5847 } 5848 case Instruction::UDiv: 5849 case Instruction::SDiv: 5850 case Instruction::URem: 5851 case Instruction::SRem: 5852 // If we have a predicated instruction, it may not be executed for each 5853 // vector lane. Get the scalarization cost and scale this amount by the 5854 // probability of executing the predicated block. If the instruction is not 5855 // predicated, we fall through to the next case. 5856 if (VF > 1 && isScalarWithPredication(I)) { 5857 unsigned Cost = 0; 5858 5859 // These instructions have a non-void type, so account for the phi nodes 5860 // that we will create. This cost is likely to be zero. 
The phi node 5861 // cost, if any, should be scaled by the block probability because it 5862 // models a copy at the end of each predicated block. 5863 Cost += VF * TTI.getCFInstrCost(Instruction::PHI); 5864 5865 // The cost of the non-predicated instruction. 5866 Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy); 5867 5868 // The cost of insertelement and extractelement instructions needed for 5869 // scalarization. 5870 Cost += getScalarizationOverhead(I, VF, TTI); 5871 5872 // Scale the cost by the probability of executing the predicated blocks. 5873 // This assumes the predicated block for each vector lane is equally 5874 // likely. 5875 return Cost / getReciprocalPredBlockProb(); 5876 } 5877 LLVM_FALLTHROUGH; 5878 case Instruction::Add: 5879 case Instruction::FAdd: 5880 case Instruction::Sub: 5881 case Instruction::FSub: 5882 case Instruction::Mul: 5883 case Instruction::FMul: 5884 case Instruction::FDiv: 5885 case Instruction::FRem: 5886 case Instruction::Shl: 5887 case Instruction::LShr: 5888 case Instruction::AShr: 5889 case Instruction::And: 5890 case Instruction::Or: 5891 case Instruction::Xor: { 5892 // Since we will replace the stride by 1 the multiplication should go away. 5893 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 5894 return 0; 5895 // Certain instructions can be cheaper to vectorize if they have a constant 5896 // second vector operand. One example of this are shifts on x86. 5897 Value *Op2 = I->getOperand(1); 5898 TargetTransformInfo::OperandValueProperties Op2VP; 5899 TargetTransformInfo::OperandValueKind Op2VK = 5900 TTI.getOperandInfo(Op2, Op2VP); 5901 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 5902 Op2VK = TargetTransformInfo::OK_UniformValue; 5903 5904 SmallVector<const Value *, 4> Operands(I->operand_values()); 5905 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 5906 return N * TTI.getArithmeticInstrCost( 5907 I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue, 5908 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands); 5909 } 5910 case Instruction::Select: { 5911 SelectInst *SI = cast<SelectInst>(I); 5912 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 5913 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 5914 Type *CondTy = SI->getCondition()->getType(); 5915 if (!ScalarCond) 5916 CondTy = VectorType::get(CondTy, VF); 5917 5918 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I); 5919 } 5920 case Instruction::ICmp: 5921 case Instruction::FCmp: { 5922 Type *ValTy = I->getOperand(0)->getType(); 5923 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 5924 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 5925 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 5926 VectorTy = ToVectorTy(ValTy, VF); 5927 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I); 5928 } 5929 case Instruction::Store: 5930 case Instruction::Load: { 5931 unsigned Width = VF; 5932 if (Width > 1) { 5933 InstWidening Decision = getWideningDecision(I, Width); 5934 assert(Decision != CM_Unknown && 5935 "CM decision should be taken at this point"); 5936 if (Decision == CM_Scalarize) 5937 Width = 1; 5938 } 5939 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 5940 return getMemoryInstructionCost(I, VF); 5941 } 5942 case Instruction::ZExt: 5943 case Instruction::SExt: 5944 case Instruction::FPToUI: 5945 case Instruction::FPToSI: 5946 case Instruction::FPExt: 5947 case Instruction::PtrToInt: 5948 case Instruction::IntToPtr: 5949 case Instruction::SIToFP: 5950 case Instruction::UIToFP: 5951 case Instruction::Trunc: 5952 case Instruction::FPTrunc: 5953 case Instruction::BitCast: { 5954 // We optimize the truncation of induction variables having constant 5955 // integer steps. The cost of these truncations is the same as the scalar 5956 // operation. 5957 if (isOptimizableIVTruncate(I, VF)) { 5958 auto *Trunc = cast<TruncInst>(I); 5959 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 5960 Trunc->getSrcTy(), Trunc); 5961 } 5962 5963 Type *SrcScalarTy = I->getOperand(0)->getType(); 5964 Type *SrcVecTy = 5965 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 5966 if (canTruncateToMinimalBitwidth(I, VF)) { 5967 // This cast is going to be shrunk. This may remove the cast or it might 5968 // turn it into slightly different cast. For example, if MinBW == 16, 5969 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 5970 // 5971 // Calculate the modified src and dest types. 5972 Type *MinVecTy = VectorTy; 5973 if (I->getOpcode() == Instruction::Trunc) { 5974 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 5975 VectorTy = 5976 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 5977 } else if (I->getOpcode() == Instruction::ZExt || 5978 I->getOpcode() == Instruction::SExt) { 5979 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 5980 VectorTy = 5981 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 5982 } 5983 } 5984 5985 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 5986 return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I); 5987 } 5988 case Instruction::Call: { 5989 bool NeedToScalarize; 5990 CallInst *CI = cast<CallInst>(I); 5991 unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize); 5992 if (getVectorIntrinsicIDForCall(CI, TLI)) 5993 return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI)); 5994 return CallCost; 5995 } 5996 default: 5997 // The cost of executing VF copies of the scalar instruction. This opcode 5998 // is unknown. Assume that it is the same as 'mul'. 5999 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) + 6000 getScalarizationOverhead(I, VF, TTI); 6001 } // end of switch. 6002 } 6003 6004 char LoopVectorize::ID = 0; 6005 6006 static const char lv_name[] = "Loop Vectorization"; 6007 6008 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 6009 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 6010 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 6011 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 6012 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 6013 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 6014 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 6015 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 6016 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 6017 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 6018 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 6019 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 6020 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 6021 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 6022 6023 namespace llvm { 6024 6025 Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) { 6026 return new LoopVectorize(NoUnrolling, AlwaysVectorize); 6027 } 6028 6029 } // end namespace llvm 6030 6031 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 6032 // Check if the pointer operand of a load or store instruction is 6033 // consecutive. 6034 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 6035 return Legal->isConsecutivePtr(Ptr); 6036 return false; 6037 } 6038 6039 void LoopVectorizationCostModel::collectValuesToIgnore() { 6040 // Ignore ephemeral values. 6041 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 6042 6043 // Ignore type-promoting instructions we identified during reduction 6044 // detection. 6045 for (auto &Reduction : *Legal->getReductionVars()) { 6046 RecurrenceDescriptor &RedDes = Reduction.second; 6047 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 6048 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6049 } 6050 // Ignore type-casting instructions we identified during induction 6051 // detection. 6052 for (auto &Induction : *Legal->getInductionVars()) { 6053 InductionDescriptor &IndDes = Induction.second; 6054 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6055 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6056 } 6057 } 6058 6059 VectorizationFactor 6060 LoopVectorizationPlanner::planInVPlanNativePath(bool OptForSize, 6061 unsigned UserVF) { 6062 // Width 1 means no vectorization, cost 0 means uncomputed cost. 6063 const VectorizationFactor NoVectorization = {1U, 0U}; 6064 6065 // Outer loop handling: They may require CFG and instruction level 6066 // transformations before even evaluating whether vectorization is profitable. 
6067 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 6068 // the vectorization pipeline. 6069 if (!OrigLoop->empty()) { 6070 // TODO: If UserVF is not provided, we set UserVF to 4 for stress testing. 6071 // This won't be necessary when UserVF is not required in the VPlan-native 6072 // path. 6073 if (VPlanBuildStressTest && !UserVF) 6074 UserVF = 4; 6075 6076 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 6077 assert(UserVF && "Expected UserVF for outer loop vectorization."); 6078 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 6079 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 6080 buildVPlans(UserVF, UserVF); 6081 6082 // For VPlan build stress testing, we bail out after VPlan construction. 6083 if (VPlanBuildStressTest) 6084 return NoVectorization; 6085 6086 return {UserVF, 0}; 6087 } 6088 6089 LLVM_DEBUG( 6090 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 6091 "VPlan-native path.\n"); 6092 return NoVectorization; 6093 } 6094 6095 VectorizationFactor 6096 LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) { 6097 assert(OrigLoop->empty() && "Inner loop expected."); 6098 // Width 1 means no vectorization, cost 0 means uncomputed cost. 6099 const VectorizationFactor NoVectorization = {1U, 0U}; 6100 Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize); 6101 if (!MaybeMaxVF.hasValue()) // Cases considered too costly to vectorize. 6102 return NoVectorization; 6103 6104 // Invalidate interleave groups if all blocks of loop will be predicated. 6105 if (CM.blockNeedsPredication(OrigLoop->getHeader()) && 6106 !useMaskedInterleavedAccesses(*TTI)) { 6107 LLVM_DEBUG( 6108 dbgs() 6109 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 6110 "which requires masked-interleaved support.\n"); 6111 CM.InterleaveInfo.reset(); 6112 } 6113 6114 if (UserVF) { 6115 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 6116 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 6117 // Collect the instructions (and their associated costs) that will be more 6118 // profitable to scalarize. 6119 CM.selectUserVectorizationFactor(UserVF); 6120 buildVPlansWithVPRecipes(UserVF, UserVF); 6121 LLVM_DEBUG(printPlans(dbgs())); 6122 return {UserVF, 0}; 6123 } 6124 6125 unsigned MaxVF = MaybeMaxVF.getValue(); 6126 assert(MaxVF != 0 && "MaxVF is zero."); 6127 6128 for (unsigned VF = 1; VF <= MaxVF; VF *= 2) { 6129 // Collect Uniform and Scalar instructions after vectorization with VF. 6130 CM.collectUniformsAndScalars(VF); 6131 6132 // Collect the instructions (and their associated costs) that will be more 6133 // profitable to scalarize. 6134 if (VF > 1) 6135 CM.collectInstsToScalarize(VF); 6136 } 6137 6138 buildVPlansWithVPRecipes(1, MaxVF); 6139 LLVM_DEBUG(printPlans(dbgs())); 6140 if (MaxVF == 1) 6141 return NoVectorization; 6142 6143 // Select the optimal vectorization factor. 
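// E.g. with MaxVF = 8 the candidate factors are VF = 1, 2, 4 and 8; the
// preparatory loop above collected the per-VF scalarization information, and
// selectVectorizationFactor below picks the most profitable candidate.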
6144 return CM.selectVectorizationFactor(MaxVF); 6145 } 6146 6147 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) { 6148 LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF 6149 << '\n'); 6150 BestVF = VF; 6151 BestUF = UF; 6152 6153 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 6154 return !Plan->hasVF(VF); 6155 }); 6156 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 6157 } 6158 6159 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 6160 DominatorTree *DT) { 6161 // Perform the actual loop transformation. 6162 6163 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 6164 VPCallbackILV CallbackILV(ILV); 6165 6166 VPTransformState State{BestVF, BestUF, LI, 6167 DT, ILV.Builder, ILV.VectorLoopValueMap, 6168 &ILV, CallbackILV}; 6169 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 6170 State.TripCount = ILV.getOrCreateTripCount(nullptr); 6171 6172 //===------------------------------------------------===// 6173 // 6174 // Notice: any optimization or new instruction that go 6175 // into the code below should also be implemented in 6176 // the cost-model. 6177 // 6178 //===------------------------------------------------===// 6179 6180 // 2. Copy and widen instructions from the old loop into the new loop. 6181 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 6182 VPlans.front()->execute(&State); 6183 6184 // 3. Fix the vectorized code: take care of header phi's, live-outs, 6185 // predication, updating analyses. 6186 ILV.fixVectorizedLoop(); 6187 } 6188 6189 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 6190 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 6191 BasicBlock *Latch = OrigLoop->getLoopLatch(); 6192 6193 // We create new control-flow for the vectorized loop, so the original 6194 // condition will be dead after vectorization if it's only used by the 6195 // branch. 6196 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 6197 if (Cmp && Cmp->hasOneUse()) 6198 DeadInstructions.insert(Cmp); 6199 6200 // We create new "steps" for induction variable updates to which the original 6201 // induction variables map. An original update instruction will be dead if 6202 // all its users except the induction variable are dead. 6203 for (auto &Induction : *Legal->getInductionVars()) { 6204 PHINode *Ind = Induction.first; 6205 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 6206 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 6207 return U == Ind || DeadInstructions.find(cast<Instruction>(U)) != 6208 DeadInstructions.end(); 6209 })) 6210 DeadInstructions.insert(IndUpdate); 6211 6212 // We record as "Dead" also the type-casting instructions we had identified 6213 // during induction analysis. We don't need any handling for them in the 6214 // vectorized loop because we have proven that, under a proper runtime 6215 // test guarding the vectorized loop, the value of the phi, and the casted 6216 // value of the phi, are the same. The last instruction in this casting chain 6217 // will get its scalar/vector/widened def from the scalar/vector/widened def 6218 // of the respective phi node. Any other casts in the induction def-use chain 6219 // have no other uses outside the phi update chain, and will be ignored. 
6220 InductionDescriptor &IndDes = Induction.second; 6221 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6222 DeadInstructions.insert(Casts.begin(), Casts.end()); 6223 } 6224 } 6225 6226 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 6227 6228 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 6229 6230 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 6231 Instruction::BinaryOps BinOp) { 6232 // When unrolling and the VF is 1, we only need to add a simple scalar. 6233 Type *Ty = Val->getType(); 6234 assert(!Ty->isVectorTy() && "Val must be a scalar"); 6235 6236 if (Ty->isFloatingPointTy()) { 6237 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 6238 6239 // Floating point operations had to be 'fast' to enable the unrolling. 6240 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 6241 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 6242 } 6243 Constant *C = ConstantInt::get(Ty, StartIdx); 6244 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 6245 } 6246 6247 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 6248 SmallVector<Metadata *, 4> MDs; 6249 // Reserve first location for self reference to the LoopID metadata node. 6250 MDs.push_back(nullptr); 6251 bool IsUnrollMetadata = false; 6252 MDNode *LoopID = L->getLoopID(); 6253 if (LoopID) { 6254 // First find existing loop unrolling disable metadata. 6255 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 6256 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 6257 if (MD) { 6258 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 6259 IsUnrollMetadata = 6260 S && S->getString().startswith("llvm.loop.unroll.disable"); 6261 } 6262 MDs.push_back(LoopID->getOperand(i)); 6263 } 6264 } 6265 6266 if (!IsUnrollMetadata) { 6267 // Add runtime unroll disable metadata. 6268 LLVMContext &Context = L->getHeader()->getContext(); 6269 SmallVector<Metadata *, 1> DisableOperands; 6270 DisableOperands.push_back( 6271 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 6272 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 6273 MDs.push_back(DisableNode); 6274 MDNode *NewLoopID = MDNode::get(Context, MDs); 6275 // Set operand 0 to refer to the loop id itself. 6276 NewLoopID->replaceOperandWith(0, NewLoopID); 6277 L->setLoopID(NewLoopID); 6278 } 6279 } 6280 6281 bool LoopVectorizationPlanner::getDecisionAndClampRange( 6282 const std::function<bool(unsigned)> &Predicate, VFRange &Range) { 6283 assert(Range.End > Range.Start && "Trying to test an empty VF range."); 6284 bool PredicateAtRangeStart = Predicate(Range.Start); 6285 6286 for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2) 6287 if (Predicate(TmpVF) != PredicateAtRangeStart) { 6288 Range.End = TmpVF; 6289 break; 6290 } 6291 6292 return PredicateAtRangeStart; 6293 } 6294 6295 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 6296 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 6297 /// of VF's starting at a given VF and extending it as much as possible. Each 6298 /// vectorization decision can potentially shorten this sub-range during 6299 /// buildVPlan(). 
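/// For example, assuming MinVF = 1 and MaxVF = 8: if some decision changes
/// between VF = 1 and VF = 2 and again between VF = 4 and VF = 8, this
/// results in three plans covering the sub-ranges [1, 2), [2, 8) and [8, 9),
/// i.e. VFs {1}, {2, 4} and {8}.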
6300 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) { 6301 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 6302 VFRange SubRange = {VF, MaxVF + 1}; 6303 VPlans.push_back(buildVPlan(SubRange)); 6304 VF = SubRange.End; 6305 } 6306 } 6307 6308 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 6309 VPlanPtr &Plan) { 6310 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 6311 6312 // Look for cached value. 6313 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 6314 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 6315 if (ECEntryIt != EdgeMaskCache.end()) 6316 return ECEntryIt->second; 6317 6318 VPValue *SrcMask = createBlockInMask(Src, Plan); 6319 6320 // The terminator has to be a branch inst! 6321 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 6322 assert(BI && "Unexpected terminator found"); 6323 6324 if (!BI->isConditional()) 6325 return EdgeMaskCache[Edge] = SrcMask; 6326 6327 VPValue *EdgeMask = Plan->getVPValue(BI->getCondition()); 6328 assert(EdgeMask && "No Edge Mask found for condition"); 6329 6330 if (BI->getSuccessor(0) != Dst) 6331 EdgeMask = Builder.createNot(EdgeMask); 6332 6333 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND. 6334 EdgeMask = Builder.createAnd(EdgeMask, SrcMask); 6335 6336 return EdgeMaskCache[Edge] = EdgeMask; 6337 } 6338 6339 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 6340 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 6341 6342 // Look for cached value. 6343 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 6344 if (BCEntryIt != BlockMaskCache.end()) 6345 return BCEntryIt->second; 6346 6347 // All-one mask is modelled as no-mask following the convention for masked 6348 // load/store/gather/scatter. Initialize BlockMask to no-mask. 6349 VPValue *BlockMask = nullptr; 6350 6351 if (OrigLoop->getHeader() == BB) { 6352 if (!CM.blockNeedsPredication(BB)) 6353 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 6354 6355 // Introduce the early-exit compare IV <= BTC to form header block mask. 6356 // This is used instead of IV < TC because TC may wrap, unlike BTC. 6357 VPValue *IV = Plan->getVPValue(Legal->getPrimaryInduction()); 6358 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 6359 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 6360 return BlockMaskCache[BB] = BlockMask; 6361 } 6362 6363 // This is the block mask. We OR all incoming edges. 6364 for (auto *Predecessor : predecessors(BB)) { 6365 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 6366 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 6367 return BlockMaskCache[BB] = EdgeMask; 6368 6369 if (!BlockMask) { // BlockMask has its initialized nullptr value. 6370 BlockMask = EdgeMask; 6371 continue; 6372 } 6373 6374 BlockMask = Builder.createOr(BlockMask, EdgeMask); 6375 } 6376 6377 return BlockMaskCache[BB] = BlockMask; 6378 } 6379 6380 VPInterleaveRecipe *VPRecipeBuilder::tryToInterleaveMemory(Instruction *I, 6381 VFRange &Range, 6382 VPlanPtr &Plan) { 6383 const InterleaveGroup<Instruction> *IG = CM.getInterleavedAccessGroup(I); 6384 if (!IG) 6385 return nullptr; 6386 6387 // Now check if IG is relevant for VF's in the given range. 
6388 auto isIGMember = [&](Instruction *I) -> std::function<bool(unsigned)> { 6389 return [=](unsigned VF) -> bool { 6390 return (VF >= 2 && // Query is illegal for VF == 1 6391 CM.getWideningDecision(I, VF) == 6392 LoopVectorizationCostModel::CM_Interleave); 6393 }; 6394 }; 6395 if (!LoopVectorizationPlanner::getDecisionAndClampRange(isIGMember(I), Range)) 6396 return nullptr; 6397 6398 // I is a member of an InterleaveGroup for VF's in the (possibly trimmed) 6399 // range. If it's the primary member of the IG construct a VPInterleaveRecipe. 6400 // Otherwise, it's an adjunct member of the IG, do not construct any Recipe. 6401 assert(I == IG->getInsertPos() && 6402 "Generating a recipe for an adjunct member of an interleave group"); 6403 6404 VPValue *Mask = nullptr; 6405 if (Legal->isMaskRequired(I)) 6406 Mask = createBlockInMask(I->getParent(), Plan); 6407 6408 return new VPInterleaveRecipe(IG, Mask); 6409 } 6410 6411 VPWidenMemoryInstructionRecipe * 6412 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range, 6413 VPlanPtr &Plan) { 6414 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 6415 return nullptr; 6416 6417 auto willWiden = [&](unsigned VF) -> bool { 6418 if (VF == 1) 6419 return false; 6420 if (CM.isScalarAfterVectorization(I, VF) || 6421 CM.isProfitableToScalarize(I, VF)) 6422 return false; 6423 LoopVectorizationCostModel::InstWidening Decision = 6424 CM.getWideningDecision(I, VF); 6425 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 6426 "CM decision should be taken at this point."); 6427 assert(Decision != LoopVectorizationCostModel::CM_Interleave && 6428 "Interleave memory opportunity should be caught earlier."); 6429 return Decision != LoopVectorizationCostModel::CM_Scalarize; 6430 }; 6431 6432 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 6433 return nullptr; 6434 6435 VPValue *Mask = nullptr; 6436 if (Legal->isMaskRequired(I)) 6437 Mask = createBlockInMask(I->getParent(), Plan); 6438 6439 return new VPWidenMemoryInstructionRecipe(*I, Mask); 6440 } 6441 6442 VPWidenIntOrFpInductionRecipe * 6443 VPRecipeBuilder::tryToOptimizeInduction(Instruction *I, VFRange &Range) { 6444 if (PHINode *Phi = dyn_cast<PHINode>(I)) { 6445 // Check if this is an integer or fp induction. If so, build the recipe that 6446 // produces its scalar and vector values. 6447 InductionDescriptor II = Legal->getInductionVars()->lookup(Phi); 6448 if (II.getKind() == InductionDescriptor::IK_IntInduction || 6449 II.getKind() == InductionDescriptor::IK_FpInduction) 6450 return new VPWidenIntOrFpInductionRecipe(Phi); 6451 6452 return nullptr; 6453 } 6454 6455 // Optimize the special case where the source is a constant integer 6456 // induction variable. Notice that we can only optimize the 'trunc' case 6457 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 6458 // (c) other casts depend on pointer size. 6459 6460 // Determine whether \p K is a truncation based on an induction variable that 6461 // can be optimized. 
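// A typical optimizable case is a truncation such as "%t = trunc i64 %iv to
// i32" where %iv is an integer induction with a constant step; the truncate
// is then folded into the widened induction instead of being widened on its
// own.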
6462 auto isOptimizableIVTruncate = 6463 [&](Instruction *K) -> std::function<bool(unsigned)> { 6464 return 6465 [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); }; 6466 }; 6467 6468 if (isa<TruncInst>(I) && LoopVectorizationPlanner::getDecisionAndClampRange( 6469 isOptimizableIVTruncate(I), Range)) 6470 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 6471 cast<TruncInst>(I)); 6472 return nullptr; 6473 } 6474 6475 VPBlendRecipe *VPRecipeBuilder::tryToBlend(Instruction *I, VPlanPtr &Plan) { 6476 PHINode *Phi = dyn_cast<PHINode>(I); 6477 if (!Phi || Phi->getParent() == OrigLoop->getHeader()) 6478 return nullptr; 6479 6480 // We know that all PHIs in non-header blocks are converted into selects, so 6481 // we don't have to worry about the insertion order and we can just use the 6482 // builder. At this point we generate the predication tree. There may be 6483 // duplications since this is a simple recursive scan, but future 6484 // optimizations will clean it up. 6485 6486 SmallVector<VPValue *, 2> Masks; 6487 unsigned NumIncoming = Phi->getNumIncomingValues(); 6488 for (unsigned In = 0; In < NumIncoming; In++) { 6489 VPValue *EdgeMask = 6490 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 6491 assert((EdgeMask || NumIncoming == 1) && 6492 "Multiple predecessors with one having a full mask"); 6493 if (EdgeMask) 6494 Masks.push_back(EdgeMask); 6495 } 6496 return new VPBlendRecipe(Phi, Masks); 6497 } 6498 6499 bool VPRecipeBuilder::tryToWiden(Instruction *I, VPBasicBlock *VPBB, 6500 VFRange &Range) { 6501 6502 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 6503 [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range); 6504 6505 if (IsPredicated) 6506 return false; 6507 6508 auto IsVectorizableOpcode = [](unsigned Opcode) { 6509 switch (Opcode) { 6510 case Instruction::Add: 6511 case Instruction::And: 6512 case Instruction::AShr: 6513 case Instruction::BitCast: 6514 case Instruction::Br: 6515 case Instruction::Call: 6516 case Instruction::FAdd: 6517 case Instruction::FCmp: 6518 case Instruction::FDiv: 6519 case Instruction::FMul: 6520 case Instruction::FPExt: 6521 case Instruction::FPToSI: 6522 case Instruction::FPToUI: 6523 case Instruction::FPTrunc: 6524 case Instruction::FRem: 6525 case Instruction::FSub: 6526 case Instruction::GetElementPtr: 6527 case Instruction::ICmp: 6528 case Instruction::IntToPtr: 6529 case Instruction::Load: 6530 case Instruction::LShr: 6531 case Instruction::Mul: 6532 case Instruction::Or: 6533 case Instruction::PHI: 6534 case Instruction::PtrToInt: 6535 case Instruction::SDiv: 6536 case Instruction::Select: 6537 case Instruction::SExt: 6538 case Instruction::Shl: 6539 case Instruction::SIToFP: 6540 case Instruction::SRem: 6541 case Instruction::Store: 6542 case Instruction::Sub: 6543 case Instruction::Trunc: 6544 case Instruction::UDiv: 6545 case Instruction::UIToFP: 6546 case Instruction::URem: 6547 case Instruction::Xor: 6548 case Instruction::ZExt: 6549 return true; 6550 } 6551 return false; 6552 }; 6553 6554 if (!IsVectorizableOpcode(I->getOpcode())) 6555 return false; 6556 6557 if (CallInst *CI = dyn_cast<CallInst>(I)) { 6558 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6559 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 6560 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect)) 6561 return false; 6562 } 6563 6564 auto willWiden = [&](unsigned VF) -> bool { 6565 if (!isa<PHINode>(I) && 
(CM.isScalarAfterVectorization(I, VF) || 6566 CM.isProfitableToScalarize(I, VF))) 6567 return false; 6568 if (CallInst *CI = dyn_cast<CallInst>(I)) { 6569 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6570 // The following case may be scalarized depending on the VF. 6571 // The flag shows whether we use Intrinsic or a usual Call for vectorized 6572 // version of the instruction. 6573 // Is it beneficial to perform intrinsic call compared to lib call? 6574 bool NeedToScalarize; 6575 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 6576 bool UseVectorIntrinsic = 6577 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 6578 return UseVectorIntrinsic || !NeedToScalarize; 6579 } 6580 if (isa<LoadInst>(I) || isa<StoreInst>(I)) { 6581 assert(CM.getWideningDecision(I, VF) == 6582 LoopVectorizationCostModel::CM_Scalarize && 6583 "Memory widening decisions should have been taken care by now"); 6584 return false; 6585 } 6586 return true; 6587 }; 6588 6589 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 6590 return false; 6591 6592 // Success: widen this instruction. We optimize the common case where 6593 // consecutive instructions can be represented by a single recipe. 6594 if (!VPBB->empty()) { 6595 VPWidenRecipe *LastWidenRecipe = dyn_cast<VPWidenRecipe>(&VPBB->back()); 6596 if (LastWidenRecipe && LastWidenRecipe->appendInstruction(I)) 6597 return true; 6598 } 6599 6600 VPBB->appendRecipe(new VPWidenRecipe(I)); 6601 return true; 6602 } 6603 6604 VPBasicBlock *VPRecipeBuilder::handleReplication( 6605 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 6606 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 6607 VPlanPtr &Plan) { 6608 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 6609 [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); }, 6610 Range); 6611 6612 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 6613 [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range); 6614 6615 auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated); 6616 6617 // Find if I uses a predicated instruction. If so, it will use its scalar 6618 // value. Avoid hoisting the insert-element which packs the scalar value into 6619 // a vector value, as that happens iff all users use the vector value. 6620 for (auto &Op : I->operands()) 6621 if (auto *PredInst = dyn_cast<Instruction>(Op)) 6622 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 6623 PredInst2Recipe[PredInst]->setAlsoPack(false); 6624 6625 // Finalize the recipe for Instr, first if it is not predicated. 6626 if (!IsPredicated) { 6627 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 6628 VPBB->appendRecipe(Recipe); 6629 return VPBB; 6630 } 6631 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 6632 assert(VPBB->getSuccessors().empty() && 6633 "VPBB has successors when handling predicated replication."); 6634 // Record predicated instructions for above packing optimizations. 
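// For a predicated instruction such as a conditional store, the replicate
// region created below has the triangular shape pred.store.entry ->
// pred.store.if -> pred.store.continue, with the replicate recipe placed in
// the ".if" block and, for non-void instructions, a phi recipe in the
// ".continue" block.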
6635 PredInst2Recipe[I] = Recipe; 6636 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 6637 VPBlockUtils::insertBlockAfter(Region, VPBB); 6638 auto *RegSucc = new VPBasicBlock(); 6639 VPBlockUtils::insertBlockAfter(RegSucc, Region); 6640 return RegSucc; 6641 } 6642 6643 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 6644 VPRecipeBase *PredRecipe, 6645 VPlanPtr &Plan) { 6646 // Instructions marked for predication are replicated and placed under an 6647 // if-then construct to prevent side-effects. 6648 6649 // Generate recipes to compute the block mask for this region. 6650 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 6651 6652 // Build the triangular if-then region. 6653 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 6654 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 6655 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 6656 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 6657 auto *PHIRecipe = 6658 Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr); 6659 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 6660 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 6661 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 6662 6663 // Note: first set Entry as region entry and then connect successors starting 6664 // from it in order, to propagate the "parent" of each VPBasicBlock. 6665 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 6666 VPBlockUtils::connectBlocks(Pred, Exit); 6667 6668 return Region; 6669 } 6670 6671 bool VPRecipeBuilder::tryToCreateRecipe(Instruction *Instr, VFRange &Range, 6672 VPlanPtr &Plan, VPBasicBlock *VPBB) { 6673 VPRecipeBase *Recipe = nullptr; 6674 // Check if Instr should belong to an interleave memory recipe, or already 6675 // does. In the latter case Instr is irrelevant. 6676 if ((Recipe = tryToInterleaveMemory(Instr, Range, Plan))) { 6677 VPBB->appendRecipe(Recipe); 6678 return true; 6679 } 6680 6681 // Check if Instr is a memory operation that should be widened. 6682 if ((Recipe = tryToWidenMemory(Instr, Range, Plan))) { 6683 VPBB->appendRecipe(Recipe); 6684 return true; 6685 } 6686 6687 // Check if Instr should form some PHI recipe. 6688 if ((Recipe = tryToOptimizeInduction(Instr, Range))) { 6689 VPBB->appendRecipe(Recipe); 6690 return true; 6691 } 6692 if ((Recipe = tryToBlend(Instr, Plan))) { 6693 VPBB->appendRecipe(Recipe); 6694 return true; 6695 } 6696 if (PHINode *Phi = dyn_cast<PHINode>(Instr)) { 6697 VPBB->appendRecipe(new VPWidenPHIRecipe(Phi)); 6698 return true; 6699 } 6700 6701 // Check if Instr is to be widened by a general VPWidenRecipe, after 6702 // having first checked for specific widening recipes that deal with 6703 // Interleave Groups, Inductions and Phi nodes. 6704 if (tryToWiden(Instr, VPBB, Range)) 6705 return true; 6706 6707 return false; 6708 } 6709 6710 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF, 6711 unsigned MaxVF) { 6712 assert(OrigLoop->empty() && "Inner loop expected."); 6713 6714 // Collect conditions feeding internal conditional branches; they need to be 6715 // represented in VPlan for it to model masking. 
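// For example, for a loop body containing "if (a[i] > 0) b[i] = x;", the
// compare feeding that conditional branch is collected into NeedDef so that
// VPlan can later build the corresponding edge and block masks from it.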
6716 SmallPtrSet<Value *, 1> NeedDef; 6717 6718 auto *Latch = OrigLoop->getLoopLatch(); 6719 for (BasicBlock *BB : OrigLoop->blocks()) { 6720 if (BB == Latch) 6721 continue; 6722 BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator()); 6723 if (Branch && Branch->isConditional()) 6724 NeedDef.insert(Branch->getCondition()); 6725 } 6726 6727 // If the tail is to be folded by masking, the primary induction variable 6728 // needs to be represented in VPlan for it to model early-exit masking. 6729 if (CM.foldTailByMasking()) 6730 NeedDef.insert(Legal->getPrimaryInduction()); 6731 6732 // Collect instructions from the original loop that will become trivially dead 6733 // in the vectorized loop. We don't need to vectorize these instructions. For 6734 // example, original induction update instructions can become dead because we 6735 // separately emit induction "steps" when generating code for the new loop. 6736 // Similarly, we create a new latch condition when setting up the structure 6737 // of the new loop, so the old one can become dead. 6738 SmallPtrSet<Instruction *, 4> DeadInstructions; 6739 collectTriviallyDeadInstructions(DeadInstructions); 6740 6741 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 6742 VFRange SubRange = {VF, MaxVF + 1}; 6743 VPlans.push_back( 6744 buildVPlanWithVPRecipes(SubRange, NeedDef, DeadInstructions)); 6745 VF = SubRange.End; 6746 } 6747 } 6748 6749 LoopVectorizationPlanner::VPlanPtr 6750 LoopVectorizationPlanner::buildVPlanWithVPRecipes( 6751 VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef, 6752 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 6753 // Hold a mapping from predicated instructions to their recipes, in order to 6754 // fix their AlsoPack behavior if a user is determined to replicate and use a 6755 // scalar instead of vector value. 6756 DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe; 6757 6758 DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 6759 DenseMap<Instruction *, Instruction *> SinkAfterInverse; 6760 6761 // Create a dummy pre-entry VPBasicBlock to start building the VPlan. 6762 VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry"); 6763 auto Plan = llvm::make_unique<VPlan>(VPBB); 6764 6765 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, TTI, Legal, CM, Builder); 6766 // Represent values that will have defs inside VPlan. 6767 for (Value *V : NeedDef) 6768 Plan->addVPValue(V); 6769 6770 // Scan the body of the loop in a topological order to visit each basic block 6771 // after having visited its predecessor basic blocks. 6772 LoopBlocksDFS DFS(OrigLoop); 6773 DFS.perform(LI); 6774 6775 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6776 // Relevant instructions from basic block BB will be grouped into VPRecipe 6777 // ingredients and fill a new VPBasicBlock. 6778 unsigned VPBBsForBB = 0; 6779 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName()); 6780 VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB); 6781 VPBB = FirstVPBBForBB; 6782 Builder.setInsertPoint(VPBB); 6783 6784 std::vector<Instruction *> Ingredients; 6785 6786 // Organize the ingredients to vectorize from current basic block in the 6787 // right order. 6788 for (Instruction &I : BB->instructionsWithoutDebug()) { 6789 Instruction *Instr = &I; 6790 6791 // First filter out irrelevant instructions, to ensure no recipes are 6792 // built for them. 6793 if (isa<BranchInst>(Instr) || 6794 DeadInstructions.find(Instr) != DeadInstructions.end()) 6795 continue; 6796 6797 // I is a member of an InterleaveGroup for Range.Start. 
If it's an adjunct 6798 // member of the IG, do not construct any Recipe for it. 6799 const InterleaveGroup<Instruction> *IG = 6800 CM.getInterleavedAccessGroup(Instr); 6801 if (IG && Instr != IG->getInsertPos() && 6802 Range.Start >= 2 && // Query is illegal for VF == 1 6803 CM.getWideningDecision(Instr, Range.Start) == 6804 LoopVectorizationCostModel::CM_Interleave) { 6805 auto SinkCandidate = SinkAfterInverse.find(Instr); 6806 if (SinkCandidate != SinkAfterInverse.end()) 6807 Ingredients.push_back(SinkCandidate->second); 6808 continue; 6809 } 6810 6811 // Move instructions to handle first-order recurrences, step 1: avoid 6812 // handling this instruction until after we've handled the instruction it 6813 // should follow. 6814 auto SAIt = SinkAfter.find(Instr); 6815 if (SAIt != SinkAfter.end()) { 6816 LLVM_DEBUG(dbgs() << "Sinking" << *SAIt->first << " after" 6817 << *SAIt->second 6818 << " to vectorize a 1st order recurrence.\n"); 6819 SinkAfterInverse[SAIt->second] = Instr; 6820 continue; 6821 } 6822 6823 Ingredients.push_back(Instr); 6824 6825 // Move instructions to handle first-order recurrences, step 2: push the 6826 // instruction to be sunk at its insertion point. 6827 auto SAInvIt = SinkAfterInverse.find(Instr); 6828 if (SAInvIt != SinkAfterInverse.end()) 6829 Ingredients.push_back(SAInvIt->second); 6830 } 6831 6832 // Introduce each ingredient into VPlan. 6833 for (Instruction *Instr : Ingredients) { 6834 if (RecipeBuilder.tryToCreateRecipe(Instr, Range, Plan, VPBB)) 6835 continue; 6836 6837 // Otherwise, if all widening options failed, Instruction is to be 6838 // replicated. This may create a successor for VPBB. 6839 VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication( 6840 Instr, Range, VPBB, PredInst2Recipe, Plan); 6841 if (NextVPBB != VPBB) { 6842 VPBB = NextVPBB; 6843 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 6844 : ""); 6845 } 6846 } 6847 } 6848 6849 // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks 6850 // may also be empty, such as the last one VPBB, reflecting original 6851 // basic-blocks with no recipes. 6852 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry()); 6853 assert(PreEntry->empty() && "Expecting empty pre-entry block."); 6854 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor()); 6855 VPBlockUtils::disconnectBlocks(PreEntry, Entry); 6856 delete PreEntry; 6857 6858 std::string PlanName; 6859 raw_string_ostream RSO(PlanName); 6860 unsigned VF = Range.Start; 6861 Plan->addVF(VF); 6862 RSO << "Initial VPlan for VF={" << VF; 6863 for (VF *= 2; VF < Range.End; VF *= 2) { 6864 Plan->addVF(VF); 6865 RSO << "," << VF; 6866 } 6867 RSO << "},UF>=1"; 6868 RSO.flush(); 6869 Plan->setName(PlanName); 6870 6871 return Plan; 6872 } 6873 6874 LoopVectorizationPlanner::VPlanPtr 6875 LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 6876 // Outer loop handling: They may require CFG and instruction level 6877 // transformations before even evaluating whether vectorization is profitable. 6878 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 6879 // the vectorization pipeline. 
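// The plan built here is shared by every VF in the queried range; e.g. for
// Range = [4, 16) it is tagged with VF = 4 and VF = 8 by the addVF loop at
// the end of this function.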
6880 assert(!OrigLoop->empty()); 6881 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 6882 6883 // Create new empty VPlan 6884 auto Plan = llvm::make_unique<VPlan>(); 6885 6886 // Build hierarchical CFG 6887 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 6888 HCFGBuilder.buildHierarchicalCFG(); 6889 6890 SmallPtrSet<Instruction *, 1> DeadInstructions; 6891 VPlanHCFGTransforms::VPInstructionsToVPRecipes( 6892 Plan, Legal->getInductionVars(), DeadInstructions); 6893 6894 for (unsigned VF = Range.Start; VF < Range.End; VF *= 2) 6895 Plan->addVF(VF); 6896 6897 return Plan; 6898 } 6899 6900 Value* LoopVectorizationPlanner::VPCallbackILV:: 6901 getOrCreateVectorValues(Value *V, unsigned Part) { 6902 return ILV.getOrCreateVectorValue(V, Part); 6903 } 6904 6905 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const { 6906 O << " +\n" 6907 << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 6908 IG->getInsertPos()->printAsOperand(O, false); 6909 if (User) { 6910 O << ", "; 6911 User->getOperand(0)->printAsOperand(O); 6912 } 6913 O << "\\l\""; 6914 for (unsigned i = 0; i < IG->getFactor(); ++i) 6915 if (Instruction *I = IG->getMember(i)) 6916 O << " +\n" 6917 << Indent << "\" " << VPlanIngredient(I) << " " << i << "\\l\""; 6918 } 6919 6920 void VPWidenRecipe::execute(VPTransformState &State) { 6921 for (auto &Instr : make_range(Begin, End)) 6922 State.ILV->widenInstruction(Instr); 6923 } 6924 6925 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 6926 assert(!State.Instance && "Int or FP induction being replicated."); 6927 State.ILV->widenIntOrFpInduction(IV, Trunc); 6928 } 6929 6930 void VPWidenPHIRecipe::execute(VPTransformState &State) { 6931 State.ILV->widenPHIInstruction(Phi, State.UF, State.VF); 6932 } 6933 6934 void VPBlendRecipe::execute(VPTransformState &State) { 6935 State.ILV->setDebugLocFromInst(State.Builder, Phi); 6936 // We know that all PHIs in non-header blocks are converted into 6937 // selects, so we don't have to worry about the insertion order and we 6938 // can just use the builder. 6939 // At this point we generate the predication tree. There may be 6940 // duplications since this is a simple recursive scan, but future 6941 // optimizations will clean it up. 6942 6943 unsigned NumIncoming = Phi->getNumIncomingValues(); 6944 6945 assert((User || NumIncoming == 1) && 6946 "Multiple predecessors with predecessors having a full mask"); 6947 // Generate a sequence of selects of the form: 6948 // SELECT(Mask3, In3, 6949 // SELECT(Mask2, In2, 6950 // ( ...))) 6951 InnerLoopVectorizer::VectorParts Entry(State.UF); 6952 for (unsigned In = 0; In < NumIncoming; ++In) { 6953 for (unsigned Part = 0; Part < State.UF; ++Part) { 6954 // We might have single edge PHIs (blocks) - use an identity 6955 // 'select' for the first PHI operand. 6956 Value *In0 = 6957 State.ILV->getOrCreateVectorValue(Phi->getIncomingValue(In), Part); 6958 if (In == 0) 6959 Entry[Part] = In0; // Initialize with the first incoming value. 6960 else { 6961 // Select between the current value and the previous incoming edge 6962 // based on the incoming mask. 
6963 Value *Cond = State.get(User->getOperand(In), Part); 6964 Entry[Part] = 6965 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 6966 } 6967 } 6968 } 6969 for (unsigned Part = 0; Part < State.UF; ++Part) 6970 State.ValueMap.setVectorValue(Phi, Part, Entry[Part]); 6971 } 6972 6973 void VPInterleaveRecipe::execute(VPTransformState &State) { 6974 assert(!State.Instance && "Interleave group being replicated."); 6975 if (!User) 6976 return State.ILV->vectorizeInterleaveGroup(IG->getInsertPos()); 6977 6978 // Last (and currently only) operand is a mask. 6979 InnerLoopVectorizer::VectorParts MaskValues(State.UF); 6980 VPValue *Mask = User->getOperand(User->getNumOperands() - 1); 6981 for (unsigned Part = 0; Part < State.UF; ++Part) 6982 MaskValues[Part] = State.get(Mask, Part); 6983 State.ILV->vectorizeInterleaveGroup(IG->getInsertPos(), &MaskValues); 6984 } 6985 6986 void VPReplicateRecipe::execute(VPTransformState &State) { 6987 if (State.Instance) { // Generate a single instance. 6988 State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated); 6989 // Insert scalar instance packing it into a vector. 6990 if (AlsoPack && State.VF > 1) { 6991 // If we're constructing lane 0, initialize to start from undef. 6992 if (State.Instance->Lane == 0) { 6993 Value *Undef = 6994 UndefValue::get(VectorType::get(Ingredient->getType(), State.VF)); 6995 State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef); 6996 } 6997 State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance); 6998 } 6999 return; 7000 } 7001 7002 // Generate scalar instances for all VF lanes of all UF parts, unless the 7003 // instruction is uniform, in which case generate only the first lane for each 7004 // of the UF parts. 7005 unsigned EndLane = IsUniform ? 1 : State.VF; 7006 for (unsigned Part = 0; Part < State.UF; ++Part) 7007 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 7008 State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated); 7009 } 7010 7011 void VPBranchOnMaskRecipe::execute(VPTransformState &State) { 7012 assert(State.Instance && "Branch on Mask works only on single instance."); 7013 7014 unsigned Part = State.Instance->Part; 7015 unsigned Lane = State.Instance->Lane; 7016 7017 Value *ConditionBit = nullptr; 7018 if (!User) // Block in mask is all-one. 7019 ConditionBit = State.Builder.getTrue(); 7020 else { 7021 VPValue *BlockInMask = User->getOperand(0); 7022 ConditionBit = State.get(BlockInMask, Part); 7023 if (ConditionBit->getType()->isVectorTy()) 7024 ConditionBit = State.Builder.CreateExtractElement( 7025 ConditionBit, State.Builder.getInt32(Lane)); 7026 } 7027 7028 // Replace the temporary unreachable terminator with a new conditional branch, 7029 // whose two destinations will be set later when they are created.
  auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
  assert(isa<UnreachableInst>(CurrentTerminator) &&
         "Expected to replace unreachable terminator with conditional branch.");
  auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
  CondBr->setSuccessor(0, nullptr);
  ReplaceInstWithInst(CurrentTerminator, CondBr);
}

void VPPredInstPHIRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Predicated instruction PHI works per instance.");
  Instruction *ScalarPredInst = cast<Instruction>(
      State.ValueMap.getScalarValue(PredInst, *State.Instance));
  BasicBlock *PredicatedBB = ScalarPredInst->getParent();
  BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
  assert(PredicatingBB && "Predicated block has no single predecessor.");

  // By current pack/unpack logic we need to generate only a single phi node:
  // if a vector value for the predicated instruction exists at this point it
  // means the instruction has vector users only, and a phi for the vector
  // value is needed. In this case the recipe of the predicated instruction is
  // marked to also do that packing, thereby "hoisting" the insert-element
  // sequence. Otherwise, a phi node for the scalar value is needed.
  unsigned Part = State.Instance->Part;
  if (State.ValueMap.hasVectorValue(PredInst, Part)) {
    Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
    InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
    PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
    VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
    VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
    State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
  } else {
    Type *PredInstType = PredInst->getType();
    PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
    Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
    Phi->addIncoming(ScalarPredInst, PredicatedBB);
    State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
  }
}

void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
  if (!User)
    return State.ILV->vectorizeMemoryInstruction(&Instr);

  // Last (and currently only) operand is a mask.
  InnerLoopVectorizer::VectorParts MaskValues(State.UF);
  VPValue *Mask = User->getOperand(User->getNumOperands() - 1);
  for (unsigned Part = 0; Part < State.UF; ++Part)
    MaskValues[Part] = State.get(Mask, Part);
  State.ILV->vectorizeMemoryInstruction(&Instr, &MaskValues);
}

// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying the
// input LLVM IR.
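// The path is guarded by the EnableVPlanNativePath flag; a typical way to
// exercise it on a test case is something along the lines of (illustrative
// invocation, not the only one):
//   opt -loop-vectorize -enable-vplan-native-path -S outer-loop.ll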
static bool processLoopInVPlanNativePath(
    Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
    LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
    TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
    OptimizationRemarkEmitter *ORE, LoopVectorizeHints &Hints) {

  assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
  Function *F = L->getHeader()->getParent();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
  LoopVectorizationCostModel CM(L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  // Use the planner for outer loop vectorization.
  // TODO: CM is not used at this point inside the planner. Turn CM into an
  // optional argument if we don't need it in the future.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM);

  // Get user vectorization factor.
  unsigned UserVF = Hints.getWidth();

  // Check the function attributes to find out if this function should be
  // optimized for size.
  bool OptForSize =
      Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();

  // Plan how to best vectorize, return the best VF and its cost.
  VectorizationFactor VF = LVP.planInVPlanNativePath(OptForSize, UserVF);

  // If we are stress testing VPlan builds, do not attempt to generate vector
  // code.
  if (VPlanBuildStressTest)
    return false;

  LVP.setBestPlan(VF.Width, 1);

  InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, UserVF, 1, LVL,
                         &CM);
  LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
                    << L->getHeader()->getParent()->getName() << "\"\n");
  LVP.executePlan(LB, DT);

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();

  LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::processLoop(Loop *L) {
  assert((EnableVPlanNativePath || L->empty()) &&
         "VPlan-native path is not enabled. Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
                    << L->getHeader()->getParent()->getName() << "\" from "
                    << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, DisableUnrolling, *ORE);

  LLVM_DEBUG(
      dbgs() << "LV: Loop hints:"
             << " force="
             << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                     ? "disabled"
                     : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                            ? "enabled"
                            : "?"))
             << " width=" << Hints.getWidth()
             << " unroll=" << Hints.getInterleave() << "\n");

  // Function containing loop
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.
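  // When remarks are requested (for example with clang's
  // -Rpass=loop-vectorize and -Rpass-missed=loop-vectorize), the per-loop
  // output typically looks something like the following (illustrative
  // wording only):
  //   remark: ...: vectorized loop (vectorization width: 4, interleaved count: 2)
  //   remark: ...: loop not vectorized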

  if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements(*ORE);
  LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, GetLAA, LI, ORE,
                                &Requirements, &Hints, DB, AC);
  if (!LVL.canVectorize(EnableVPlanNativePath)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check the function attributes to find out if this function should be
  // optimized for size.
  bool OptForSize =
      Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();

  // Entrance to the VPlan-native vectorization path. Outer loops are processed
  // here. They may require CFG and instruction level transformations before
  // even evaluating whether vectorization is profitable. Since we cannot
  // modify the incoming IR, we need to build VPlan upfront in the
  // vectorization pipeline.
  if (!L->empty())
    return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
                                        ORE, Hints);

  assert(L->empty() && "Inner loop expected.");
  // Check the loop for a trip count threshold: vectorize loops with a tiny
  // trip count by optimizing for size, to minimize overheads.
  // Prefer constant trip counts over profile data, over upper bound estimate.
  unsigned ExpectedTC = 0;
  bool HasExpectedTC = false;
  if (const SCEVConstant *ConstExits =
          dyn_cast<SCEVConstant>(SE->getBackedgeTakenCount(L))) {
    const APInt &ExitsCount = ConstExits->getAPInt();
    // We are interested in small values for ExpectedTC. Skip over those that
    // can't fit an unsigned.
    if (ExitsCount.ult(std::numeric_limits<unsigned>::max())) {
      ExpectedTC = static_cast<unsigned>(ExitsCount.getZExtValue()) + 1;
      HasExpectedTC = true;
    }
  }
  // ExpectedTC may be large because it's bound by a variable. Check
  // profiling information to validate we should vectorize.
  if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) {
    auto EstimatedTC = getLoopEstimatedTripCount(L);
    if (EstimatedTC) {
      ExpectedTC = *EstimatedTC;
      HasExpectedTC = true;
    }
  }
  if (!HasExpectedTC) {
    ExpectedTC = SE->getSmallConstantMaxTripCount(L);
    HasExpectedTC = (ExpectedTC > 0);
  }

  if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      // Loops with a very small trip count are considered for vectorization
      // under OptForSize, thereby making sure the cost of their loop body is
      // dominant, free of runtime guards and scalar iteration overheads.
      OptForSize = true;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem like it can be correct -- what if the loop
  // is an integer loop and the vector instructions selected are purely
  // integer vector instructions?
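  // Functions built without implicit floating point (for example via clang's
  // -mno-implicit-float; flag name given for illustration) carry the
  // 'noimplicitfloat' IR attribute, which is what the check below keys off.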
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    LLVM_DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
                         " attribute is used.\n");
    ORE->emit(createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                     "NoImplicitFloat", L)
              << "loop not vectorized due to NoImplicitFloat attribute");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    LLVM_DEBUG(
        dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
    ORE->emit(
        createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
        << "loop not vectorized due to unsafe FP support.");
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved) {
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM);

  // Get user vectorization factor.
  unsigned UserVF = Hints.getWidth();

  // Plan how to best vectorize, return the best VF and its cost.
  VectorizationFactor VF = LVP.plan(OptForSize, UserVF);

  // Select the interleave count.
  unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);

  // Get user interleave count.
  unsigned UserIC = Hints.getInterleave();

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                         "requirements.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  if (VF.Width == 1) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;
  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not legal to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is *legal* to vectorize the loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LVP.executePlan(LB, DT);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling a scalar loop when there are
    // no runtime checks about strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
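    // Concretely, AddRuntimeUnrollDisableMetaData (called further below)
    // attaches loop metadata of roughly this shape to the scalar remainder
    // loop (illustrative):
    //   !llvm.loop !{..., !{!"llvm.loop.unroll.runtime.disable"}}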
    if (!LB.areSafetyChecksAdded())
      DisableRuntimeUnroll = true;

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
    return false;

  bool Changed = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= processLoop(L);
  }

  // Process each loop nest in the function.
  return Changed;
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  bool Changed =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  return PA;
}
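// A minimal way to exercise this entry point through the new pass manager
// from the command line is, for example (illustrative invocation):
//   opt -passes=loop-vectorize -S input.ll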