//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is an ongoing development effort to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
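//
// As a purely illustrative example (not code taken from this pass), the
// vectorizer conceptually rewrites a scalar loop such as
//
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];
//
// into a wide loop that processes VF elements per iteration, plus a scalar
// epilogue for the remaining iterations; shown here for an assumed VF of 4:
//
//   int i = 0;
//   for (; i + 3 < n; i += 4) {
//     // one 'wide' iteration: four elements loaded, added and stored at once
//     a[i]     = b[i]     + c[i];
//     a[i + 1] = b[i + 1] + c[i + 1];
//     a[i + 2] = b[i + 2] + c[i + 2];
//     a[i + 3] = b[i + 3] + c[i + 3];
//   }
//   for (; i < n; ++i) // scalar epilogue handles the tail
//     a[i] = b[i] + c[i];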
53 // 54 //===----------------------------------------------------------------------===// 55 56 #include "llvm/Transforms/Vectorize/LoopVectorize.h" 57 #include "LoopVectorizationPlanner.h" 58 #include "VPRecipeBuilder.h" 59 #include "VPlan.h" 60 #include "VPlanHCFGBuilder.h" 61 #include "VPlanHCFGTransforms.h" 62 #include "VPlanPredicator.h" 63 #include "llvm/ADT/APInt.h" 64 #include "llvm/ADT/ArrayRef.h" 65 #include "llvm/ADT/DenseMap.h" 66 #include "llvm/ADT/DenseMapInfo.h" 67 #include "llvm/ADT/Hashing.h" 68 #include "llvm/ADT/MapVector.h" 69 #include "llvm/ADT/None.h" 70 #include "llvm/ADT/Optional.h" 71 #include "llvm/ADT/STLExtras.h" 72 #include "llvm/ADT/SetVector.h" 73 #include "llvm/ADT/SmallPtrSet.h" 74 #include "llvm/ADT/SmallVector.h" 75 #include "llvm/ADT/Statistic.h" 76 #include "llvm/ADT/StringRef.h" 77 #include "llvm/ADT/Twine.h" 78 #include "llvm/ADT/iterator_range.h" 79 #include "llvm/Analysis/AssumptionCache.h" 80 #include "llvm/Analysis/BasicAliasAnalysis.h" 81 #include "llvm/Analysis/BlockFrequencyInfo.h" 82 #include "llvm/Analysis/CFG.h" 83 #include "llvm/Analysis/CodeMetrics.h" 84 #include "llvm/Analysis/DemandedBits.h" 85 #include "llvm/Analysis/GlobalsModRef.h" 86 #include "llvm/Analysis/LoopAccessAnalysis.h" 87 #include "llvm/Analysis/LoopAnalysisManager.h" 88 #include "llvm/Analysis/LoopInfo.h" 89 #include "llvm/Analysis/LoopIterator.h" 90 #include "llvm/Analysis/MemorySSA.h" 91 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 92 #include "llvm/Analysis/ProfileSummaryInfo.h" 93 #include "llvm/Analysis/ScalarEvolution.h" 94 #include "llvm/Analysis/ScalarEvolutionExpander.h" 95 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 96 #include "llvm/Analysis/TargetLibraryInfo.h" 97 #include "llvm/Analysis/TargetTransformInfo.h" 98 #include "llvm/Analysis/VectorUtils.h" 99 #include "llvm/IR/Attributes.h" 100 #include "llvm/IR/BasicBlock.h" 101 #include "llvm/IR/CFG.h" 102 #include "llvm/IR/Constant.h" 103 #include "llvm/IR/Constants.h" 104 #include "llvm/IR/DataLayout.h" 105 #include "llvm/IR/DebugInfoMetadata.h" 106 #include "llvm/IR/DebugLoc.h" 107 #include "llvm/IR/DerivedTypes.h" 108 #include "llvm/IR/DiagnosticInfo.h" 109 #include "llvm/IR/Dominators.h" 110 #include "llvm/IR/Function.h" 111 #include "llvm/IR/IRBuilder.h" 112 #include "llvm/IR/InstrTypes.h" 113 #include "llvm/IR/Instruction.h" 114 #include "llvm/IR/Instructions.h" 115 #include "llvm/IR/IntrinsicInst.h" 116 #include "llvm/IR/Intrinsics.h" 117 #include "llvm/IR/LLVMContext.h" 118 #include "llvm/IR/Metadata.h" 119 #include "llvm/IR/Module.h" 120 #include "llvm/IR/Operator.h" 121 #include "llvm/IR/Type.h" 122 #include "llvm/IR/Use.h" 123 #include "llvm/IR/User.h" 124 #include "llvm/IR/Value.h" 125 #include "llvm/IR/ValueHandle.h" 126 #include "llvm/IR/Verifier.h" 127 #include "llvm/Pass.h" 128 #include "llvm/Support/Casting.h" 129 #include "llvm/Support/CommandLine.h" 130 #include "llvm/Support/Compiler.h" 131 #include "llvm/Support/Debug.h" 132 #include "llvm/Support/ErrorHandling.h" 133 #include "llvm/Support/MathExtras.h" 134 #include "llvm/Support/raw_ostream.h" 135 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 136 #include "llvm/Transforms/Utils/LoopSimplify.h" 137 #include "llvm/Transforms/Utils/LoopUtils.h" 138 #include "llvm/Transforms/Utils/LoopVersioning.h" 139 #include "llvm/Transforms/Utils/SizeOpts.h" 140 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 141 #include <algorithm> 142 #include <cassert> 143 #include <cstdint> 144 #include <cstdlib> 145 #include 
<functional> 146 #include <iterator> 147 #include <limits> 148 #include <memory> 149 #include <string> 150 #include <tuple> 151 #include <utility> 152 #include <vector> 153 154 using namespace llvm; 155 156 #define LV_NAME "loop-vectorize" 157 #define DEBUG_TYPE LV_NAME 158 159 /// @{ 160 /// Metadata attribute names 161 static const char *const LLVMLoopVectorizeFollowupAll = 162 "llvm.loop.vectorize.followup_all"; 163 static const char *const LLVMLoopVectorizeFollowupVectorized = 164 "llvm.loop.vectorize.followup_vectorized"; 165 static const char *const LLVMLoopVectorizeFollowupEpilogue = 166 "llvm.loop.vectorize.followup_epilogue"; 167 /// @} 168 169 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 170 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 171 172 /// Loops with a known constant trip count below this number are vectorized only 173 /// if no scalar iteration overheads are incurred. 174 static cl::opt<unsigned> TinyTripCountVectorThreshold( 175 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 176 cl::desc("Loops with a constant trip count that is smaller than this " 177 "value are vectorized only if no scalar iteration overheads " 178 "are incurred.")); 179 180 static cl::opt<bool> MaximizeBandwidth( 181 "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, 182 cl::desc("Maximize bandwidth when selecting vectorization factor which " 183 "will be determined by the smallest type in loop.")); 184 185 static cl::opt<bool> EnableInterleavedMemAccesses( 186 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, 187 cl::desc("Enable vectorization on interleaved memory accesses in a loop")); 188 189 /// An interleave-group may need masking if it resides in a block that needs 190 /// predication, or in order to mask away gaps. 191 static cl::opt<bool> EnableMaskedInterleavedMemAccesses( 192 "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, 193 cl::desc("Enable vectorization on masked interleaved memory accesses in a loop")); 194 195 /// We don't interleave loops with a known constant trip count below this 196 /// number. 197 static const unsigned TinyTripCountInterleaveThreshold = 128; 198 199 static cl::opt<unsigned> ForceTargetNumScalarRegs( 200 "force-target-num-scalar-regs", cl::init(0), cl::Hidden, 201 cl::desc("A flag that overrides the target's number of scalar registers.")); 202 203 static cl::opt<unsigned> ForceTargetNumVectorRegs( 204 "force-target-num-vector-regs", cl::init(0), cl::Hidden, 205 cl::desc("A flag that overrides the target's number of vector registers.")); 206 207 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor( 208 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden, 209 cl::desc("A flag that overrides the target's max interleave factor for " 210 "scalar loops.")); 211 212 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor( 213 "force-target-max-vector-interleave", cl::init(0), cl::Hidden, 214 cl::desc("A flag that overrides the target's max interleave factor for " 215 "vectorized loops.")); 216 217 static cl::opt<unsigned> ForceTargetInstructionCost( 218 "force-target-instruction-cost", cl::init(0), cl::Hidden, 219 cl::desc("A flag that overrides the target's expected cost for " 220 "an instruction to a single constant value. 
Mostly " 221 "useful for getting consistent testing.")); 222 223 static cl::opt<unsigned> SmallLoopCost( 224 "small-loop-cost", cl::init(20), cl::Hidden, 225 cl::desc( 226 "The cost of a loop that is considered 'small' by the interleaver.")); 227 228 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 229 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 230 cl::desc("Enable the use of the block frequency analysis to access PGO " 231 "heuristics minimizing code growth in cold regions and being more " 232 "aggressive in hot regions.")); 233 234 // Runtime interleave loops for load/store throughput. 235 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 236 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 237 cl::desc( 238 "Enable runtime interleaving until load/store ports are saturated")); 239 240 /// The number of stores in a loop that are allowed to need predication. 241 static cl::opt<unsigned> NumberOfStoresToPredicate( 242 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 243 cl::desc("Max number of stores to be predicated behind an if.")); 244 245 static cl::opt<bool> EnableIndVarRegisterHeur( 246 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 247 cl::desc("Count the induction variable only once when interleaving")); 248 249 static cl::opt<bool> EnableCondStoresVectorization( 250 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 251 cl::desc("Enable if predication of stores during vectorization.")); 252 253 static cl::opt<unsigned> MaxNestedScalarReductionIC( 254 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 255 cl::desc("The maximum interleave count to use when interleaving a scalar " 256 "reduction in a nested loop.")); 257 258 cl::opt<bool> EnableVPlanNativePath( 259 "enable-vplan-native-path", cl::init(false), cl::Hidden, 260 cl::desc("Enable VPlan-native vectorization path with " 261 "support for outer loop vectorization.")); 262 263 // FIXME: Remove this switch once we have divergence analysis. Currently we 264 // assume divergent non-backedge branches when this switch is true. 265 cl::opt<bool> EnableVPlanPredication( 266 "enable-vplan-predication", cl::init(false), cl::Hidden, 267 cl::desc("Enable VPlan-native vectorization path predicator with " 268 "support for outer loop vectorization.")); 269 270 // This flag enables the stress testing of the VPlan H-CFG construction in the 271 // VPlan-native vectorization path. It must be used in conjuction with 272 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the 273 // verification of the H-CFGs built. 274 static cl::opt<bool> VPlanBuildStressTest( 275 "vplan-build-stress-test", cl::init(false), cl::Hidden, 276 cl::desc( 277 "Build VPlan for every supported loop nest in the function and bail " 278 "out right after the build (stress test the VPlan H-CFG construction " 279 "in the VPlan-native vectorization path).")); 280 281 cl::opt<bool> llvm::EnableLoopInterleaving( 282 "interleave-loops", cl::init(true), cl::Hidden, 283 cl::desc("Enable loop interleaving in Loop vectorization passes")); 284 cl::opt<bool> llvm::EnableLoopVectorization( 285 "vectorize-loops", cl::init(true), cl::Hidden, 286 cl::desc("Run the Loop vectorization passes")); 287 288 /// A helper function for converting Scalar types to vector types. 289 /// If the incoming type is void, we return void. If the VF is 1, we return 290 /// the scalar type. 
291 static Type *ToVectorTy(Type *Scalar, unsigned VF) { 292 if (Scalar->isVoidTy() || VF == 1) 293 return Scalar; 294 return VectorType::get(Scalar, VF); 295 } 296 297 /// A helper function that returns the type of loaded or stored value. 298 static Type *getMemInstValueType(Value *I) { 299 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 300 "Expected Load or Store instruction"); 301 if (auto *LI = dyn_cast<LoadInst>(I)) 302 return LI->getType(); 303 return cast<StoreInst>(I)->getValueOperand()->getType(); 304 } 305 306 /// A helper function that returns true if the given type is irregular. The 307 /// type is irregular if its allocated size doesn't equal the store size of an 308 /// element of the corresponding vector type at the given vectorization factor. 309 static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) { 310 // Determine if an array of VF elements of type Ty is "bitcast compatible" 311 // with a <VF x Ty> vector. 312 if (VF > 1) { 313 auto *VectorTy = VectorType::get(Ty, VF); 314 return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy); 315 } 316 317 // If the vectorization factor is one, we just check if an array of type Ty 318 // requires padding between elements. 319 return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty); 320 } 321 322 /// A helper function that returns the reciprocal of the block probability of 323 /// predicated blocks. If we return X, we are assuming the predicated block 324 /// will execute once for every X iterations of the loop header. 325 /// 326 /// TODO: We should use actual block probability here, if available. Currently, 327 /// we always assume predicated blocks have a 50% chance of executing. 328 static unsigned getReciprocalPredBlockProb() { return 2; } 329 330 /// A helper function that adds a 'fast' flag to floating-point operations. 331 static Value *addFastMathFlag(Value *V) { 332 if (isa<FPMathOperator>(V)) 333 cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast()); 334 return V; 335 } 336 337 static Value *addFastMathFlag(Value *V, FastMathFlags FMF) { 338 if (isa<FPMathOperator>(V)) 339 cast<Instruction>(V)->setFastMathFlags(FMF); 340 return V; 341 } 342 343 /// A helper function that returns an integer or floating-point constant with 344 /// value C. 345 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) { 346 return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C) 347 : ConstantFP::get(Ty, C); 348 } 349 350 namespace llvm { 351 352 /// InnerLoopVectorizer vectorizes loops which contain only one basic 353 /// block to a specified vectorization factor (VF). 354 /// This class performs the widening of scalars into vectors, or multiple 355 /// scalars. This class also implements the following features: 356 /// * It inserts an epilogue loop for handling loops that don't have iteration 357 /// counts that are known to be a multiple of the vectorization factor. 358 /// * It handles the code generation for reduction variables. 359 /// * Scalarization (implementation using scalars) of un-vectorizable 360 /// instructions. 361 /// InnerLoopVectorizer does not perform any vectorization-legality 362 /// checks, and relies on the caller to check for the different legality 363 /// aspects. The InnerLoopVectorizer relies on the 364 /// LoopVectorizationLegality class to provide information about the induction 365 /// and reduction variables that were found to a given vectorization factor. 
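///
/// As a rough, illustrative sketch (not the exact control flow this class
/// emits), the generated code for an assumed VF of 4 and UF of 1 is laid out
/// roughly as:
///
///   i = 0;
///   if (trip_count < 4) goto scalar_loop;           // minimum-iteration check
///   vector_trip_count = trip_count - trip_count % 4;
///   for (; i < vector_trip_count; i += 4)
///     ...widened instructions...                    // vector loop body
///   if (i == trip_count) goto exit;
/// scalar_loop:
///   for (; i < trip_count; ++i)
///     ...original scalar instructions...            // scalar epilogue (tail)
/// exit: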
366 class InnerLoopVectorizer { 367 public: 368 InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, 369 LoopInfo *LI, DominatorTree *DT, 370 const TargetLibraryInfo *TLI, 371 const TargetTransformInfo *TTI, AssumptionCache *AC, 372 OptimizationRemarkEmitter *ORE, unsigned VecWidth, 373 unsigned UnrollFactor, LoopVectorizationLegality *LVL, 374 LoopVectorizationCostModel *CM) 375 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI), 376 AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor), 377 Builder(PSE.getSE()->getContext()), 378 VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {} 379 virtual ~InnerLoopVectorizer() = default; 380 381 /// Create a new empty loop. Unlink the old loop and connect the new one. 382 /// Return the pre-header block of the new loop. 383 BasicBlock *createVectorizedLoopSkeleton(); 384 385 /// Widen a single instruction within the innermost loop. 386 void widenInstruction(Instruction &I); 387 388 /// Fix the vectorized code, taking care of header phi's, live-outs, and more. 389 void fixVectorizedLoop(); 390 391 // Return true if any runtime check is added. 392 bool areSafetyChecksAdded() { return AddedSafetyChecks; } 393 394 /// A type for vectorized values in the new loop. Each value from the 395 /// original loop, when vectorized, is represented by UF vector values in the 396 /// new unrolled loop, where UF is the unroll factor. 397 using VectorParts = SmallVector<Value *, 2>; 398 399 /// Vectorize a single PHINode in a block. This method handles the induction 400 /// variable canonicalization. It supports both VF = 1 for unrolled loops and 401 /// arbitrary length vectors. 402 void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF); 403 404 /// A helper function to scalarize a single Instruction in the innermost loop. 405 /// Generates a sequence of scalar instances for each lane between \p MinLane 406 /// and \p MaxLane, times each part between \p MinPart and \p MaxPart, 407 /// inclusive.. 408 void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance, 409 bool IfPredicateInstr); 410 411 /// Widen an integer or floating-point induction variable \p IV. If \p Trunc 412 /// is provided, the integer induction variable will first be truncated to 413 /// the corresponding type. 414 void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr); 415 416 /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a 417 /// vector or scalar value on-demand if one is not yet available. When 418 /// vectorizing a loop, we visit the definition of an instruction before its 419 /// uses. When visiting the definition, we either vectorize or scalarize the 420 /// instruction, creating an entry for it in the corresponding map. (In some 421 /// cases, such as induction variables, we will create both vector and scalar 422 /// entries.) Then, as we encounter uses of the definition, we derive values 423 /// for each scalar or vector use unless such a value is already available. 424 /// For example, if we scalarize a definition and one of its uses is vector, 425 /// we build the required vector on-demand with an insertelement sequence 426 /// when visiting the use. Otherwise, if the use is scalar, we can use the 427 /// existing scalar definition. 428 /// 429 /// Return a value in the new loop corresponding to \p V from the original 430 /// loop at unroll index \p Part. If the value has already been vectorized, 431 /// the corresponding vector entry in VectorLoopValueMap is returned. 
If, 432 /// however, the value has a scalar entry in VectorLoopValueMap, we construct 433 /// a new vector value on-demand by inserting the scalar values into a vector 434 /// with an insertelement sequence. If the value has been neither vectorized 435 /// nor scalarized, it must be loop invariant, so we simply broadcast the 436 /// value into a vector. 437 Value *getOrCreateVectorValue(Value *V, unsigned Part); 438 439 /// Return a value in the new loop corresponding to \p V from the original 440 /// loop at unroll and vector indices \p Instance. If the value has been 441 /// vectorized but not scalarized, the necessary extractelement instruction 442 /// will be generated. 443 Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance); 444 445 /// Construct the vector value of a scalarized value \p V one lane at a time. 446 void packScalarIntoVectorValue(Value *V, const VPIteration &Instance); 447 448 /// Try to vectorize the interleaved access group that \p Instr belongs to, 449 /// optionally masking the vector operations if \p BlockInMask is non-null. 450 void vectorizeInterleaveGroup(Instruction *Instr, 451 VectorParts *BlockInMask = nullptr); 452 453 /// Vectorize Load and Store instructions, optionally masking the vector 454 /// operations if \p BlockInMask is non-null. 455 void vectorizeMemoryInstruction(Instruction *Instr, 456 VectorParts *BlockInMask = nullptr); 457 458 /// Set the debug location in the builder using the debug location in 459 /// the instruction. 460 void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr); 461 462 /// Fix the non-induction PHIs in the OrigPHIsToFix vector. 463 void fixNonInductionPHIs(void); 464 465 protected: 466 friend class LoopVectorizationPlanner; 467 468 /// A small list of PHINodes. 469 using PhiVector = SmallVector<PHINode *, 4>; 470 471 /// A type for scalarized values in the new loop. Each value from the 472 /// original loop, when scalarized, is represented by UF x VF scalar values 473 /// in the new unrolled loop, where UF is the unroll factor and VF is the 474 /// vectorization factor. 475 using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>; 476 477 /// Set up the values of the IVs correctly when exiting the vector loop. 478 void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, 479 Value *CountRoundDown, Value *EndValue, 480 BasicBlock *MiddleBlock); 481 482 /// Create a new induction variable inside L. 483 PHINode *createInductionVariable(Loop *L, Value *Start, Value *End, 484 Value *Step, Instruction *DL); 485 486 /// Handle all cross-iteration phis in the header. 487 void fixCrossIterationPHIs(); 488 489 /// Fix a first-order recurrence. This is the second phase of vectorizing 490 /// this phi node. 491 void fixFirstOrderRecurrence(PHINode *Phi); 492 493 /// Fix a reduction cross-iteration phi. This is the second phase of 494 /// vectorizing this phi node. 495 void fixReduction(PHINode *Phi); 496 497 /// The Loop exit block may have single value PHI nodes with some 498 /// incoming value. While vectorizing we only handled real values 499 /// that were defined inside the loop and we should have one value for 500 /// each predecessor of its parent basic block. See PR14725. 501 void fixLCSSAPHIs(); 502 503 /// Iteratively sink the scalarized operands of a predicated instruction into 504 /// the block that was created for it. 505 void sinkScalarOperands(Instruction *PredInst); 506 507 /// Shrinks vector element sizes to the smallest bitwidth they can be legally 508 /// represented as. 
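  ///
  /// Illustrative example only: if the original scalar loop computes
  ///   uint8_t x = a[i];
  ///   uint32_t y = x + 5;
  ///   b[i] = (uint8_t)y;
  /// then only the low 8 bits of the intermediate value are ever demanded, so
  /// the widened addition can be performed on <VF x i8> vectors instead of
  /// <VF x i32>, reducing register pressure for the same vectorization factor.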
509 void truncateToMinimalBitwidths(); 510 511 /// Insert the new loop to the loop hierarchy and pass manager 512 /// and update the analysis passes. 513 void updateAnalysis(); 514 515 /// Create a broadcast instruction. This method generates a broadcast 516 /// instruction (shuffle) for loop invariant values and for the induction 517 /// value. If this is the induction variable then we extend it to N, N+1, ... 518 /// this is needed because each iteration in the loop corresponds to a SIMD 519 /// element. 520 virtual Value *getBroadcastInstrs(Value *V); 521 522 /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...) 523 /// to each vector element of Val. The sequence starts at StartIndex. 524 /// \p Opcode is relevant for FP induction variable. 525 virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step, 526 Instruction::BinaryOps Opcode = 527 Instruction::BinaryOpsEnd); 528 529 /// Compute scalar induction steps. \p ScalarIV is the scalar induction 530 /// variable on which to base the steps, \p Step is the size of the step, and 531 /// \p EntryVal is the value from the original loop that maps to the steps. 532 /// Note that \p EntryVal doesn't have to be an induction variable - it 533 /// can also be a truncate instruction. 534 void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal, 535 const InductionDescriptor &ID); 536 537 /// Create a vector induction phi node based on an existing scalar one. \p 538 /// EntryVal is the value from the original loop that maps to the vector phi 539 /// node, and \p Step is the loop-invariant step. If \p EntryVal is a 540 /// truncate instruction, instead of widening the original IV, we widen a 541 /// version of the IV truncated to \p EntryVal's type. 542 void createVectorIntOrFpInductionPHI(const InductionDescriptor &II, 543 Value *Step, Instruction *EntryVal); 544 545 /// Returns true if an instruction \p I should be scalarized instead of 546 /// vectorized for the chosen vectorization factor. 547 bool shouldScalarizeInstruction(Instruction *I) const; 548 549 /// Returns true if we should generate a scalar version of \p IV. 550 bool needsScalarInduction(Instruction *IV) const; 551 552 /// If there is a cast involved in the induction variable \p ID, which should 553 /// be ignored in the vectorized loop body, this function records the 554 /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the 555 /// cast. We had already proved that the casted Phi is equal to the uncasted 556 /// Phi in the vectorized loop (under a runtime guard), and therefore 557 /// there is no need to vectorize the cast - the same value can be used in the 558 /// vector loop for both the Phi and the cast. 559 /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified, 560 /// Otherwise, \p VectorLoopValue is a widened/vectorized value. 561 /// 562 /// \p EntryVal is the value from the original loop that maps to the vector 563 /// phi node and is used to distinguish what is the IV currently being 564 /// processed - original one (if \p EntryVal is a phi corresponding to the 565 /// original IV) or the "newly-created" one based on the proof mentioned above 566 /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the 567 /// latter case \p EntryVal is a TruncInst and we must not record anything for 568 /// that IV, but it's error-prone to expect callers of this routine to care 569 /// about that, hence this explicit parameter. 
570 void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID, 571 const Instruction *EntryVal, 572 Value *VectorLoopValue, 573 unsigned Part, 574 unsigned Lane = UINT_MAX); 575 576 /// Generate a shuffle sequence that will reverse the vector Vec. 577 virtual Value *reverseVector(Value *Vec); 578 579 /// Returns (and creates if needed) the original loop trip count. 580 Value *getOrCreateTripCount(Loop *NewLoop); 581 582 /// Returns (and creates if needed) the trip count of the widened loop. 583 Value *getOrCreateVectorTripCount(Loop *NewLoop); 584 585 /// Returns a bitcasted value to the requested vector type. 586 /// Also handles bitcasts of vector<float> <-> vector<pointer> types. 587 Value *createBitOrPointerCast(Value *V, VectorType *DstVTy, 588 const DataLayout &DL); 589 590 /// Emit a bypass check to see if the vector trip count is zero, including if 591 /// it overflows. 592 void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass); 593 594 /// Emit a bypass check to see if all of the SCEV assumptions we've 595 /// had to make are correct. 596 void emitSCEVChecks(Loop *L, BasicBlock *Bypass); 597 598 /// Emit bypass checks to check any memory assumptions we may have made. 599 void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass); 600 601 /// Compute the transformed value of Index at offset StartValue using step 602 /// StepValue. 603 /// For integer induction, returns StartValue + Index * StepValue. 604 /// For pointer induction, returns StartValue[Index * StepValue]. 605 /// FIXME: The newly created binary instructions should contain nsw/nuw 606 /// flags, which can be found from the original scalar operations. 607 Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE, 608 const DataLayout &DL, 609 const InductionDescriptor &ID) const; 610 611 /// Add additional metadata to \p To that was not present on \p Orig. 612 /// 613 /// Currently this is used to add the noalias annotations based on the 614 /// inserted memchecks. Use this for instructions that are *cloned* into the 615 /// vector loop. 616 void addNewMetadata(Instruction *To, const Instruction *Orig); 617 618 /// Add metadata from one instruction to another. 619 /// 620 /// This includes both the original MDs from \p From and additional ones (\see 621 /// addNewMetadata). Use this for *newly created* instructions in the vector 622 /// loop. 623 void addMetadata(Instruction *To, Instruction *From); 624 625 /// Similar to the previous function but it adds the metadata to a 626 /// vector of instructions. 627 void addMetadata(ArrayRef<Value *> To, Instruction *From); 628 629 /// The original loop. 630 Loop *OrigLoop; 631 632 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies 633 /// dynamic knowledge to simplify SCEV expressions and converts them to a 634 /// more usable form. 635 PredicatedScalarEvolution &PSE; 636 637 /// Loop Info. 638 LoopInfo *LI; 639 640 /// Dominator Tree. 641 DominatorTree *DT; 642 643 /// Alias Analysis. 644 AliasAnalysis *AA; 645 646 /// Target Library Info. 647 const TargetLibraryInfo *TLI; 648 649 /// Target Transform Info. 650 const TargetTransformInfo *TTI; 651 652 /// Assumption Cache. 653 AssumptionCache *AC; 654 655 /// Interface to emit optimization remarks. 656 OptimizationRemarkEmitter *ORE; 657 658 /// LoopVersioning. It's only set up (non-null) if memchecks were 659 /// used. 660 /// 661 /// This is currently only used to add no-alias metadata based on the 662 /// memchecks. 
The actually versioning is performed manually. 663 std::unique_ptr<LoopVersioning> LVer; 664 665 /// The vectorization SIMD factor to use. Each vector will have this many 666 /// vector elements. 667 unsigned VF; 668 669 /// The vectorization unroll factor to use. Each scalar is vectorized to this 670 /// many different vector instructions. 671 unsigned UF; 672 673 /// The builder that we use 674 IRBuilder<> Builder; 675 676 // --- Vectorization state --- 677 678 /// The vector-loop preheader. 679 BasicBlock *LoopVectorPreHeader; 680 681 /// The scalar-loop preheader. 682 BasicBlock *LoopScalarPreHeader; 683 684 /// Middle Block between the vector and the scalar. 685 BasicBlock *LoopMiddleBlock; 686 687 /// The ExitBlock of the scalar loop. 688 BasicBlock *LoopExitBlock; 689 690 /// The vector loop body. 691 BasicBlock *LoopVectorBody; 692 693 /// The scalar loop body. 694 BasicBlock *LoopScalarBody; 695 696 /// A list of all bypass blocks. The first block is the entry of the loop. 697 SmallVector<BasicBlock *, 4> LoopBypassBlocks; 698 699 /// The new Induction variable which was added to the new block. 700 PHINode *Induction = nullptr; 701 702 /// The induction variable of the old basic block. 703 PHINode *OldInduction = nullptr; 704 705 /// Maps values from the original loop to their corresponding values in the 706 /// vectorized loop. A key value can map to either vector values, scalar 707 /// values or both kinds of values, depending on whether the key was 708 /// vectorized and scalarized. 709 VectorizerValueMap VectorLoopValueMap; 710 711 /// Store instructions that were predicated. 712 SmallVector<Instruction *, 4> PredicatedInstructions; 713 714 /// Trip count of the original loop. 715 Value *TripCount = nullptr; 716 717 /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)) 718 Value *VectorTripCount = nullptr; 719 720 /// The legality analysis. 721 LoopVectorizationLegality *Legal; 722 723 /// The profitablity analysis. 724 LoopVectorizationCostModel *Cost; 725 726 // Record whether runtime checks are added. 727 bool AddedSafetyChecks = false; 728 729 // Holds the end values for each induction variable. We save the end values 730 // so we can later fix-up the external users of the induction variables. 731 DenseMap<PHINode *, Value *> IVEndValues; 732 733 // Vector of original scalar PHIs whose corresponding widened PHIs need to be 734 // fixed up at the end of vector code generation. 735 SmallVector<PHINode *, 8> OrigPHIsToFix; 736 }; 737 738 class InnerLoopUnroller : public InnerLoopVectorizer { 739 public: 740 InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE, 741 LoopInfo *LI, DominatorTree *DT, 742 const TargetLibraryInfo *TLI, 743 const TargetTransformInfo *TTI, AssumptionCache *AC, 744 OptimizationRemarkEmitter *ORE, unsigned UnrollFactor, 745 LoopVectorizationLegality *LVL, 746 LoopVectorizationCostModel *CM) 747 : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1, 748 UnrollFactor, LVL, CM) {} 749 750 private: 751 Value *getBroadcastInstrs(Value *V) override; 752 Value *getStepVector(Value *Val, int StartIdx, Value *Step, 753 Instruction::BinaryOps Opcode = 754 Instruction::BinaryOpsEnd) override; 755 Value *reverseVector(Value *Vec) override; 756 }; 757 758 } // end namespace llvm 759 760 /// Look for a meaningful debug location on the instruction or it's 761 /// operands. 
762 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) { 763 if (!I) 764 return I; 765 766 DebugLoc Empty; 767 if (I->getDebugLoc() != Empty) 768 return I; 769 770 for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) { 771 if (Instruction *OpInst = dyn_cast<Instruction>(*OI)) 772 if (OpInst->getDebugLoc() != Empty) 773 return OpInst; 774 } 775 776 return I; 777 } 778 779 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) { 780 if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) { 781 const DILocation *DIL = Inst->getDebugLoc(); 782 if (DIL && Inst->getFunction()->isDebugInfoForProfiling() && 783 !isa<DbgInfoIntrinsic>(Inst)) { 784 auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(UF * VF); 785 if (NewDIL) 786 B.SetCurrentDebugLocation(NewDIL.getValue()); 787 else 788 LLVM_DEBUG(dbgs() 789 << "Failed to create new discriminator: " 790 << DIL->getFilename() << " Line: " << DIL->getLine()); 791 } 792 else 793 B.SetCurrentDebugLocation(DIL); 794 } else 795 B.SetCurrentDebugLocation(DebugLoc()); 796 } 797 798 #ifndef NDEBUG 799 /// \return string containing a file name and a line # for the given loop. 800 static std::string getDebugLocString(const Loop *L) { 801 std::string Result; 802 if (L) { 803 raw_string_ostream OS(Result); 804 if (const DebugLoc LoopDbgLoc = L->getStartLoc()) 805 LoopDbgLoc.print(OS); 806 else 807 // Just print the module name. 808 OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier(); 809 OS.flush(); 810 } 811 return Result; 812 } 813 #endif 814 815 void InnerLoopVectorizer::addNewMetadata(Instruction *To, 816 const Instruction *Orig) { 817 // If the loop was versioned with memchecks, add the corresponding no-alias 818 // metadata. 819 if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig))) 820 LVer->annotateInstWithNoAlias(To, Orig); 821 } 822 823 void InnerLoopVectorizer::addMetadata(Instruction *To, 824 Instruction *From) { 825 propagateMetadata(To, From); 826 addNewMetadata(To, From); 827 } 828 829 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To, 830 Instruction *From) { 831 for (Value *V : To) { 832 if (Instruction *I = dyn_cast<Instruction>(V)) 833 addMetadata(I, From); 834 } 835 } 836 837 namespace llvm { 838 839 // Loop vectorization cost-model hints how the scalar epilogue loop should be 840 // lowered. 841 enum ScalarEpilogueLowering { 842 843 // The default: allowing scalar epilogues. 844 CM_ScalarEpilogueAllowed, 845 846 // Vectorization with OptForSize: don't allow epilogues. 847 CM_ScalarEpilogueNotAllowedOptSize, 848 849 // A special case of vectorisation with OptForSize: loops with a very small 850 // trip count are considered for vectorization under OptForSize, thereby 851 // making sure the cost of their loop body is dominant, free of runtime 852 // guards and scalar iteration overheads. 853 CM_ScalarEpilogueNotAllowedLowTripLoop, 854 855 // Loop hint predicate indicating an epilogue is undesired. 856 CM_ScalarEpilogueNotNeededPredicatePragma 857 }; 858 859 /// LoopVectorizationCostModel - estimates the expected speedups due to 860 /// vectorization. 861 /// In many cases vectorization is not profitable. This can happen because of 862 /// a number of reasons. In this class we mainly attempt to predict the 863 /// expected speedup/slowdowns due to the supported instruction set. We use the 864 /// TargetTransformInfo to query the different backends for the cost of 865 /// different operations. 
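///
/// A simplified, illustrative view of the decision (the real calculation also
/// models type legalization and scalarization overhead): if the scalar loop
/// body costs 8 units per iteration and the widened body costs 12 units at
/// VF = 4, the per-iteration cost of the vector loop is 12 / 4 = 3, so
/// vectorizing at VF = 4 is expected to be roughly 8 / 3 = 2.7x cheaper per
/// original iteration and would be preferred over the scalar loop.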
866 class LoopVectorizationCostModel { 867 public: 868 LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, 869 PredicatedScalarEvolution &PSE, LoopInfo *LI, 870 LoopVectorizationLegality *Legal, 871 const TargetTransformInfo &TTI, 872 const TargetLibraryInfo *TLI, DemandedBits *DB, 873 AssumptionCache *AC, 874 OptimizationRemarkEmitter *ORE, const Function *F, 875 const LoopVectorizeHints *Hints, 876 InterleavedAccessInfo &IAI) 877 : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), 878 TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F), 879 Hints(Hints), InterleaveInfo(IAI) {} 880 881 /// \return An upper bound for the vectorization factor, or None if 882 /// vectorization and interleaving should be avoided up front. 883 Optional<unsigned> computeMaxVF(); 884 885 /// \return True if runtime checks are required for vectorization, and false 886 /// otherwise. 887 bool runtimeChecksRequired(); 888 889 /// \return The most profitable vectorization factor and the cost of that VF. 890 /// This method checks every power of two up to MaxVF. If UserVF is not ZERO 891 /// then this vectorization factor will be selected if vectorization is 892 /// possible. 893 VectorizationFactor selectVectorizationFactor(unsigned MaxVF); 894 895 /// Setup cost-based decisions for user vectorization factor. 896 void selectUserVectorizationFactor(unsigned UserVF) { 897 collectUniformsAndScalars(UserVF); 898 collectInstsToScalarize(UserVF); 899 } 900 901 /// \return The size (in bits) of the smallest and widest types in the code 902 /// that needs to be vectorized. We ignore values that remain scalar such as 903 /// 64 bit loop indices. 904 std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); 905 906 /// \return The desired interleave count. 907 /// If interleave count has been specified by metadata it will be returned. 908 /// Otherwise, the interleave count is computed and returned. VF and LoopCost 909 /// are the selected vectorization factor and the cost of the selected VF. 910 unsigned selectInterleaveCount(unsigned VF, unsigned LoopCost); 911 912 /// Memory access instruction may be vectorized in more than one way. 913 /// Form of instruction after vectorization depends on cost. 914 /// This function takes cost-based decisions for Load/Store instructions 915 /// and collects them in a map. This decisions map is used for building 916 /// the lists of loop-uniform and loop-scalar instructions. 917 /// The calculated cost is saved with widening decision in order to 918 /// avoid redundant calculations. 919 void setCostBasedWideningDecision(unsigned VF); 920 921 /// A struct that represents some properties of the register usage 922 /// of a loop. 923 struct RegisterUsage { 924 /// Holds the number of loop invariant values that are used in the loop. 925 unsigned LoopInvariantRegs; 926 927 /// Holds the maximum number of concurrent live intervals in the loop. 928 unsigned MaxLocalUsers; 929 }; 930 931 /// \return Returns information about the register usages of the loop for the 932 /// given vectorization factors. 933 SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs); 934 935 /// Collect values we want to ignore in the cost model. 936 void collectValuesToIgnore(); 937 938 /// \returns The smallest bitwidth each instruction can be represented with. 939 /// The vector equivalents of these instructions should be truncated to this 940 /// type. 
941 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { 942 return MinBWs; 943 } 944 945 /// \returns True if it is more profitable to scalarize instruction \p I for 946 /// vectorization factor \p VF. 947 bool isProfitableToScalarize(Instruction *I, unsigned VF) const { 948 assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1."); 949 950 // Cost model is not run in the VPlan-native path - return conservative 951 // result until this changes. 952 if (EnableVPlanNativePath) 953 return false; 954 955 auto Scalars = InstsToScalarize.find(VF); 956 assert(Scalars != InstsToScalarize.end() && 957 "VF not yet analyzed for scalarization profitability"); 958 return Scalars->second.find(I) != Scalars->second.end(); 959 } 960 961 /// Returns true if \p I is known to be uniform after vectorization. 962 bool isUniformAfterVectorization(Instruction *I, unsigned VF) const { 963 if (VF == 1) 964 return true; 965 966 // Cost model is not run in the VPlan-native path - return conservative 967 // result until this changes. 968 if (EnableVPlanNativePath) 969 return false; 970 971 auto UniformsPerVF = Uniforms.find(VF); 972 assert(UniformsPerVF != Uniforms.end() && 973 "VF not yet analyzed for uniformity"); 974 return UniformsPerVF->second.find(I) != UniformsPerVF->second.end(); 975 } 976 977 /// Returns true if \p I is known to be scalar after vectorization. 978 bool isScalarAfterVectorization(Instruction *I, unsigned VF) const { 979 if (VF == 1) 980 return true; 981 982 // Cost model is not run in the VPlan-native path - return conservative 983 // result until this changes. 984 if (EnableVPlanNativePath) 985 return false; 986 987 auto ScalarsPerVF = Scalars.find(VF); 988 assert(ScalarsPerVF != Scalars.end() && 989 "Scalar values are not calculated for VF"); 990 return ScalarsPerVF->second.find(I) != ScalarsPerVF->second.end(); 991 } 992 993 /// \returns True if instruction \p I can be truncated to a smaller bitwidth 994 /// for vectorization factor \p VF. 995 bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const { 996 return VF > 1 && MinBWs.find(I) != MinBWs.end() && 997 !isProfitableToScalarize(I, VF) && 998 !isScalarAfterVectorization(I, VF); 999 } 1000 1001 /// Decision that was taken during cost calculation for memory instruction. 1002 enum InstWidening { 1003 CM_Unknown, 1004 CM_Widen, // For consecutive accesses with stride +1. 1005 CM_Widen_Reverse, // For consecutive accesses with stride -1. 1006 CM_Interleave, 1007 CM_GatherScatter, 1008 CM_Scalarize 1009 }; 1010 1011 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1012 /// instruction \p I and vector width \p VF. 1013 void setWideningDecision(Instruction *I, unsigned VF, InstWidening W, 1014 unsigned Cost) { 1015 assert(VF >= 2 && "Expected VF >=2"); 1016 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1017 } 1018 1019 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1020 /// interleaving group \p Grp and vector width \p VF. 1021 void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF, 1022 InstWidening W, unsigned Cost) { 1023 assert(VF >= 2 && "Expected VF >=2"); 1024 /// Broadcast this decicion to all instructions inside the group. 1025 /// But the cost will be assigned to one instruction only. 
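    // Illustrative example: for an interleave group of factor 2, such as the
    // access pair
    //   ... = A[2 * i];     // member 0
    //   ... = A[2 * i + 1]; // member 1
    // the whole group is typically emitted as one wide memory operation plus
    // shuffles. The full cost is therefore recorded only on the insert-position
    // member, and a cost of 0 on the remaining members, so the group is not
    // double-counted when per-instruction costs are summed.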
1026 for (unsigned i = 0; i < Grp->getFactor(); ++i) { 1027 if (auto *I = Grp->getMember(i)) { 1028 if (Grp->getInsertPos() == I) 1029 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1030 else 1031 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); 1032 } 1033 } 1034 } 1035 1036 /// Return the cost model decision for the given instruction \p I and vector 1037 /// width \p VF. Return CM_Unknown if this instruction did not pass 1038 /// through the cost modeling. 1039 InstWidening getWideningDecision(Instruction *I, unsigned VF) { 1040 assert(VF >= 2 && "Expected VF >=2"); 1041 1042 // Cost model is not run in the VPlan-native path - return conservative 1043 // result until this changes. 1044 if (EnableVPlanNativePath) 1045 return CM_GatherScatter; 1046 1047 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF); 1048 auto Itr = WideningDecisions.find(InstOnVF); 1049 if (Itr == WideningDecisions.end()) 1050 return CM_Unknown; 1051 return Itr->second.first; 1052 } 1053 1054 /// Return the vectorization cost for the given instruction \p I and vector 1055 /// width \p VF. 1056 unsigned getWideningCost(Instruction *I, unsigned VF) { 1057 assert(VF >= 2 && "Expected VF >=2"); 1058 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF); 1059 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1060 "The cost is not calculated"); 1061 return WideningDecisions[InstOnVF].second; 1062 } 1063 1064 /// Return True if instruction \p I is an optimizable truncate whose operand 1065 /// is an induction variable. Such a truncate will be removed by adding a new 1066 /// induction variable with the destination type. 1067 bool isOptimizableIVTruncate(Instruction *I, unsigned VF) { 1068 // If the instruction is not a truncate, return false. 1069 auto *Trunc = dyn_cast<TruncInst>(I); 1070 if (!Trunc) 1071 return false; 1072 1073 // Get the source and destination types of the truncate. 1074 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1075 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1076 1077 // If the truncate is free for the given types, return false. Replacing a 1078 // free truncate with an induction variable would add an induction variable 1079 // update instruction to each iteration of the loop. We exclude from this 1080 // check the primary induction variable since it will need an update 1081 // instruction regardless. 1082 Value *Op = Trunc->getOperand(0); 1083 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1084 return false; 1085 1086 // If the truncated value is not an induction variable, return false. 1087 return Legal->isInductionPhi(Op); 1088 } 1089 1090 /// Collects the instructions to scalarize for each predicated instruction in 1091 /// the loop. 1092 void collectInstsToScalarize(unsigned VF); 1093 1094 /// Collect Uniform and Scalar values for the given \p VF. 1095 /// The sets depend on CM decision for Load/Store instructions 1096 /// that may be vectorized as interleave, gather-scatter or scalarized. 1097 void collectUniformsAndScalars(unsigned VF) { 1098 // Do the analysis once. 1099 if (VF == 1 || Uniforms.find(VF) != Uniforms.end()) 1100 return; 1101 setCostBasedWideningDecision(VF); 1102 collectLoopUniforms(VF); 1103 collectLoopScalars(VF); 1104 } 1105 1106 /// Returns true if the target machine supports masked store operation 1107 /// for the given \p DataType and kind of access to \p Ptr. 
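  ///
  /// Conceptually (illustrative pseudo-code), a masked store performs
  ///   for (lane = 0; lane < VF; ++lane)
  ///     if (mask[lane]) ptr[lane] = value[lane];
  /// in a single vector instruction, which is what lets conditional stores in
  /// the loop body be vectorized without scalarizing them. This query returns
  /// true only for consecutive accesses that the target reports as legal for
  /// \p DataType.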
1108 bool isLegalMaskedStore(Type *DataType, Value *Ptr) { 1109 return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedStore(DataType); 1110 } 1111 1112 /// Returns true if the target machine supports masked load operation 1113 /// for the given \p DataType and kind of access to \p Ptr. 1114 bool isLegalMaskedLoad(Type *DataType, Value *Ptr) { 1115 return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedLoad(DataType); 1116 } 1117 1118 /// Returns true if the target machine supports masked scatter operation 1119 /// for the given \p DataType. 1120 bool isLegalMaskedScatter(Type *DataType) { 1121 return TTI.isLegalMaskedScatter(DataType); 1122 } 1123 1124 /// Returns true if the target machine supports masked gather operation 1125 /// for the given \p DataType. 1126 bool isLegalMaskedGather(Type *DataType) { 1127 return TTI.isLegalMaskedGather(DataType); 1128 } 1129 1130 /// Returns true if the target machine can represent \p V as a masked gather 1131 /// or scatter operation. 1132 bool isLegalGatherOrScatter(Value *V) { 1133 bool LI = isa<LoadInst>(V); 1134 bool SI = isa<StoreInst>(V); 1135 if (!LI && !SI) 1136 return false; 1137 auto *Ty = getMemInstValueType(V); 1138 return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty)); 1139 } 1140 1141 /// Returns true if \p I is an instruction that will be scalarized with 1142 /// predication. Such instructions include conditional stores and 1143 /// instructions that may divide by zero. 1144 /// If a non-zero VF has been calculated, we check if I will be scalarized 1145 /// predication for that VF. 1146 bool isScalarWithPredication(Instruction *I, unsigned VF = 1); 1147 1148 // Returns true if \p I is an instruction that will be predicated either 1149 // through scalar predication or masked load/store or masked gather/scatter. 1150 // Superset of instructions that return true for isScalarWithPredication. 1151 bool isPredicatedInst(Instruction *I) { 1152 if (!blockNeedsPredication(I->getParent())) 1153 return false; 1154 // Loads and stores that need some form of masked operation are predicated 1155 // instructions. 1156 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1157 return Legal->isMaskRequired(I); 1158 return isScalarWithPredication(I); 1159 } 1160 1161 /// Returns true if \p I is a memory instruction with consecutive memory 1162 /// access that can be widened. 1163 bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1); 1164 1165 /// Returns true if \p I is a memory instruction in an interleaved-group 1166 /// of memory accesses that can be vectorized with wide vector loads/stores 1167 /// and shuffles. 1168 bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1); 1169 1170 /// Check if \p Instr belongs to any interleaved access group. 1171 bool isAccessInterleaved(Instruction *Instr) { 1172 return InterleaveInfo.isInterleaved(Instr); 1173 } 1174 1175 /// Get the interleaved access group that \p Instr belongs to. 1176 const InterleaveGroup<Instruction> * 1177 getInterleavedAccessGroup(Instruction *Instr) { 1178 return InterleaveInfo.getInterleaveGroup(Instr); 1179 } 1180 1181 /// Returns true if an interleaved group requires a scalar iteration 1182 /// to handle accesses with gaps, and there is nothing preventing us from 1183 /// creating a scalar epilogue. 1184 bool requiresScalarEpilogue() const { 1185 return isScalarEpilogueAllowed() && InterleaveInfo.requiresScalarEpilogue(); 1186 } 1187 1188 /// Returns true if a scalar epilogue is not allowed due to optsize or a 1189 /// loop hint annotation. 
1190 bool isScalarEpilogueAllowed() const { 1191 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed; 1192 } 1193 1194 /// Returns true if all loop blocks should be masked to fold tail loop. 1195 bool foldTailByMasking() const { return FoldTailByMasking; } 1196 1197 bool blockNeedsPredication(BasicBlock *BB) { 1198 return foldTailByMasking() || Legal->blockNeedsPredication(BB); 1199 } 1200 1201 /// Estimate cost of an intrinsic call instruction CI if it were vectorized 1202 /// with factor VF. Return the cost of the instruction, including 1203 /// scalarization overhead if it's needed. 1204 unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF); 1205 1206 /// Estimate cost of a call instruction CI if it were vectorized with factor 1207 /// VF. Return the cost of the instruction, including scalarization overhead 1208 /// if it's needed. The flag NeedToScalarize shows if the call needs to be 1209 /// scalarized - 1210 /// i.e. either vector version isn't available, or is too expensive. 1211 unsigned getVectorCallCost(CallInst *CI, unsigned VF, bool &NeedToScalarize); 1212 1213 private: 1214 unsigned NumPredStores = 0; 1215 1216 /// \return An upper bound for the vectorization factor, larger than zero. 1217 /// One is returned if vectorization should best be avoided due to cost. 1218 unsigned computeFeasibleMaxVF(unsigned ConstTripCount); 1219 1220 /// The vectorization cost is a combination of the cost itself and a boolean 1221 /// indicating whether any of the contributing operations will actually 1222 /// operate on 1223 /// vector values after type legalization in the backend. If this latter value 1224 /// is 1225 /// false, then all operations will be scalarized (i.e. no vectorization has 1226 /// actually taken place). 1227 using VectorizationCostTy = std::pair<unsigned, bool>; 1228 1229 /// Returns the expected execution cost. The unit of the cost does 1230 /// not matter because we use the 'cost' units to compare different 1231 /// vector widths. The cost that is returned is *not* normalized by 1232 /// the factor width. 1233 VectorizationCostTy expectedCost(unsigned VF); 1234 1235 /// Returns the execution time cost of an instruction for a given vector 1236 /// width. Vector width of one means scalar. 1237 VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF); 1238 1239 /// The cost-computation logic from getInstructionCost which provides 1240 /// the vector type as an output parameter. 1241 unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy); 1242 1243 /// Calculate vectorization cost of memory instruction \p I. 1244 unsigned getMemoryInstructionCost(Instruction *I, unsigned VF); 1245 1246 /// The cost computation for scalarized memory instruction. 1247 unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF); 1248 1249 /// The cost computation for interleaving group of memory instructions. 1250 unsigned getInterleaveGroupCost(Instruction *I, unsigned VF); 1251 1252 /// The cost computation for Gather/Scatter instruction. 1253 unsigned getGatherScatterCost(Instruction *I, unsigned VF); 1254 1255 /// The cost computation for widening instruction \p I with consecutive 1256 /// memory access. 1257 unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF); 1258 1259 /// The cost calculation for Load/Store instruction \p I with uniform pointer - 1260 /// Load: scalar load + broadcast. 1261 /// Store: scalar store + (loop invariant value stored? 
0 : extract of last 1262 /// element) 1263 unsigned getUniformMemOpCost(Instruction *I, unsigned VF); 1264 1265 /// Estimate the overhead of scalarizing an instruction. This is a 1266 /// convenience wrapper for the type-based getScalarizationOverhead API. 1267 unsigned getScalarizationOverhead(Instruction *I, unsigned VF); 1268 1269 /// Returns whether the instruction is a load or store and will be a emitted 1270 /// as a vector operation. 1271 bool isConsecutiveLoadOrStore(Instruction *I); 1272 1273 /// Returns true if an artificially high cost for emulated masked memrefs 1274 /// should be used. 1275 bool useEmulatedMaskMemRefHack(Instruction *I); 1276 1277 /// Create an analysis remark that explains why vectorization failed 1278 /// 1279 /// \p RemarkName is the identifier for the remark. \return the remark object 1280 /// that can be streamed to. 1281 OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) { 1282 return createLVMissedAnalysis(Hints->vectorizeAnalysisPassName(), 1283 RemarkName, TheLoop); 1284 } 1285 1286 /// Map of scalar integer values to the smallest bitwidth they can be legally 1287 /// represented as. The vector equivalents of these values should be truncated 1288 /// to this type. 1289 MapVector<Instruction *, uint64_t> MinBWs; 1290 1291 /// A type representing the costs for instructions if they were to be 1292 /// scalarized rather than vectorized. The entries are Instruction-Cost 1293 /// pairs. 1294 using ScalarCostsTy = DenseMap<Instruction *, unsigned>; 1295 1296 /// A set containing all BasicBlocks that are known to present after 1297 /// vectorization as a predicated block. 1298 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization; 1299 1300 /// Records whether it is allowed to have the original scalar loop execute at 1301 /// least once. This may be needed as a fallback loop in case runtime 1302 /// aliasing/dependence checks fail, or to handle the tail/remainder 1303 /// iterations when the trip count is unknown or doesn't divide by the VF, 1304 /// or as a peel-loop to handle gaps in interleave-groups. 1305 /// Under optsize and when the trip count is very small we don't allow any 1306 /// iterations to execute in the scalar loop. 1307 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 1308 1309 /// All blocks of loop are to be masked to fold tail of scalar iterations. 1310 bool FoldTailByMasking = false; 1311 1312 /// A map holding scalar costs for different vectorization factors. The 1313 /// presence of a cost for an instruction in the mapping indicates that the 1314 /// instruction will be scalarized when vectorizing with the associated 1315 /// vectorization factor. The entries are VF-ScalarCostTy pairs. 1316 DenseMap<unsigned, ScalarCostsTy> InstsToScalarize; 1317 1318 /// Holds the instructions known to be uniform after vectorization. 1319 /// The data is collected per VF. 1320 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms; 1321 1322 /// Holds the instructions known to be scalar after vectorization. 1323 /// The data is collected per VF. 1324 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars; 1325 1326 /// Holds the instructions (address computations) that are forced to be 1327 /// scalarized. 1328 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1329 1330 /// Returns the expected difference in cost from scalarizing the expression 1331 /// feeding a predicated instruction \p PredInst. 
The instructions to 1332 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1333 /// non-negative return value implies the expression will be scalarized. 1334 /// Currently, only single-use chains are considered for scalarization. 1335 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1336 unsigned VF); 1337 1338 /// Collect the instructions that are uniform after vectorization. An 1339 /// instruction is uniform if we represent it with a single scalar value in 1340 /// the vectorized loop corresponding to each vector iteration. Examples of 1341 /// uniform instructions include pointer operands of consecutive or 1342 /// interleaved memory accesses. Note that although uniformity implies an 1343 /// instruction will be scalar, the reverse is not true. In general, a 1344 /// scalarized instruction will be represented by VF scalar values in the 1345 /// vectorized loop, each corresponding to an iteration of the original 1346 /// scalar loop. 1347 void collectLoopUniforms(unsigned VF); 1348 1349 /// Collect the instructions that are scalar after vectorization. An 1350 /// instruction is scalar if it is known to be uniform or will be scalarized 1351 /// during vectorization. Non-uniform scalarized instructions will be 1352 /// represented by VF values in the vectorized loop, each corresponding to an 1353 /// iteration of the original scalar loop. 1354 void collectLoopScalars(unsigned VF); 1355 1356 /// Keeps cost model vectorization decision and cost for instructions. 1357 /// Right now it is used for memory instructions only. 1358 using DecisionList = DenseMap<std::pair<Instruction *, unsigned>, 1359 std::pair<InstWidening, unsigned>>; 1360 1361 DecisionList WideningDecisions; 1362 1363 /// Returns true if \p V is expected to be vectorized and it needs to be 1364 /// extracted. 1365 bool needsExtract(Value *V, unsigned VF) const { 1366 Instruction *I = dyn_cast<Instruction>(V); 1367 if (VF == 1 || !I || !TheLoop->contains(I) || TheLoop->isLoopInvariant(I)) 1368 return false; 1369 1370 // Assume we can vectorize V (and hence we need extraction) if the 1371 // scalars are not computed yet. This can happen, because it is called 1372 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1373 // the scalars are collected. That should be a safe assumption in most 1374 // cases, because we check if the operands have vectorizable types 1375 // beforehand in LoopVectorizationLegality. 1376 return Scalars.find(VF) == Scalars.end() || 1377 !isScalarAfterVectorization(I, VF); 1378 }; 1379 1380 /// Returns a range containing only operands needing to be extracted. 1381 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1382 unsigned VF) { 1383 return SmallVector<Value *, 4>(make_filter_range( 1384 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1385 } 1386 1387 public: 1388 /// The loop that we evaluate. 1389 Loop *TheLoop; 1390 1391 /// Predicated scalar evolution analysis. 1392 PredicatedScalarEvolution &PSE; 1393 1394 /// Loop Info analysis. 1395 LoopInfo *LI; 1396 1397 /// Vectorization legality. 1398 LoopVectorizationLegality *Legal; 1399 1400 /// Vector target information. 1401 const TargetTransformInfo &TTI; 1402 1403 /// Target Library Info. 1404 const TargetLibraryInfo *TLI; 1405 1406 /// Demanded bits analysis. 1407 DemandedBits *DB; 1408 1409 /// Assumption cache. 1410 AssumptionCache *AC; 1411 1412 /// Interface to emit optimization remarks. 
1413 OptimizationRemarkEmitter *ORE; 1414 1415 const Function *TheFunction; 1416 1417 /// Loop Vectorize Hint. 1418 const LoopVectorizeHints *Hints; 1419 1420 /// The interleave access information contains groups of interleaved accesses 1421 /// with the same stride and close to each other. 1422 InterleavedAccessInfo &InterleaveInfo; 1423 1424 /// Values to ignore in the cost model. 1425 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1426 1427 /// Values to ignore in the cost model when VF > 1. 1428 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1429 }; 1430 1431 } // end namespace llvm 1432 1433 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 1434 // vectorization. The loop needs to be annotated with #pragma omp simd 1435 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 1436 // vector length information is not provided, vectorization is not considered 1437 // explicit. Interleave hints are not allowed either. These limitations will be 1438 // relaxed in the future. 1439 // Please, note that we are currently forced to abuse the pragma 'clang 1440 // vectorize' semantics. This pragma provides *auto-vectorization hints* 1441 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 1442 // provides *explicit vectorization hints* (LV can bypass legal checks and 1443 // assume that vectorization is legal). However, both hints are implemented 1444 // using the same metadata (llvm.loop.vectorize, processed by 1445 // LoopVectorizeHints). This will be fixed in the future when the native IR 1446 // representation for pragma 'omp simd' is introduced. 1447 static bool isExplicitVecOuterLoop(Loop *OuterLp, 1448 OptimizationRemarkEmitter *ORE) { 1449 assert(!OuterLp->empty() && "This is not an outer loop"); 1450 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 1451 1452 // Only outer loops with an explicit vectorization hint are supported. 1453 // Unannotated outer loops are ignored. 1454 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 1455 return false; 1456 1457 Function *Fn = OuterLp->getHeader()->getParent(); 1458 if (!Hints.allowVectorization(Fn, OuterLp, 1459 true /*VectorizeOnlyWhenForced*/)) { 1460 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 1461 return false; 1462 } 1463 1464 if (Hints.getInterleave() > 1) { 1465 // TODO: Interleave support is future work. 1466 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 1467 "outer loops.\n"); 1468 Hints.emitRemarkWithHints(); 1469 return false; 1470 } 1471 1472 return true; 1473 } 1474 1475 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 1476 OptimizationRemarkEmitter *ORE, 1477 SmallVectorImpl<Loop *> &V) { 1478 // Collect inner loops and outer loops without irreducible control flow. For 1479 // now, only collect outer loops that have explicit vectorization hints. If we 1480 // are stress testing the VPlan H-CFG construction, we collect the outermost 1481 // loop of every loop nest. 1482 if (L.empty() || VPlanBuildStressTest || 1483 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 1484 LoopBlocksRPO RPOT(&L); 1485 RPOT.perform(LI); 1486 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 1487 V.push_back(&L); 1488 // TODO: Collect inner loops inside marked outer loops in case 1489 // vectorization fails for the outer loop. 
Do not invoke 1490 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 1491 // already known to be reducible. We can use an inherited attribute for 1492 // that. 1493 return; 1494 } 1495 } 1496 for (Loop *InnerL : L) 1497 collectSupportedLoops(*InnerL, LI, ORE, V); 1498 } 1499 1500 namespace { 1501 1502 /// The LoopVectorize Pass. 1503 struct LoopVectorize : public FunctionPass { 1504 /// Pass identification, replacement for typeid 1505 static char ID; 1506 1507 LoopVectorizePass Impl; 1508 1509 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 1510 bool VectorizeOnlyWhenForced = false) 1511 : FunctionPass(ID) { 1512 Impl.InterleaveOnlyWhenForced = InterleaveOnlyWhenForced; 1513 Impl.VectorizeOnlyWhenForced = VectorizeOnlyWhenForced; 1514 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1515 } 1516 1517 bool runOnFunction(Function &F) override { 1518 if (skipFunction(F)) 1519 return false; 1520 1521 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1522 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1523 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1524 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1525 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1526 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1527 auto *TLI = TLIP ? &TLIP->getTLI() : nullptr; 1528 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1529 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1530 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1531 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1532 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1533 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 1534 1535 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1536 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1537 1538 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1539 GetLAA, *ORE, PSI); 1540 } 1541 1542 void getAnalysisUsage(AnalysisUsage &AU) const override { 1543 AU.addRequired<AssumptionCacheTracker>(); 1544 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1545 AU.addRequired<DominatorTreeWrapperPass>(); 1546 AU.addRequired<LoopInfoWrapperPass>(); 1547 AU.addRequired<ScalarEvolutionWrapperPass>(); 1548 AU.addRequired<TargetTransformInfoWrapperPass>(); 1549 AU.addRequired<AAResultsWrapperPass>(); 1550 AU.addRequired<LoopAccessLegacyAnalysis>(); 1551 AU.addRequired<DemandedBitsWrapperPass>(); 1552 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1553 1554 // We currently do not preserve loopinfo/dominator analyses with outer loop 1555 // vectorization. Until this is addressed, mark these analyses as preserved 1556 // only for non-VPlan-native path. 1557 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 1558 if (!EnableVPlanNativePath) { 1559 AU.addPreserved<LoopInfoWrapperPass>(); 1560 AU.addPreserved<DominatorTreeWrapperPass>(); 1561 } 1562 1563 AU.addPreserved<BasicAAWrapperPass>(); 1564 AU.addPreserved<GlobalsAAWrapperPass>(); 1565 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 1566 } 1567 }; 1568 1569 } // end anonymous namespace 1570 1571 //===----------------------------------------------------------------------===// 1572 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 1573 // LoopVectorizationCostModel and LoopVectorizationPlanner. 
1574 //===----------------------------------------------------------------------===// 1575 1576 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 1577 // We need to place the broadcast of invariant variables outside the loop, 1578 // but only if it's proven safe to do so. Else, broadcast will be inside 1579 // vector loop body. 1580 Instruction *Instr = dyn_cast<Instruction>(V); 1581 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 1582 (!Instr || 1583 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 1584 // Place the code for broadcasting invariant variables in the new preheader. 1585 IRBuilder<>::InsertPointGuard Guard(Builder); 1586 if (SafeToHoist) 1587 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1588 1589 // Broadcast the scalar into all locations in the vector. 1590 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 1591 1592 return Shuf; 1593 } 1594 1595 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 1596 const InductionDescriptor &II, Value *Step, Instruction *EntryVal) { 1597 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1598 "Expected either an induction phi-node or a truncate of it!"); 1599 Value *Start = II.getStartValue(); 1600 1601 // Construct the initial value of the vector IV in the vector loop preheader 1602 auto CurrIP = Builder.saveIP(); 1603 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1604 if (isa<TruncInst>(EntryVal)) { 1605 assert(Start->getType()->isIntegerTy() && 1606 "Truncation requires an integer type"); 1607 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 1608 Step = Builder.CreateTrunc(Step, TruncType); 1609 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 1610 } 1611 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 1612 Value *SteppedStart = 1613 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 1614 1615 // We create vector phi nodes for both integer and floating-point induction 1616 // variables. Here, we determine the kind of arithmetic we will perform. 1617 Instruction::BinaryOps AddOp; 1618 Instruction::BinaryOps MulOp; 1619 if (Step->getType()->isIntegerTy()) { 1620 AddOp = Instruction::Add; 1621 MulOp = Instruction::Mul; 1622 } else { 1623 AddOp = II.getInductionOpcode(); 1624 MulOp = Instruction::FMul; 1625 } 1626 1627 // Multiply the vectorization factor by the step using integer or 1628 // floating-point arithmetic as appropriate. 1629 Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF); 1630 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 1631 1632 // Create a vector splat to use in the induction update. 1633 // 1634 // FIXME: If the step is non-constant, we create the vector splat with 1635 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 1636 // handle a constant vector splat. 1637 Value *SplatVF = isa<Constant>(Mul) 1638 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 1639 : Builder.CreateVectorSplat(VF, Mul); 1640 Builder.restoreIP(CurrIP); 1641 1642 // We may need to add the step a number of times, depending on the unroll 1643 // factor. The last of those goes into the PHI. 
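  // Illustrative sketch (not taken from an actual compilation): with VF = 4,
  // UF = 2 and an integer step of 1 starting at 0, the code below produces
  // roughly
  //   %vec.ind      = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ],
  //                                 [ %vec.ind.next, %vector.body ]
  //   %step.add     = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
  //   %vec.ind.next = add <4 x i32> %step.add, <i32 4, i32 4, i32 4, i32 4>
  // Unroll part 0 uses %vec.ind, part 1 uses %step.add, and the final add is
  // moved to the latch to feed the PHI.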
1644 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 1645 &*LoopVectorBody->getFirstInsertionPt()); 1646 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 1647 Instruction *LastInduction = VecInd; 1648 for (unsigned Part = 0; Part < UF; ++Part) { 1649 VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction); 1650 1651 if (isa<TruncInst>(EntryVal)) 1652 addMetadata(LastInduction, EntryVal); 1653 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part); 1654 1655 LastInduction = cast<Instruction>(addFastMathFlag( 1656 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"))); 1657 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 1658 } 1659 1660 // Move the last step to the end of the latch block. This ensures consistent 1661 // placement of all induction updates. 1662 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 1663 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 1664 auto *ICmp = cast<Instruction>(Br->getCondition()); 1665 LastInduction->moveBefore(ICmp); 1666 LastInduction->setName("vec.ind.next"); 1667 1668 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 1669 VecInd->addIncoming(LastInduction, LoopVectorLatch); 1670 } 1671 1672 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 1673 return Cost->isScalarAfterVectorization(I, VF) || 1674 Cost->isProfitableToScalarize(I, VF); 1675 } 1676 1677 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 1678 if (shouldScalarizeInstruction(IV)) 1679 return true; 1680 auto isScalarInst = [&](User *U) -> bool { 1681 auto *I = cast<Instruction>(U); 1682 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 1683 }; 1684 return llvm::any_of(IV->users(), isScalarInst); 1685 } 1686 1687 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 1688 const InductionDescriptor &ID, const Instruction *EntryVal, 1689 Value *VectorLoopVal, unsigned Part, unsigned Lane) { 1690 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1691 "Expected either an induction phi-node or a truncate of it!"); 1692 1693 // This induction variable is not the phi from the original loop but the 1694 // newly-created IV based on the proof that casted Phi is equal to the 1695 // uncasted Phi in the vectorized loop (under a runtime guard possibly). It 1696 // re-uses the same InductionDescriptor that original IV uses but we don't 1697 // have to do any recording in this case - that is done when original IV is 1698 // processed. 1699 if (isa<TruncInst>(EntryVal)) 1700 return; 1701 1702 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 1703 if (Casts.empty()) 1704 return; 1705 // Only the first Cast instruction in the Casts vector is of interest. 1706 // The rest of the Casts (if exist) have no uses outside the 1707 // induction update chain itself. 
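  // Note: the mapping recorded below lets in-loop users of the cast pick up
  // the widened induction directly. A Lane of UINT_MAX (the default) records
  // a whole vector for the given unroll part; any smaller value records a
  // single scalar lane.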
1708 Instruction *CastInst = *Casts.begin(); 1709 if (Lane < UINT_MAX) 1710 VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal); 1711 else 1712 VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal); 1713 } 1714 1715 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) { 1716 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 1717 "Primary induction variable must have an integer type"); 1718 1719 auto II = Legal->getInductionVars()->find(IV); 1720 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 1721 1722 auto ID = II->second; 1723 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 1724 1725 // The scalar value to broadcast. This will be derived from the canonical 1726 // induction variable. 1727 Value *ScalarIV = nullptr; 1728 1729 // The value from the original loop to which we are mapping the new induction 1730 // variable. 1731 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 1732 1733 // True if we have vectorized the induction variable. 1734 auto VectorizedIV = false; 1735 1736 // Determine if we want a scalar version of the induction variable. This is 1737 // true if the induction variable itself is not widened, or if it has at 1738 // least one user in the loop that is not widened. 1739 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal); 1740 1741 // Generate code for the induction step. Note that induction steps are 1742 // required to be loop-invariant 1743 assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) && 1744 "Induction step should be loop invariant"); 1745 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 1746 Value *Step = nullptr; 1747 if (PSE.getSE()->isSCEVable(IV->getType())) { 1748 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 1749 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 1750 LoopVectorPreHeader->getTerminator()); 1751 } else { 1752 Step = cast<SCEVUnknown>(ID.getStep())->getValue(); 1753 } 1754 1755 // Try to create a new independent vector induction variable. If we can't 1756 // create the phi node, we will splat the scalar induction variable in each 1757 // loop iteration. 1758 if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) { 1759 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 1760 VectorizedIV = true; 1761 } 1762 1763 // If we haven't yet vectorized the induction variable, or if we will create 1764 // a scalar one, we need to define the scalar induction variable and step 1765 // values. If we were given a truncation type, truncate the canonical 1766 // induction variable and step. Otherwise, derive these values from the 1767 // induction descriptor. 1768 if (!VectorizedIV || NeedsScalarIV) { 1769 ScalarIV = Induction; 1770 if (IV != OldInduction) { 1771 ScalarIV = IV->getType()->isIntegerTy() 1772 ? 
Builder.CreateSExtOrTrunc(Induction, IV->getType()) 1773 : Builder.CreateCast(Instruction::SIToFP, Induction, 1774 IV->getType()); 1775 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 1776 ScalarIV->setName("offset.idx"); 1777 } 1778 if (Trunc) { 1779 auto *TruncType = cast<IntegerType>(Trunc->getType()); 1780 assert(Step->getType()->isIntegerTy() && 1781 "Truncation requires an integer step"); 1782 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 1783 Step = Builder.CreateTrunc(Step, TruncType); 1784 } 1785 } 1786 1787 // If we haven't yet vectorized the induction variable, splat the scalar 1788 // induction variable, and build the necessary step vectors. 1789 // TODO: Don't do it unless the vectorized IV is really required. 1790 if (!VectorizedIV) { 1791 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 1792 for (unsigned Part = 0; Part < UF; ++Part) { 1793 Value *EntryPart = 1794 getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode()); 1795 VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart); 1796 if (Trunc) 1797 addMetadata(EntryPart, Trunc); 1798 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part); 1799 } 1800 } 1801 1802 // If an induction variable is only used for counting loop iterations or 1803 // calculating addresses, it doesn't need to be widened. Create scalar steps 1804 // that can be used by instructions we will later scalarize. Note that the 1805 // addition of the scalar steps will not increase the number of instructions 1806 // in the loop in the common case prior to InstCombine. We will be trading 1807 // one vector extract for each scalar step. 1808 if (NeedsScalarIV) 1809 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 1810 } 1811 1812 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 1813 Instruction::BinaryOps BinOp) { 1814 // Create and check the types. 1815 assert(Val->getType()->isVectorTy() && "Must be a vector"); 1816 int VLen = Val->getType()->getVectorNumElements(); 1817 1818 Type *STy = Val->getType()->getScalarType(); 1819 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 1820 "Induction Step must be an integer or FP"); 1821 assert(Step->getType() == STy && "Step has wrong type"); 1822 1823 SmallVector<Constant *, 8> Indices; 1824 1825 if (STy->isIntegerTy()) { 1826 // Create a vector of consecutive numbers from zero to VF. 1827 for (int i = 0; i < VLen; ++i) 1828 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 1829 1830 // Add the consecutive indices to the vector value. 1831 Constant *Cv = ConstantVector::get(Indices); 1832 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 1833 Step = Builder.CreateVectorSplat(VLen, Step); 1834 assert(Step->getType() == Val->getType() && "Invalid step vec"); 1835 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 1836 // which can be found from the original scalar operations. 1837 Step = Builder.CreateMul(Cv, Step); 1838 return Builder.CreateAdd(Val, Step, "induction"); 1839 } 1840 1841 // Floating point induction. 1842 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 1843 "Binary Opcode should be specified for FP induction"); 1844 // Create a vector of consecutive numbers from zero to VF. 1845 for (int i = 0; i < VLen; ++i) 1846 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 1847 1848 // Add the consecutive indices to the vector value. 
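  // Illustrative sketch (not taken from an actual compilation): for VF = 4 and
  // StartIdx = 0, the result below is Val BinOp (<0.0, 1.0, 2.0, 3.0> * Step),
  // i.e. an FAdd for increasing inductions or an FSub for decreasing ones.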
1849 Constant *Cv = ConstantVector::get(Indices); 1850 1851 Step = Builder.CreateVectorSplat(VLen, Step); 1852 1853 // Floating point operations had to be 'fast' to enable the induction. 1854 FastMathFlags Flags; 1855 Flags.setFast(); 1856 1857 Value *MulOp = Builder.CreateFMul(Cv, Step); 1858 if (isa<Instruction>(MulOp)) 1859 // Have to check, MulOp may be a constant 1860 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 1861 1862 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 1863 if (isa<Instruction>(BOp)) 1864 cast<Instruction>(BOp)->setFastMathFlags(Flags); 1865 return BOp; 1866 } 1867 1868 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 1869 Instruction *EntryVal, 1870 const InductionDescriptor &ID) { 1871 // We shouldn't have to build scalar steps if we aren't vectorizing. 1872 assert(VF > 1 && "VF should be greater than one"); 1873 1874 // Get the value type and ensure it and the step have the same integer type. 1875 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 1876 assert(ScalarIVTy == Step->getType() && 1877 "Val and Step should have the same type"); 1878 1879 // We build scalar steps for both integer and floating-point induction 1880 // variables. Here, we determine the kind of arithmetic we will perform. 1881 Instruction::BinaryOps AddOp; 1882 Instruction::BinaryOps MulOp; 1883 if (ScalarIVTy->isIntegerTy()) { 1884 AddOp = Instruction::Add; 1885 MulOp = Instruction::Mul; 1886 } else { 1887 AddOp = ID.getInductionOpcode(); 1888 MulOp = Instruction::FMul; 1889 } 1890 1891 // Determine the number of scalars we need to generate for each unroll 1892 // iteration. If EntryVal is uniform, we only need to generate the first 1893 // lane. Otherwise, we generate all VF values. 1894 unsigned Lanes = 1895 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1 1896 : VF; 1897 // Compute the scalar steps and save the results in VectorLoopValueMap. 1898 for (unsigned Part = 0; Part < UF; ++Part) { 1899 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 1900 auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane); 1901 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 1902 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 1903 VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add); 1904 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane); 1905 } 1906 } 1907 } 1908 1909 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) { 1910 assert(V != Induction && "The new induction variable should not be used."); 1911 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 1912 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 1913 1914 // If we have a stride that is replaced by one, do it here. Defer this for 1915 // the VPlan-native path until we start running Legal checks in that path. 1916 if (!EnableVPlanNativePath && Legal->hasStride(V)) 1917 V = ConstantInt::get(V->getType(), 1); 1918 1919 // If we have a vector mapped to this value, return it. 1920 if (VectorLoopValueMap.hasVectorValue(V, Part)) 1921 return VectorLoopValueMap.getVectorValue(V, Part); 1922 1923 // If the value has not been vectorized, check if it has been scalarized 1924 // instead. If it has been scalarized, and we actually need the value in 1925 // vector form, we will construct the vector values on demand. 
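  // Illustrative sketch (not taken from an actual compilation): for VF = 4,
  // the per-lane scalars are packed back into a vector with an insertelement
  // chain that starts from undef, e.g.
  //   %v0 = insertelement <4 x i32> undef, i32 %s0, i32 0
  //   %v1 = insertelement <4 x i32> %v0,   i32 %s1, i32 1
  // and so on for the remaining lanes.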
1926 if (VectorLoopValueMap.hasAnyScalarValue(V)) { 1927 Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0}); 1928 1929 // If we've scalarized a value, that value should be an instruction. 1930 auto *I = cast<Instruction>(V); 1931 1932 // If we aren't vectorizing, we can just copy the scalar map values over to 1933 // the vector map. 1934 if (VF == 1) { 1935 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue); 1936 return ScalarValue; 1937 } 1938 1939 // Get the last scalar instruction we generated for V and Part. If the value 1940 // is known to be uniform after vectorization, this corresponds to lane zero 1941 // of the Part unroll iteration. Otherwise, the last instruction is the one 1942 // we created for the last vector lane of the Part unroll iteration. 1943 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1; 1944 auto *LastInst = cast<Instruction>( 1945 VectorLoopValueMap.getScalarValue(V, {Part, LastLane})); 1946 1947 // Set the insert point after the last scalarized instruction. This ensures 1948 // the insertelement sequence will directly follow the scalar definitions. 1949 auto OldIP = Builder.saveIP(); 1950 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 1951 Builder.SetInsertPoint(&*NewIP); 1952 1953 // However, if we are vectorizing, we need to construct the vector values. 1954 // If the value is known to be uniform after vectorization, we can just 1955 // broadcast the scalar value corresponding to lane zero for each unroll 1956 // iteration. Otherwise, we construct the vector values using insertelement 1957 // instructions. Since the resulting vectors are stored in 1958 // VectorLoopValueMap, we will only generate the insertelements once. 1959 Value *VectorValue = nullptr; 1960 if (Cost->isUniformAfterVectorization(I, VF)) { 1961 VectorValue = getBroadcastInstrs(ScalarValue); 1962 VectorLoopValueMap.setVectorValue(V, Part, VectorValue); 1963 } else { 1964 // Initialize packing with insertelements to start from undef. 1965 Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF)); 1966 VectorLoopValueMap.setVectorValue(V, Part, Undef); 1967 for (unsigned Lane = 0; Lane < VF; ++Lane) 1968 packScalarIntoVectorValue(V, {Part, Lane}); 1969 VectorValue = VectorLoopValueMap.getVectorValue(V, Part); 1970 } 1971 Builder.restoreIP(OldIP); 1972 return VectorValue; 1973 } 1974 1975 // If this scalar is unknown, assume that it is a constant or that it is 1976 // loop invariant. Broadcast V and save the value for future uses. 1977 Value *B = getBroadcastInstrs(V); 1978 VectorLoopValueMap.setVectorValue(V, Part, B); 1979 return B; 1980 } 1981 1982 Value * 1983 InnerLoopVectorizer::getOrCreateScalarValue(Value *V, 1984 const VPIteration &Instance) { 1985 // If the value is not an instruction contained in the loop, it should 1986 // already be scalar. 1987 if (OrigLoop->isLoopInvariant(V)) 1988 return V; 1989 1990 assert(Instance.Lane > 0 1991 ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF) 1992 : true && "Uniform values only have lane zero"); 1993 1994 // If the value from the original loop has not been vectorized, it is 1995 // represented by UF x VF scalar values in the new loop. Return the requested 1996 // scalar value. 1997 if (VectorLoopValueMap.hasScalarValue(V, Instance)) 1998 return VectorLoopValueMap.getScalarValue(V, Instance); 1999 2000 // If the value has not been scalarized, get its entry in VectorLoopValueMap 2001 // for the given unroll part. 
If this entry is not a vector type (i.e., the 2002 // vectorization factor is one), there is no need to generate an 2003 // extractelement instruction. 2004 auto *U = getOrCreateVectorValue(V, Instance.Part); 2005 if (!U->getType()->isVectorTy()) { 2006 assert(VF == 1 && "Value not scalarized has non-vector type"); 2007 return U; 2008 } 2009 2010 // Otherwise, the value from the original loop has been vectorized and is 2011 // represented by UF vector values. Extract and return the requested scalar 2012 // value from the appropriate vector lane. 2013 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane)); 2014 } 2015 2016 void InnerLoopVectorizer::packScalarIntoVectorValue( 2017 Value *V, const VPIteration &Instance) { 2018 assert(V != Induction && "The new induction variable should not be used."); 2019 assert(!V->getType()->isVectorTy() && "Can't pack a vector"); 2020 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2021 2022 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance); 2023 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part); 2024 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst, 2025 Builder.getInt32(Instance.Lane)); 2026 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue); 2027 } 2028 2029 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2030 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2031 SmallVector<Constant *, 8> ShuffleMask; 2032 for (unsigned i = 0; i < VF; ++i) 2033 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 2034 2035 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 2036 ConstantVector::get(ShuffleMask), 2037 "reverse"); 2038 } 2039 2040 // Return whether we allow using masked interleave-groups (for dealing with 2041 // strided loads/stores that reside in predicated blocks, or for dealing 2042 // with gaps). 2043 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2044 // If an override option has been passed in for interleaved accesses, use it. 2045 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2046 return EnableMaskedInterleavedMemAccesses; 2047 2048 return TTI.enableMaskedInterleavedAccessVectorization(); 2049 } 2050 2051 // Try to vectorize the interleave group that \p Instr belongs to. 2052 // 2053 // E.g. Translate following interleaved load group (factor = 3): 2054 // for (i = 0; i < N; i+=3) { 2055 // R = Pic[i]; // Member of index 0 2056 // G = Pic[i+1]; // Member of index 1 2057 // B = Pic[i+2]; // Member of index 2 2058 // ... // do something to R, G, B 2059 // } 2060 // To: 2061 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2062 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 2063 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 2064 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 2065 // 2066 // Or translate following interleaved store group (factor = 3): 2067 // for (i = 0; i < N; i+=3) { 2068 // ... 
do something to R, G, B 2069 // Pic[i] = R; // Member of index 0 2070 // Pic[i+1] = G; // Member of index 1 2071 // Pic[i+2] = B; // Member of index 2 2072 // } 2073 // To: 2074 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2075 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 2076 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2077 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2078 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2079 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr, 2080 VectorParts *BlockInMask) { 2081 const InterleaveGroup<Instruction> *Group = 2082 Cost->getInterleavedAccessGroup(Instr); 2083 assert(Group && "Fail to get an interleaved access group."); 2084 2085 // Skip if current instruction is not the insert position. 2086 if (Instr != Group->getInsertPos()) 2087 return; 2088 2089 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2090 Value *Ptr = getLoadStorePointerOperand(Instr); 2091 2092 // Prepare for the vector type of the interleaved load/store. 2093 Type *ScalarTy = getMemInstValueType(Instr); 2094 unsigned InterleaveFactor = Group->getFactor(); 2095 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF); 2096 Type *PtrTy = VecTy->getPointerTo(getLoadStoreAddressSpace(Instr)); 2097 2098 // Prepare for the new pointers. 2099 setDebugLocFromInst(Builder, Ptr); 2100 SmallVector<Value *, 2> NewPtrs; 2101 unsigned Index = Group->getIndex(Instr); 2102 2103 VectorParts Mask; 2104 bool IsMaskForCondRequired = BlockInMask; 2105 if (IsMaskForCondRequired) { 2106 Mask = *BlockInMask; 2107 // TODO: extend the masked interleaved-group support to reversed access. 2108 assert(!Group->isReverse() && "Reversed masked interleave-group " 2109 "not supported."); 2110 } 2111 2112 // If the group is reverse, adjust the index to refer to the last vector lane 2113 // instead of the first. We adjust the index from the first vector lane, 2114 // rather than directly getting the pointer for lane VF - 1, because the 2115 // pointer operand of the interleaved access is supposed to be uniform. For 2116 // uniform instructions, we're only required to generate a value for the 2117 // first vector lane in each unroll iteration. 2118 if (Group->isReverse()) 2119 Index += (VF - 1) * Group->getFactor(); 2120 2121 bool InBounds = false; 2122 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2123 InBounds = gep->isInBounds(); 2124 2125 for (unsigned Part = 0; Part < UF; Part++) { 2126 Value *NewPtr = getOrCreateScalarValue(Ptr, {Part, 0}); 2127 2128 // Notice current instruction could be any index. Need to adjust the address 2129 // to the member of index 0. 2130 // 2131 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2132 // b = A[i]; // Member of index 0 2133 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2134 // 2135 // E.g. A[i+1] = a; // Member of index 1 2136 // A[i] = b; // Member of index 0 2137 // A[i+2] = c; // Member of index 2 (Current instruction) 2138 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2139 NewPtr = Builder.CreateGEP(ScalarTy, NewPtr, Builder.getInt32(-Index)); 2140 if (InBounds) 2141 cast<GetElementPtrInst>(NewPtr)->setIsInBounds(true); 2142 2143 // Cast to the vector pointer type. 
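    // PtrTy points to <InterleaveFactor * VF x ScalarTy>, so each entry in
    // NewPtrs addresses one full group of interleaved tuples per unroll part.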
2144 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy)); 2145 } 2146 2147 setDebugLocFromInst(Builder, Instr); 2148 Value *UndefVec = UndefValue::get(VecTy); 2149 2150 Value *MaskForGaps = nullptr; 2151 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2152 MaskForGaps = createBitMaskForGaps(Builder, VF, *Group); 2153 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2154 } 2155 2156 // Vectorize the interleaved load group. 2157 if (isa<LoadInst>(Instr)) { 2158 // For each unroll part, create a wide load for the group. 2159 SmallVector<Value *, 2> NewLoads; 2160 for (unsigned Part = 0; Part < UF; Part++) { 2161 Instruction *NewLoad; 2162 if (IsMaskForCondRequired || MaskForGaps) { 2163 assert(useMaskedInterleavedAccesses(*TTI) && 2164 "masked interleaved groups are not allowed."); 2165 Value *GroupMask = MaskForGaps; 2166 if (IsMaskForCondRequired) { 2167 auto *Undefs = UndefValue::get(Mask[Part]->getType()); 2168 auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF); 2169 Value *ShuffledMask = Builder.CreateShuffleVector( 2170 Mask[Part], Undefs, RepMask, "interleaved.mask"); 2171 GroupMask = MaskForGaps 2172 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2173 MaskForGaps) 2174 : ShuffledMask; 2175 } 2176 NewLoad = 2177 Builder.CreateMaskedLoad(NewPtrs[Part], Group->getAlignment(), 2178 GroupMask, UndefVec, "wide.masked.vec"); 2179 } 2180 else 2181 NewLoad = Builder.CreateAlignedLoad(VecTy, NewPtrs[Part], 2182 Group->getAlignment(), "wide.vec"); 2183 Group->addMetadata(NewLoad); 2184 NewLoads.push_back(NewLoad); 2185 } 2186 2187 // For each member in the group, shuffle out the appropriate data from the 2188 // wide loads. 2189 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2190 Instruction *Member = Group->getMember(I); 2191 2192 // Skip the gaps in the group. 2193 if (!Member) 2194 continue; 2195 2196 Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF); 2197 for (unsigned Part = 0; Part < UF; Part++) { 2198 Value *StridedVec = Builder.CreateShuffleVector( 2199 NewLoads[Part], UndefVec, StrideMask, "strided.vec"); 2200 2201 // If this member has different type, cast the result type. 2202 if (Member->getType() != ScalarTy) { 2203 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2204 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2205 } 2206 2207 if (Group->isReverse()) 2208 StridedVec = reverseVector(StridedVec); 2209 2210 VectorLoopValueMap.setVectorValue(Member, Part, StridedVec); 2211 } 2212 } 2213 return; 2214 } 2215 2216 // The sub vector type for current instruction. 2217 VectorType *SubVT = VectorType::get(ScalarTy, VF); 2218 2219 // Vectorize the interleaved store group. 2220 for (unsigned Part = 0; Part < UF; Part++) { 2221 // Collect the stored vector from each member. 2222 SmallVector<Value *, 4> StoredVecs; 2223 for (unsigned i = 0; i < InterleaveFactor; i++) { 2224 // Interleaved store group doesn't allow a gap, so each index has a member 2225 Instruction *Member = Group->getMember(i); 2226 assert(Member && "Fail to get a member from an interleaved store group"); 2227 2228 Value *StoredVec = getOrCreateVectorValue( 2229 cast<StoreInst>(Member)->getValueOperand(), Part); 2230 if (Group->isReverse()) 2231 StoredVec = reverseVector(StoredVec); 2232 2233 // If this member has different type, cast it to a unified type. 
2234 2235 if (StoredVec->getType() != SubVT) 2236 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2237 2238 StoredVecs.push_back(StoredVec); 2239 } 2240 2241 // Concatenate all vectors into a wide vector. 2242 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2243 2244 // Interleave the elements in the wide vector. 2245 Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor); 2246 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask, 2247 "interleaved.vec"); 2248 2249 Instruction *NewStoreInstr; 2250 if (IsMaskForCondRequired) { 2251 auto *Undefs = UndefValue::get(Mask[Part]->getType()); 2252 auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF); 2253 Value *ShuffledMask = Builder.CreateShuffleVector( 2254 Mask[Part], Undefs, RepMask, "interleaved.mask"); 2255 NewStoreInstr = Builder.CreateMaskedStore( 2256 IVec, NewPtrs[Part], Group->getAlignment(), ShuffledMask); 2257 } 2258 else 2259 NewStoreInstr = Builder.CreateAlignedStore(IVec, NewPtrs[Part], 2260 Group->getAlignment()); 2261 2262 Group->addMetadata(NewStoreInstr); 2263 } 2264 } 2265 2266 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr, 2267 VectorParts *BlockInMask) { 2268 // Attempt to issue a wide load. 2269 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2270 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2271 2272 assert((LI || SI) && "Invalid Load/Store instruction"); 2273 2274 LoopVectorizationCostModel::InstWidening Decision = 2275 Cost->getWideningDecision(Instr, VF); 2276 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 2277 "CM decision should be taken at this point"); 2278 if (Decision == LoopVectorizationCostModel::CM_Interleave) 2279 return vectorizeInterleaveGroup(Instr); 2280 2281 Type *ScalarDataTy = getMemInstValueType(Instr); 2282 Type *DataTy = VectorType::get(ScalarDataTy, VF); 2283 Value *Ptr = getLoadStorePointerOperand(Instr); 2284 unsigned Alignment = getLoadStoreAlignment(Instr); 2285 // An alignment of 0 means target abi alignment. We need to use the scalar's 2286 // target abi alignment in such a case. 2287 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2288 if (!Alignment) 2289 Alignment = DL.getABITypeAlignment(ScalarDataTy); 2290 unsigned AddressSpace = getLoadStoreAddressSpace(Instr); 2291 2292 // Determine if the pointer operand of the access is either consecutive or 2293 // reverse consecutive. 2294 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2295 bool ConsecutiveStride = 2296 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2297 bool CreateGatherScatter = 2298 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2299 2300 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2301 // gather/scatter. Otherwise Decision should have been to Scalarize. 2302 assert((ConsecutiveStride || CreateGatherScatter) && 2303 "The instruction should be scalarized"); 2304 2305 // Handle consecutive loads/stores. 2306 if (ConsecutiveStride) 2307 Ptr = getOrCreateScalarValue(Ptr, {0, 0}); 2308 2309 VectorParts Mask; 2310 bool isMaskRequired = BlockInMask; 2311 if (isMaskRequired) 2312 Mask = *BlockInMask; 2313 2314 bool InBounds = false; 2315 if (auto *gep = dyn_cast<GetElementPtrInst>( 2316 getLoadStorePointerOperand(Instr)->stripPointerCasts())) 2317 InBounds = gep->isInBounds(); 2318 2319 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2320 // Calculate the pointer for the specific unroll-part. 
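    // Illustrative sketch (not taken from an actual compilation): with VF = 4,
    // a forward access for unroll part P starts at Ptr + 4 * P, while a
    // reversed access starts at Ptr - 4 * P - 3 so that, together with the
    // reversed value (and mask), the lanes are processed in reverse order.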
2321 GetElementPtrInst *PartPtr = nullptr; 2322 2323 if (Reverse) { 2324 // If the address is consecutive but reversed, then the 2325 // wide store needs to start at the last vector element. 2326 PartPtr = cast<GetElementPtrInst>( 2327 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(-Part * VF))); 2328 PartPtr->setIsInBounds(InBounds); 2329 PartPtr = cast<GetElementPtrInst>( 2330 Builder.CreateGEP(ScalarDataTy, PartPtr, Builder.getInt32(1 - VF))); 2331 PartPtr->setIsInBounds(InBounds); 2332 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2333 Mask[Part] = reverseVector(Mask[Part]); 2334 } else { 2335 PartPtr = cast<GetElementPtrInst>( 2336 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(Part * VF))); 2337 PartPtr->setIsInBounds(InBounds); 2338 } 2339 2340 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2341 }; 2342 2343 // Handle Stores: 2344 if (SI) { 2345 setDebugLocFromInst(Builder, SI); 2346 2347 for (unsigned Part = 0; Part < UF; ++Part) { 2348 Instruction *NewSI = nullptr; 2349 Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part); 2350 if (CreateGatherScatter) { 2351 Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr; 2352 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 2353 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2354 MaskPart); 2355 } else { 2356 if (Reverse) { 2357 // If we store to reverse consecutive memory locations, then we need 2358 // to reverse the order of elements in the stored value. 2359 StoredVal = reverseVector(StoredVal); 2360 // We don't want to update the value in the map as it might be used in 2361 // another expression. So don't call resetVectorValue(StoredVal). 2362 } 2363 auto *VecPtr = CreateVecPtr(Part, Ptr); 2364 if (isMaskRequired) 2365 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2366 Mask[Part]); 2367 else 2368 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2369 } 2370 addMetadata(NewSI, SI); 2371 } 2372 return; 2373 } 2374 2375 // Handle loads. 2376 assert(LI && "Must have a load instruction"); 2377 setDebugLocFromInst(Builder, LI); 2378 for (unsigned Part = 0; Part < UF; ++Part) { 2379 Value *NewLI; 2380 if (CreateGatherScatter) { 2381 Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr; 2382 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 2383 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 2384 nullptr, "wide.masked.gather"); 2385 addMetadata(NewLI, LI); 2386 } else { 2387 auto *VecPtr = CreateVecPtr(Part, Ptr); 2388 if (isMaskRequired) 2389 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 2390 UndefValue::get(DataTy), 2391 "wide.masked.load"); 2392 else 2393 NewLI = 2394 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 2395 2396 // Add metadata to the load, but setVectorValue to the reverse shuffle. 2397 addMetadata(NewLI, LI); 2398 if (Reverse) 2399 NewLI = reverseVector(NewLI); 2400 } 2401 VectorLoopValueMap.setVectorValue(Instr, Part, NewLI); 2402 } 2403 } 2404 2405 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2406 const VPIteration &Instance, 2407 bool IfPredicateInstr) { 2408 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2409 2410 setDebugLocFromInst(Builder, Instr); 2411 2412 // Does this instruction return a value ? 
2413 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2414 2415 Instruction *Cloned = Instr->clone(); 2416 if (!IsVoidRetTy) 2417 Cloned->setName(Instr->getName() + ".cloned"); 2418 2419 // Replace the operands of the cloned instructions with their scalar 2420 // equivalents in the new loop. 2421 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 2422 auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance); 2423 Cloned->setOperand(op, NewOp); 2424 } 2425 addNewMetadata(Cloned, Instr); 2426 2427 // Place the cloned scalar in the new loop. 2428 Builder.Insert(Cloned); 2429 2430 // Add the cloned scalar to the scalar map entry. 2431 VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned); 2432 2433 // If we just cloned a new assumption, add it the assumption cache. 2434 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 2435 if (II->getIntrinsicID() == Intrinsic::assume) 2436 AC->registerAssumption(II); 2437 2438 // End if-block. 2439 if (IfPredicateInstr) 2440 PredicatedInstructions.push_back(Cloned); 2441 } 2442 2443 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 2444 Value *End, Value *Step, 2445 Instruction *DL) { 2446 BasicBlock *Header = L->getHeader(); 2447 BasicBlock *Latch = L->getLoopLatch(); 2448 // As we're just creating this loop, it's possible no latch exists 2449 // yet. If so, use the header as this will be a single block loop. 2450 if (!Latch) 2451 Latch = Header; 2452 2453 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 2454 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 2455 setDebugLocFromInst(Builder, OldInst); 2456 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 2457 2458 Builder.SetInsertPoint(Latch->getTerminator()); 2459 setDebugLocFromInst(Builder, OldInst); 2460 2461 // Create i+1 and fill the PHINode. 2462 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 2463 Induction->addIncoming(Start, L->getLoopPreheader()); 2464 Induction->addIncoming(Next, Latch); 2465 // Create the compare. 2466 Value *ICmp = Builder.CreateICmpEQ(Next, End); 2467 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 2468 2469 // Now we have two terminators. Remove the old one from the block. 2470 Latch->getTerminator()->eraseFromParent(); 2471 2472 return Induction; 2473 } 2474 2475 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 2476 if (TripCount) 2477 return TripCount; 2478 2479 assert(L && "Create Trip Count for null loop."); 2480 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2481 // Find the loop boundaries. 2482 ScalarEvolution *SE = PSE.getSE(); 2483 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2484 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 2485 "Invalid loop count"); 2486 2487 Type *IdxTy = Legal->getWidestInductionType(); 2488 assert(IdxTy && "No type for induction"); 2489 2490 // The exit count might have the type of i64 while the phi is i32. This can 2491 // happen if we have an induction variable that is sign extended before the 2492 // compare. The only way that we get a backedge taken count is that the 2493 // induction variable was signed and as such will not overflow. In such a case 2494 // truncation is legal. 
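  // Once the count has been adjusted to the widest induction type below, the
  // trip count is simply the backedge-taken count plus one: for example, a
  // loop whose backedge is taken 99 times has a trip count of 100.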
2495 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() > 2496 IdxTy->getPrimitiveSizeInBits()) 2497 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2498 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2499 2500 // Get the total trip count from the count by adding 1. 2501 const SCEV *ExitCount = SE->getAddExpr( 2502 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2503 2504 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2505 2506 // Expand the trip count and place the new instructions in the preheader. 2507 // Notice that the pre-header does not change, only the loop body. 2508 SCEVExpander Exp(*SE, DL, "induction"); 2509 2510 // Count holds the overall loop count (N). 2511 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2512 L->getLoopPreheader()->getTerminator()); 2513 2514 if (TripCount->getType()->isPointerTy()) 2515 TripCount = 2516 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2517 L->getLoopPreheader()->getTerminator()); 2518 2519 return TripCount; 2520 } 2521 2522 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 2523 if (VectorTripCount) 2524 return VectorTripCount; 2525 2526 Value *TC = getOrCreateTripCount(L); 2527 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2528 2529 Type *Ty = TC->getType(); 2530 Constant *Step = ConstantInt::get(Ty, VF * UF); 2531 2532 // If the tail is to be folded by masking, round the number of iterations N 2533 // up to a multiple of Step instead of rounding down. This is done by first 2534 // adding Step-1 and then rounding down. Note that it's ok if this addition 2535 // overflows: the vector induction variable will eventually wrap to zero given 2536 // that it starts at zero and its Step is a power of two; the loop will then 2537 // exit, with the last early-exit vector comparison also producing all-true. 2538 if (Cost->foldTailByMasking()) { 2539 assert(isPowerOf2_32(VF * UF) && 2540 "VF*UF must be a power of 2 when folding tail by masking"); 2541 TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up"); 2542 } 2543 2544 // Now we need to generate the expression for the part of the loop that the 2545 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2546 // iterations are not required for correctness, or N - Step, otherwise. Step 2547 // is equal to the vectorization factor (number of SIMD elements) times the 2548 // unroll factor (number of SIMD instructions). 2549 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2550 2551 // If there is a non-reversed interleaved group that may speculatively access 2552 // memory out-of-bounds, we need to ensure that there will be at least one 2553 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 2554 // the trip count, we set the remainder to be equal to the step. If the step 2555 // does not evenly divide the trip count, no adjustment is necessary since 2556 // there will already be scalar iterations. Note that the minimum iterations 2557 // check ensures that N >= Step. 
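  // Illustrative example (not taken from an actual compilation): with TC = 64,
  // VF = 4 and UF = 2, Step is 8 and R is 0; if a scalar epilogue is required,
  // R is bumped to 8 below, so the vector loop covers 56 iterations and the
  // scalar loop runs the remaining 8.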
2558 if (VF > 1 && Cost->requiresScalarEpilogue()) { 2559 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2560 R = Builder.CreateSelect(IsZero, Step, R); 2561 } 2562 2563 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2564 2565 return VectorTripCount; 2566 } 2567 2568 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2569 const DataLayout &DL) { 2570 // Verify that V is a vector type with same number of elements as DstVTy. 2571 unsigned VF = DstVTy->getNumElements(); 2572 VectorType *SrcVecTy = cast<VectorType>(V->getType()); 2573 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2574 Type *SrcElemTy = SrcVecTy->getElementType(); 2575 Type *DstElemTy = DstVTy->getElementType(); 2576 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2577 "Vector elements must have same size"); 2578 2579 // Do a direct cast if element types are castable. 2580 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2581 return Builder.CreateBitOrPointerCast(V, DstVTy); 2582 } 2583 // V cannot be directly casted to desired vector type. 2584 // May happen when V is a floating point vector but DstVTy is a vector of 2585 // pointers or vice-versa. Handle this using a two-step bitcast using an 2586 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 2587 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2588 "Only one type should be a pointer type"); 2589 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2590 "Only one type should be a floating point type"); 2591 Type *IntTy = 2592 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2593 VectorType *VecIntTy = VectorType::get(IntTy, VF); 2594 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2595 return Builder.CreateBitOrPointerCast(CastVal, DstVTy); 2596 } 2597 2598 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 2599 BasicBlock *Bypass) { 2600 Value *Count = getOrCreateTripCount(L); 2601 BasicBlock *BB = L->getLoopPreheader(); 2602 IRBuilder<> Builder(BB->getTerminator()); 2603 2604 // Generate code to check if the loop's trip count is less than VF * UF, or 2605 // equal to it in case a scalar epilogue is required; this implies that the 2606 // vector trip count is zero. This check also covers the case where adding one 2607 // to the backedge-taken count overflowed leading to an incorrect trip count 2608 // of zero. In this case we will also jump to the scalar loop. 2609 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE 2610 : ICmpInst::ICMP_ULT; 2611 2612 // If tail is to be folded, vector loop takes care of all iterations. 2613 Value *CheckMinIters = Builder.getFalse(); 2614 if (!Cost->foldTailByMasking()) 2615 CheckMinIters = Builder.CreateICmp( 2616 P, Count, ConstantInt::get(Count->getType(), VF * UF), 2617 "min.iters.check"); 2618 2619 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2620 // Update dominator tree immediately if the generated block is a 2621 // LoopBypassBlock because SCEV expansions to generate loop bypass 2622 // checks may query it before the current function is finished. 
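  // Illustrative example (not taken from an actual compilation): with VF = 4
  // and UF = 2, CheckMinIters computed above is 'Count < 8' (or 'Count <= 8'
  // when a scalar epilogue is required), and the branch created below jumps to
  // the scalar loop when it is true.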
2623 DT->addNewBlock(NewBB, BB); 2624 if (L->getParentLoop()) 2625 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2626 ReplaceInstWithInst(BB->getTerminator(), 2627 BranchInst::Create(Bypass, NewBB, CheckMinIters)); 2628 LoopBypassBlocks.push_back(BB); 2629 } 2630 2631 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 2632 BasicBlock *BB = L->getLoopPreheader(); 2633 2634 // Generate the code to check that the SCEV assumptions that we made. 2635 // We want the new basic block to start at the first instruction in a 2636 // sequence of instructions that form a check. 2637 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(), 2638 "scev.check"); 2639 Value *SCEVCheck = 2640 Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator()); 2641 2642 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck)) 2643 if (C->isZero()) 2644 return; 2645 2646 assert(!Cost->foldTailByMasking() && 2647 "Cannot SCEV check stride or overflow when folding tail"); 2648 // Create a new block containing the stride check. 2649 BB->setName("vector.scevcheck"); 2650 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2651 // Update dominator tree immediately if the generated block is a 2652 // LoopBypassBlock because SCEV expansions to generate loop bypass 2653 // checks may query it before the current function is finished. 2654 DT->addNewBlock(NewBB, BB); 2655 if (L->getParentLoop()) 2656 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2657 ReplaceInstWithInst(BB->getTerminator(), 2658 BranchInst::Create(Bypass, NewBB, SCEVCheck)); 2659 LoopBypassBlocks.push_back(BB); 2660 AddedSafetyChecks = true; 2661 } 2662 2663 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) { 2664 // VPlan-native path does not do any analysis for runtime checks currently. 2665 if (EnableVPlanNativePath) 2666 return; 2667 2668 BasicBlock *BB = L->getLoopPreheader(); 2669 2670 // Generate the code that checks in runtime if arrays overlap. We put the 2671 // checks into a separate block to make the more common case of few elements 2672 // faster. 2673 Instruction *FirstCheckInst; 2674 Instruction *MemRuntimeCheck; 2675 std::tie(FirstCheckInst, MemRuntimeCheck) = 2676 Legal->getLAI()->addRuntimeChecks(BB->getTerminator()); 2677 if (!MemRuntimeCheck) 2678 return; 2679 2680 assert(!Cost->foldTailByMasking() && "Cannot check memory when folding tail"); 2681 // Create a new block containing the memory check. 2682 BB->setName("vector.memcheck"); 2683 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2684 // Update dominator tree immediately if the generated block is a 2685 // LoopBypassBlock because SCEV expansions to generate loop bypass 2686 // checks may query it before the current function is finished. 2687 DT->addNewBlock(NewBB, BB); 2688 if (L->getParentLoop()) 2689 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2690 ReplaceInstWithInst(BB->getTerminator(), 2691 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck)); 2692 LoopBypassBlocks.push_back(BB); 2693 AddedSafetyChecks = true; 2694 2695 // We currently don't use LoopVersioning for the actual loop cloning but we 2696 // still use it to add the noalias metadata. 
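  // That is (behavioral note, see LoopVersioning): the memory instructions in
  // the vector loop are annotated with !alias.scope / !noalias so that later
  // passes can assume the runtime-checked pointer groups do not alias.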
2697 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 2698 PSE.getSE()); 2699 LVer->prepareNoAliasMetadata(); 2700 } 2701 2702 Value *InnerLoopVectorizer::emitTransformedIndex( 2703 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 2704 const InductionDescriptor &ID) const { 2705 2706 SCEVExpander Exp(*SE, DL, "induction"); 2707 auto Step = ID.getStep(); 2708 auto StartValue = ID.getStartValue(); 2709 assert(Index->getType() == Step->getType() && 2710 "Index type does not match StepValue type"); 2711 2712 // Note: the IR at this point is broken. We cannot use SE to create any new 2713 // SCEV and then expand it, hoping that SCEV's simplification will give us 2714 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 2715 // lead to various SCEV crashes. So all we can do is to use builder and rely 2716 // on InstCombine for future simplifications. Here we handle some trivial 2717 // cases only. 2718 auto CreateAdd = [&B](Value *X, Value *Y) { 2719 assert(X->getType() == Y->getType() && "Types don't match!"); 2720 if (auto *CX = dyn_cast<ConstantInt>(X)) 2721 if (CX->isZero()) 2722 return Y; 2723 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2724 if (CY->isZero()) 2725 return X; 2726 return B.CreateAdd(X, Y); 2727 }; 2728 2729 auto CreateMul = [&B](Value *X, Value *Y) { 2730 assert(X->getType() == Y->getType() && "Types don't match!"); 2731 if (auto *CX = dyn_cast<ConstantInt>(X)) 2732 if (CX->isOne()) 2733 return Y; 2734 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2735 if (CY->isOne()) 2736 return X; 2737 return B.CreateMul(X, Y); 2738 }; 2739 2740 switch (ID.getKind()) { 2741 case InductionDescriptor::IK_IntInduction: { 2742 assert(Index->getType() == StartValue->getType() && 2743 "Index type does not match StartValue type"); 2744 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 2745 return B.CreateSub(StartValue, Index); 2746 auto *Offset = CreateMul( 2747 Index, Exp.expandCodeFor(Step, Index->getType(), &*B.GetInsertPoint())); 2748 return CreateAdd(StartValue, Offset); 2749 } 2750 case InductionDescriptor::IK_PtrInduction: { 2751 assert(isa<SCEVConstant>(Step) && 2752 "Expected constant step for pointer induction"); 2753 return B.CreateGEP( 2754 StartValue->getType()->getPointerElementType(), StartValue, 2755 CreateMul(Index, Exp.expandCodeFor(Step, Index->getType(), 2756 &*B.GetInsertPoint()))); 2757 } 2758 case InductionDescriptor::IK_FpInduction: { 2759 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2760 auto InductionBinOp = ID.getInductionBinOp(); 2761 assert(InductionBinOp && 2762 (InductionBinOp->getOpcode() == Instruction::FAdd || 2763 InductionBinOp->getOpcode() == Instruction::FSub) && 2764 "Original bin op should be defined for FP induction"); 2765 2766 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 2767 2768 // Floating point operations had to be 'fast' to enable the induction. 2769 FastMathFlags Flags; 2770 Flags.setFast(); 2771 2772 Value *MulExp = B.CreateFMul(StepValue, Index); 2773 if (isa<Instruction>(MulExp)) 2774 // We have to check, the MulExp may be a constant. 
2775 cast<Instruction>(MulExp)->setFastMathFlags(Flags); 2776 2777 Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 2778 "induction"); 2779 if (isa<Instruction>(BOp)) 2780 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2781 2782 return BOp; 2783 } 2784 case InductionDescriptor::IK_NoInduction: 2785 return nullptr; 2786 } 2787 llvm_unreachable("invalid enum"); 2788 } 2789 2790 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 2791 /* 2792 In this function we generate a new loop. The new loop will contain 2793 the vectorized instructions while the old loop will continue to run the 2794 scalar remainder. 2795 2796 [ ] <-- loop iteration number check. 2797 / | 2798 / v 2799 | [ ] <-- vector loop bypass (may consist of multiple blocks). 2800 | / | 2801 | / v 2802 || [ ] <-- vector pre header. 2803 |/ | 2804 | v 2805 | [ ] \ 2806 | [ ]_| <-- vector loop. 2807 | | 2808 | v 2809 | -[ ] <--- middle-block. 2810 | / | 2811 | / v 2812 -|- >[ ] <--- new preheader. 2813 | | 2814 | v 2815 | [ ] \ 2816 | [ ]_| <-- old scalar loop to handle remainder. 2817 \ | 2818 \ v 2819 >[ ] <-- exit block. 2820 ... 2821 */ 2822 2823 BasicBlock *OldBasicBlock = OrigLoop->getHeader(); 2824 BasicBlock *VectorPH = OrigLoop->getLoopPreheader(); 2825 BasicBlock *ExitBlock = OrigLoop->getExitBlock(); 2826 MDNode *OrigLoopID = OrigLoop->getLoopID(); 2827 assert(VectorPH && "Invalid loop structure"); 2828 assert(ExitBlock && "Must have an exit block"); 2829 2830 // Some loops have a single integer induction variable, while other loops 2831 // don't. One example is c++ iterators that often have multiple pointer 2832 // induction variables. In the code below we also support a case where we 2833 // don't have a single induction variable. 2834 // 2835 // We try to obtain an induction variable from the original loop as hard 2836 // as possible. However if we don't find one that: 2837 // - is an integer 2838 // - counts from zero, stepping by one 2839 // - is the size of the widest induction variable type 2840 // then we create a new one. 2841 OldInduction = Legal->getPrimaryInduction(); 2842 Type *IdxTy = Legal->getWidestInductionType(); 2843 2844 // Split the single block loop into the two loop structure described above. 2845 BasicBlock *VecBody = 2846 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body"); 2847 BasicBlock *MiddleBlock = 2848 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block"); 2849 BasicBlock *ScalarPH = 2850 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph"); 2851 2852 // Create and register the new vector loop. 2853 Loop *Lp = LI->AllocateLoop(); 2854 Loop *ParentLoop = OrigLoop->getParentLoop(); 2855 2856 // Insert the new loop into the loop nest and register the new basic blocks 2857 // before calling any utilities such as SCEV that require valid LoopInfo. 2858 if (ParentLoop) { 2859 ParentLoop->addChildLoop(Lp); 2860 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI); 2861 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI); 2862 } else { 2863 LI->addTopLevelLoop(Lp); 2864 } 2865 Lp->addBasicBlockToLoop(VecBody, *LI); 2866 2867 // Find the loop boundaries. 2868 Value *Count = getOrCreateTripCount(Lp); 2869 2870 Value *StartIdx = ConstantInt::get(IdxTy, 0); 2871 2872 // Now, compare the new count to zero. If it is zero skip the vector loop and 2873 // jump to the scalar loop. 
This check also covers the case where the 2874 // backedge-taken count is uint##_max: adding one to it will overflow leading 2875 // to an incorrect trip count of zero. In this (rare) case we will also jump 2876 // to the scalar loop. 2877 emitMinimumIterationCountCheck(Lp, ScalarPH); 2878 2879 // Generate the code to check any assumptions that we've made for SCEV 2880 // expressions. 2881 emitSCEVChecks(Lp, ScalarPH); 2882 2883 // Generate the code that checks in runtime if arrays overlap. We put the 2884 // checks into a separate block to make the more common case of few elements 2885 // faster. 2886 emitMemRuntimeChecks(Lp, ScalarPH); 2887 2888 // Generate the induction variable. 2889 // The loop step is equal to the vectorization factor (num of SIMD elements) 2890 // times the unroll factor (num of SIMD instructions). 2891 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 2892 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 2893 Induction = 2894 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 2895 getDebugLocFromInstOrOperands(OldInduction)); 2896 2897 // We are going to resume the execution of the scalar loop. 2898 // Go over all of the induction variables that we found and fix the 2899 // PHIs that are left in the scalar version of the loop. 2900 // The starting values of PHI nodes depend on the counter of the last 2901 // iteration in the vectorized loop. 2902 // If we come from a bypass edge then we need to start from the original 2903 // start value. 2904 2905 // This variable saves the new starting index for the scalar loop. It is used 2906 // to test if there are any tail iterations left once the vector loop has 2907 // completed. 2908 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars(); 2909 for (auto &InductionEntry : *List) { 2910 PHINode *OrigPhi = InductionEntry.first; 2911 InductionDescriptor II = InductionEntry.second; 2912 2913 // Create phi nodes to merge from the backedge-taken check block. 2914 PHINode *BCResumeVal = PHINode::Create( 2915 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator()); 2916 // Copy original phi DL over to the new one. 2917 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 2918 Value *&EndValue = IVEndValues[OrigPhi]; 2919 if (OrigPhi == OldInduction) { 2920 // We know what the end value is. 2921 EndValue = CountRoundDown; 2922 } else { 2923 IRBuilder<> B(Lp->getLoopPreheader()->getTerminator()); 2924 Type *StepType = II.getStep()->getType(); 2925 Instruction::CastOps CastOp = 2926 CastInst::getCastOpcode(CountRoundDown, true, StepType, true); 2927 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd"); 2928 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2929 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 2930 EndValue->setName("ind.end"); 2931 } 2932 2933 // The new PHI merges the original incoming value, in case of a bypass, 2934 // or the value at the end of the vectorized loop. 2935 BCResumeVal->addIncoming(EndValue, MiddleBlock); 2936 2937 // Fix the scalar body counter (PHI node). 2938 // The old induction's phi node in the scalar body needs the truncated 2939 // value. 2940 for (BasicBlock *BB : LoopBypassBlocks) 2941 BCResumeVal->addIncoming(II.getStartValue(), BB); 2942 OrigPhi->setIncomingValueForBlock(ScalarPH, BCResumeVal); 2943 } 2944 2945 // We need the OrigLoop (scalar loop part) latch terminator to help 2946 // produce correct debug info for the middle block BB instructions. 
2947 // The legality check stage guarantees that the loop will have a single 2948 // latch. 2949 assert(isa<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()) && 2950 "Scalar loop latch terminator isn't a branch"); 2951 BranchInst *ScalarLatchBr = 2952 cast<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()); 2953 2954 // Add a check in the middle block to see if we have completed 2955 // all of the iterations in the first vector loop. 2956 // If (N - N%VF) == N, then we *don't* need to run the remainder. 2957 // If tail is to be folded, we know we don't need to run the remainder. 2958 Value *CmpN = Builder.getTrue(); 2959 if (!Cost->foldTailByMasking()) { 2960 CmpN = 2961 CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 2962 CountRoundDown, "cmp.n", MiddleBlock->getTerminator()); 2963 2964 // Here we use the same DebugLoc as the scalar loop latch branch instead 2965 // of the corresponding compare because they may have ended up with 2966 // different line numbers and we want to avoid awkward line stepping while 2967 // debugging. Eg. if the compare has got a line number inside the loop. 2968 cast<Instruction>(CmpN)->setDebugLoc(ScalarLatchBr->getDebugLoc()); 2969 } 2970 2971 BranchInst *BrInst = BranchInst::Create(ExitBlock, ScalarPH, CmpN); 2972 BrInst->setDebugLoc(ScalarLatchBr->getDebugLoc()); 2973 ReplaceInstWithInst(MiddleBlock->getTerminator(), BrInst); 2974 2975 // Get ready to start creating new instructions into the vectorized body. 2976 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt()); 2977 2978 // Save the state. 2979 LoopVectorPreHeader = Lp->getLoopPreheader(); 2980 LoopScalarPreHeader = ScalarPH; 2981 LoopMiddleBlock = MiddleBlock; 2982 LoopExitBlock = ExitBlock; 2983 LoopVectorBody = VecBody; 2984 LoopScalarBody = OldBasicBlock; 2985 2986 Optional<MDNode *> VectorizedLoopID = 2987 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 2988 LLVMLoopVectorizeFollowupVectorized}); 2989 if (VectorizedLoopID.hasValue()) { 2990 Lp->setLoopID(VectorizedLoopID.getValue()); 2991 2992 // Do not setAlreadyVectorized if loop attributes have been defined 2993 // explicitly. 2994 return LoopVectorPreHeader; 2995 } 2996 2997 // Keep all loop hints from the original loop on the vector loop (we'll 2998 // replace the vectorizer-specific hints below). 2999 if (MDNode *LID = OrigLoop->getLoopID()) 3000 Lp->setLoopID(LID); 3001 3002 LoopVectorizeHints Hints(Lp, true, *ORE); 3003 Hints.setAlreadyVectorized(); 3004 3005 return LoopVectorPreHeader; 3006 } 3007 3008 // Fix up external users of the induction variable. At this point, we are 3009 // in LCSSA form, with all external PHIs that use the IV having one input value, 3010 // coming from the remainder loop. We need those PHIs to also have a correct 3011 // value for the IV when arriving directly from the middle block. 3012 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3013 const InductionDescriptor &II, 3014 Value *CountRoundDown, Value *EndValue, 3015 BasicBlock *MiddleBlock) { 3016 // There are two kinds of external IV usages - those that use the value 3017 // computed in the last iteration (the PHI) and those that use the penultimate 3018 // value (the value that feeds into the phi from the loop latch). 3019 // We allow both, but they, obviously, have different values. 
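// For example (illustrative shorthand IR):
//
//   loop:
//     %iv      = phi i64 [ 0, %ph ], [ %iv.next, %loop ]
//     %iv.next = add i64 %iv, 1
//     ...
//   exit:
//     %a = phi i64 [ %iv, %loop ]      ; penultimate value, needs EndValue - Step
//     %b = phi i64 [ %iv.next, %loop ] ; last value, needs EndValue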
3020
3021 assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3022
3023 DenseMap<Value *, Value *> MissingVals;
3024
3025 // An external user of the last iteration's value should see the value that
3026 // the remainder loop uses to initialize its own IV.
3027 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3028 for (User *U : PostInc->users()) {
3029 Instruction *UI = cast<Instruction>(U);
3030 if (!OrigLoop->contains(UI)) {
3031 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3032 MissingVals[UI] = EndValue;
3033 }
3034 }
3035
3036 // An external user of the penultimate value needs to see EndValue - Step.
3037 // The simplest way to get this is to recompute it from the constituent SCEVs,
3038 // that is Start + (Step * (CRD - 1)).
3039 for (User *U : OrigPhi->users()) {
3040 auto *UI = cast<Instruction>(U);
3041 if (!OrigLoop->contains(UI)) {
3042 const DataLayout &DL =
3043 OrigLoop->getHeader()->getModule()->getDataLayout();
3044 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3045
3046 IRBuilder<> B(MiddleBlock->getTerminator());
3047 Value *CountMinusOne = B.CreateSub(
3048 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3049 Value *CMO =
3050 !II.getStep()->getType()->isIntegerTy()
3051 ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3052 II.getStep()->getType())
3053 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3054 CMO->setName("cast.cmo");
3055 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3056 Escape->setName("ind.escape");
3057 MissingVals[UI] = Escape;
3058 }
3059 }
3060
3061 for (auto &I : MissingVals) {
3062 PHINode *PHI = cast<PHINode>(I.first);
3063 // One corner case we have to handle is two IVs "chasing" each other,
3064 // that is %IV2 = phi [...], [ %IV1, %latch ]
3065 // In this case, if IV1 has an external use, we need to avoid adding both
3066 // "last value of IV1" and "penultimate value of IV2". So, verify that we
3067 // don't already have an incoming value for the middle block.
3068 if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3069 PHI->addIncoming(I.second, MiddleBlock);
3070 }
3071 }
3072
3073 namespace {
3074
3075 struct CSEDenseMapInfo {
3076 static bool canHandle(const Instruction *I) {
3077 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3078 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3079 }
3080
3081 static inline Instruction *getEmptyKey() {
3082 return DenseMapInfo<Instruction *>::getEmptyKey();
3083 }
3084
3085 static inline Instruction *getTombstoneKey() {
3086 return DenseMapInfo<Instruction *>::getTombstoneKey();
3087 }
3088
3089 static unsigned getHashValue(const Instruction *I) {
3090 assert(canHandle(I) && "Unknown instruction!");
3091 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3092 I->value_op_end()));
3093 }
3094
3095 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3096 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3097 LHS == getTombstoneKey() || RHS == getTombstoneKey())
3098 return LHS == RHS;
3099 return LHS->isIdenticalTo(RHS);
3100 }
3101 };
3102
3103 } // end anonymous namespace
3104
3105 /// Perform CSE of induction variable instructions.
3106 static void cse(BasicBlock *BB) {
3107 // Perform simple CSE.
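// CSEDenseMapInfo above keys the handled instructions by opcode and operand
// values, so identical insert/extract/shuffle/GEP instructions created while
// unrolling collapse onto a single instance here.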
3108 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3109 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3110 Instruction *In = &*I++; 3111 3112 if (!CSEDenseMapInfo::canHandle(In)) 3113 continue; 3114 3115 // Check if we can replace this instruction with any of the 3116 // visited instructions. 3117 if (Instruction *V = CSEMap.lookup(In)) { 3118 In->replaceAllUsesWith(V); 3119 In->eraseFromParent(); 3120 continue; 3121 } 3122 3123 CSEMap[In] = In; 3124 } 3125 } 3126 3127 unsigned LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, 3128 unsigned VF, 3129 bool &NeedToScalarize) { 3130 Function *F = CI->getCalledFunction(); 3131 StringRef FnName = CI->getCalledFunction()->getName(); 3132 Type *ScalarRetTy = CI->getType(); 3133 SmallVector<Type *, 4> Tys, ScalarTys; 3134 for (auto &ArgOp : CI->arg_operands()) 3135 ScalarTys.push_back(ArgOp->getType()); 3136 3137 // Estimate cost of scalarized vector call. The source operands are assumed 3138 // to be vectors, so we need to extract individual elements from there, 3139 // execute VF scalar calls, and then gather the result into the vector return 3140 // value. 3141 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3142 if (VF == 1) 3143 return ScalarCallCost; 3144 3145 // Compute corresponding vector type for return value and arguments. 3146 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3147 for (Type *ScalarTy : ScalarTys) 3148 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3149 3150 // Compute costs of unpacking argument values for the scalar calls and 3151 // packing the return values to a vector. 3152 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF); 3153 3154 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3155 3156 // If we can't emit a vector call for this function, then the currently found 3157 // cost is the cost we need to return. 3158 NeedToScalarize = true; 3159 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3160 return Cost; 3161 3162 // If the corresponding vector cost is cheaper, return its cost. 3163 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3164 if (VectorCallCost < Cost) { 3165 NeedToScalarize = false; 3166 return VectorCallCost; 3167 } 3168 return Cost; 3169 } 3170 3171 unsigned LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3172 unsigned VF) { 3173 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3174 assert(ID && "Expected intrinsic call!"); 3175 3176 FastMathFlags FMF; 3177 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3178 FMF = FPMO->getFastMathFlags(); 3179 3180 SmallVector<Value *, 4> Operands(CI->arg_operands()); 3181 return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF); 3182 } 3183 3184 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3185 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3186 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3187 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3188 } 3189 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3190 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3191 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3192 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3193 } 3194 3195 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3196 // For every instruction `I` in MinBWs, truncate the operands, create a 3197 // truncated version of `I` and reextend its result. 
InstCombine runs 3198 // later and will remove any ext/trunc pairs. 3199 SmallPtrSet<Value *, 4> Erased; 3200 for (const auto &KV : Cost->getMinimalBitwidths()) { 3201 // If the value wasn't vectorized, we must maintain the original scalar 3202 // type. The absence of the value from VectorLoopValueMap indicates that it 3203 // wasn't vectorized. 3204 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3205 continue; 3206 for (unsigned Part = 0; Part < UF; ++Part) { 3207 Value *I = getOrCreateVectorValue(KV.first, Part); 3208 if (Erased.find(I) != Erased.end() || I->use_empty() || 3209 !isa<Instruction>(I)) 3210 continue; 3211 Type *OriginalTy = I->getType(); 3212 Type *ScalarTruncatedTy = 3213 IntegerType::get(OriginalTy->getContext(), KV.second); 3214 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3215 OriginalTy->getVectorNumElements()); 3216 if (TruncatedTy == OriginalTy) 3217 continue; 3218 3219 IRBuilder<> B(cast<Instruction>(I)); 3220 auto ShrinkOperand = [&](Value *V) -> Value * { 3221 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3222 if (ZI->getSrcTy() == TruncatedTy) 3223 return ZI->getOperand(0); 3224 return B.CreateZExtOrTrunc(V, TruncatedTy); 3225 }; 3226 3227 // The actual instruction modification depends on the instruction type, 3228 // unfortunately. 3229 Value *NewI = nullptr; 3230 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3231 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3232 ShrinkOperand(BO->getOperand(1))); 3233 3234 // Any wrapping introduced by shrinking this operation shouldn't be 3235 // considered undefined behavior. So, we can't unconditionally copy 3236 // arithmetic wrapping flags to NewI. 3237 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3238 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3239 NewI = 3240 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3241 ShrinkOperand(CI->getOperand(1))); 3242 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3243 NewI = B.CreateSelect(SI->getCondition(), 3244 ShrinkOperand(SI->getTrueValue()), 3245 ShrinkOperand(SI->getFalseValue())); 3246 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3247 switch (CI->getOpcode()) { 3248 default: 3249 llvm_unreachable("Unhandled cast!"); 3250 case Instruction::Trunc: 3251 NewI = ShrinkOperand(CI->getOperand(0)); 3252 break; 3253 case Instruction::SExt: 3254 NewI = B.CreateSExtOrTrunc( 3255 CI->getOperand(0), 3256 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3257 break; 3258 case Instruction::ZExt: 3259 NewI = B.CreateZExtOrTrunc( 3260 CI->getOperand(0), 3261 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3262 break; 3263 } 3264 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3265 auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements(); 3266 auto *O0 = B.CreateZExtOrTrunc( 3267 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3268 auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements(); 3269 auto *O1 = B.CreateZExtOrTrunc( 3270 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3271 3272 NewI = B.CreateShuffleVector(O0, O1, SI->getMask()); 3273 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3274 // Don't do anything with the operands, just extend the result. 
3275 continue; 3276 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3277 auto Elements = IE->getOperand(0)->getType()->getVectorNumElements(); 3278 auto *O0 = B.CreateZExtOrTrunc( 3279 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3280 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3281 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3282 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3283 auto Elements = EE->getOperand(0)->getType()->getVectorNumElements(); 3284 auto *O0 = B.CreateZExtOrTrunc( 3285 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3286 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3287 } else { 3288 // If we don't know what to do, be conservative and don't do anything. 3289 continue; 3290 } 3291 3292 // Lastly, extend the result. 3293 NewI->takeName(cast<Instruction>(I)); 3294 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3295 I->replaceAllUsesWith(Res); 3296 cast<Instruction>(I)->eraseFromParent(); 3297 Erased.insert(I); 3298 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res); 3299 } 3300 } 3301 3302 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3303 for (const auto &KV : Cost->getMinimalBitwidths()) { 3304 // If the value wasn't vectorized, we must maintain the original scalar 3305 // type. The absence of the value from VectorLoopValueMap indicates that it 3306 // wasn't vectorized. 3307 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3308 continue; 3309 for (unsigned Part = 0; Part < UF; ++Part) { 3310 Value *I = getOrCreateVectorValue(KV.first, Part); 3311 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3312 if (Inst && Inst->use_empty()) { 3313 Value *NewI = Inst->getOperand(0); 3314 Inst->eraseFromParent(); 3315 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI); 3316 } 3317 } 3318 } 3319 } 3320 3321 void InnerLoopVectorizer::fixVectorizedLoop() { 3322 // Insert truncates and extends for any truncated instructions as hints to 3323 // InstCombine. 3324 if (VF > 1) 3325 truncateToMinimalBitwidths(); 3326 3327 // Fix widened non-induction PHIs by setting up the PHI operands. 3328 if (OrigPHIsToFix.size()) { 3329 assert(EnableVPlanNativePath && 3330 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 3331 fixNonInductionPHIs(); 3332 } 3333 3334 // At this point every instruction in the original loop is widened to a 3335 // vector form. Now we need to fix the recurrences in the loop. These PHI 3336 // nodes are currently empty because we did not want to introduce cycles. 3337 // This is the second stage of vectorizing recurrences. 3338 fixCrossIterationPHIs(); 3339 3340 // Update the dominator tree. 3341 // 3342 // FIXME: After creating the structure of the new loop, the dominator tree is 3343 // no longer up-to-date, and it remains that way until we update it 3344 // here. An out-of-date dominator tree is problematic for SCEV, 3345 // because SCEVExpander uses it to guide code generation. The 3346 // vectorizer use SCEVExpanders in several places. Instead, we should 3347 // keep the dominator tree up-to-date as we go. 3348 updateAnalysis(); 3349 3350 // Fix-up external users of the induction variables. 
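// E.g. (illustrative): an LCSSA phi in the exit block such as
//   %iv.lcssa = phi i64 [ %iv, %scalar.latch ]
// also needs an incoming value from the middle block for the case where the
// scalar remainder loop is skipped.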
3351 for (auto &Entry : *Legal->getInductionVars()) 3352 fixupIVUsers(Entry.first, Entry.second, 3353 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 3354 IVEndValues[Entry.first], LoopMiddleBlock); 3355 3356 fixLCSSAPHIs(); 3357 for (Instruction *PI : PredicatedInstructions) 3358 sinkScalarOperands(&*PI); 3359 3360 // Remove redundant induction instructions. 3361 cse(LoopVectorBody); 3362 } 3363 3364 void InnerLoopVectorizer::fixCrossIterationPHIs() { 3365 // In order to support recurrences we need to be able to vectorize Phi nodes. 3366 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3367 // stage #2: We now need to fix the recurrences by adding incoming edges to 3368 // the currently empty PHI nodes. At this point every instruction in the 3369 // original loop is widened to a vector form so we can use them to construct 3370 // the incoming edges. 3371 for (PHINode &Phi : OrigLoop->getHeader()->phis()) { 3372 // Handle first-order recurrences and reductions that need to be fixed. 3373 if (Legal->isFirstOrderRecurrence(&Phi)) 3374 fixFirstOrderRecurrence(&Phi); 3375 else if (Legal->isReductionVariable(&Phi)) 3376 fixReduction(&Phi); 3377 } 3378 } 3379 3380 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) { 3381 // This is the second phase of vectorizing first-order recurrences. An 3382 // overview of the transformation is described below. Suppose we have the 3383 // following loop. 3384 // 3385 // for (int i = 0; i < n; ++i) 3386 // b[i] = a[i] - a[i - 1]; 3387 // 3388 // There is a first-order recurrence on "a". For this loop, the shorthand 3389 // scalar IR looks like: 3390 // 3391 // scalar.ph: 3392 // s_init = a[-1] 3393 // br scalar.body 3394 // 3395 // scalar.body: 3396 // i = phi [0, scalar.ph], [i+1, scalar.body] 3397 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 3398 // s2 = a[i] 3399 // b[i] = s2 - s1 3400 // br cond, scalar.body, ... 3401 // 3402 // In this example, s1 is a recurrence because it's value depends on the 3403 // previous iteration. In the first phase of vectorization, we created a 3404 // temporary value for s1. We now complete the vectorization and produce the 3405 // shorthand vector IR shown below (for VF = 4, UF = 1). 3406 // 3407 // vector.ph: 3408 // v_init = vector(..., ..., ..., a[-1]) 3409 // br vector.body 3410 // 3411 // vector.body 3412 // i = phi [0, vector.ph], [i+4, vector.body] 3413 // v1 = phi [v_init, vector.ph], [v2, vector.body] 3414 // v2 = a[i, i+1, i+2, i+3]; 3415 // v3 = vector(v1(3), v2(0, 1, 2)) 3416 // b[i, i+1, i+2, i+3] = v2 - v3 3417 // br cond, vector.body, middle.block 3418 // 3419 // middle.block: 3420 // x = v2(3) 3421 // br scalar.ph 3422 // 3423 // scalar.ph: 3424 // s_init = phi [x, middle.block], [a[-1], otherwise] 3425 // br scalar.body 3426 // 3427 // After execution completes the vector loop, we extract the next value of 3428 // the recurrence (x) to use as the initial value in the scalar loop. 3429 3430 // Get the original loop preheader and single loop latch. 3431 auto *Preheader = OrigLoop->getLoopPreheader(); 3432 auto *Latch = OrigLoop->getLoopLatch(); 3433 3434 // Get the initial and previous values of the scalar recurrence. 3435 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader); 3436 auto *Previous = Phi->getIncomingValueForBlock(Latch); 3437 3438 // Create a vector from the initial value. 
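// E.g. (illustrative) for VF = 4 this builds
//   v_init = <undef, undef, undef, s_init>
// i.e. the scalar initial value goes into the last lane, matching the sketch
// above.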
3439 auto *VectorInit = ScalarInit; 3440 if (VF > 1) { 3441 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 3442 VectorInit = Builder.CreateInsertElement( 3443 UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit, 3444 Builder.getInt32(VF - 1), "vector.recur.init"); 3445 } 3446 3447 // We constructed a temporary phi node in the first phase of vectorization. 3448 // This phi node will eventually be deleted. 3449 Builder.SetInsertPoint( 3450 cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0))); 3451 3452 // Create a phi node for the new recurrence. The current value will either be 3453 // the initial value inserted into a vector or loop-varying vector value. 3454 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur"); 3455 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader); 3456 3457 // Get the vectorized previous value of the last part UF - 1. It appears last 3458 // among all unrolled iterations, due to the order of their construction. 3459 Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1); 3460 3461 // Set the insertion point after the previous value if it is an instruction. 3462 // Note that the previous value may have been constant-folded so it is not 3463 // guaranteed to be an instruction in the vector loop. Also, if the previous 3464 // value is a phi node, we should insert after all the phi nodes to avoid 3465 // breaking basic block verification. 3466 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) || 3467 isa<PHINode>(PreviousLastPart)) 3468 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3469 else 3470 Builder.SetInsertPoint( 3471 &*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart))); 3472 3473 // We will construct a vector for the recurrence by combining the values for 3474 // the current and previous iterations. This is the required shuffle mask. 3475 SmallVector<Constant *, 8> ShuffleMask(VF); 3476 ShuffleMask[0] = Builder.getInt32(VF - 1); 3477 for (unsigned I = 1; I < VF; ++I) 3478 ShuffleMask[I] = Builder.getInt32(I + VF - 1); 3479 3480 // The vector from which to take the initial value for the current iteration 3481 // (actual or unrolled). Initially, this is the vector phi node. 3482 Value *Incoming = VecPhi; 3483 3484 // Shuffle the current and previous vector and update the vector parts. 3485 for (unsigned Part = 0; Part < UF; ++Part) { 3486 Value *PreviousPart = getOrCreateVectorValue(Previous, Part); 3487 Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part); 3488 auto *Shuffle = 3489 VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart, 3490 ConstantVector::get(ShuffleMask)) 3491 : Incoming; 3492 PhiPart->replaceAllUsesWith(Shuffle); 3493 cast<Instruction>(PhiPart)->eraseFromParent(); 3494 VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle); 3495 Incoming = PreviousPart; 3496 } 3497 3498 // Fix the latch value of the new recurrence in the vector loop. 3499 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 3500 3501 // Extract the last vector element in the middle block. This will be the 3502 // initial value for the recurrence when jumping to the scalar loop. 
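// In the sketch above this is "x = v2(3)": lane VF - 1 of the last unrolled
// part is extracted in the middle block.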
3503 auto *ExtractForScalar = Incoming;
3504 if (VF > 1) {
3505 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3506 ExtractForScalar = Builder.CreateExtractElement(
3507 ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
3508 }
3509 // Extract the second last element in the middle block if the
3510 // Phi is used outside the loop. We need to extract the phi itself
3511 // and not the last element (the phi update in the current iteration). This
3512 // will be the value when jumping to the exit block from the LoopMiddleBlock,
3513 // when the scalar loop is not run at all.
3514 Value *ExtractForPhiUsedOutsideLoop = nullptr;
3515 if (VF > 1)
3516 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3517 Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
3518 // When the loop is unrolled without vectorizing, initialize
3519 // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
3520 // value of `Incoming`. This is analogous to the vectorized case above:
3521 // extracting the second last element when VF > 1.
3522 else if (UF > 1)
3523 ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
3524
3525 // Fix the initial value of the original recurrence in the scalar loop.
3526 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3527 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3528 for (auto *BB : predecessors(LoopScalarPreHeader)) {
3529 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3530 Start->addIncoming(Incoming, BB);
3531 }
3532
3533 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3534 Phi->setName("scalar.recur");
3535
3536 // Finally, fix users of the recurrence outside the loop. The users will need
3537 // either the last value of the scalar recurrence or the last value of the
3538 // vector recurrence we extracted in the middle block. Since the loop is in
3539 // LCSSA form, we just need to find all the phi nodes for the original scalar
3540 // recurrence in the exit block, and then add an edge for the middle block.
3541 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3542 if (LCSSAPhi.getIncomingValue(0) == Phi) {
3543 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3544 }
3545 }
3546 }
3547
3548 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
3549 Constant *Zero = Builder.getInt32(0);
3550
3551 // Get its reduction variable descriptor.
3552 assert(Legal->isReductionVariable(Phi) &&
3553 "Unable to find the reduction variable");
3554 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
3555
3556 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3557 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3558 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3559 RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3560 RdxDesc.getMinMaxRecurrenceKind();
3561 setDebugLocFromInst(Builder, ReductionStartValue);
3562
3563 // We need to generate a reduction vector from the incoming scalar.
3564 // To do so, we need to generate the 'identity' vector and override
3565 // one of the elements with the incoming scalar reduction. We need
3566 // to do it in the vector-loop preheader.
3567 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3568
3569 // This is the vector-clone of the value that leaves the loop.
3570 Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
3571
3572 // Find the reduction identity variable: zero for addition, or and xor;
3573 // one for multiplication; all-ones (-1) for and.
3574 Value *Identity;
3575 Value *VectorStart;
3576 if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3577 RK == RecurrenceDescriptor::RK_FloatMinMax) {
3578 // MinMax reductions have the start value as their identity.
3579 if (VF == 1) {
3580 VectorStart = Identity = ReductionStartValue;
3581 } else {
3582 VectorStart = Identity =
3583 Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3584 }
3585 } else {
3586 // Handle other reduction kinds:
3587 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3588 RK, VecTy->getScalarType());
3589 if (VF == 1) {
3590 Identity = Iden;
3591 // This vector is the Identity vector where the first element is the
3592 // incoming scalar reduction.
3593 VectorStart = ReductionStartValue;
3594 } else {
3595 Identity = ConstantVector::getSplat(VF, Iden);
3596
3597 // This vector is the Identity vector where the first element is the
3598 // incoming scalar reduction.
3599 VectorStart =
3600 Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3601 }
3602 }
3603
3604 // Fix the vector-loop phi.
3605
3606 // Reductions do not have to start at zero. They can start with
3607 // any loop invariant values.
3608 BasicBlock *Latch = OrigLoop->getLoopLatch();
3609 Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3610 for (unsigned Part = 0; Part < UF; ++Part) {
3611 Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
3612 Value *Val = getOrCreateVectorValue(LoopVal, Part);
3613 // Make sure to add the reduction start value only to the
3614 // first unroll part.
3615 Value *StartVal = (Part == 0) ? VectorStart : Identity;
3616 cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
3617 cast<PHINode>(VecRdxPhi)
3618 ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3619 }
3620
3621 // Before each round, move the insertion point right between
3622 // the PHIs and the values we are going to write.
3623 // This allows us to write both PHINodes and the extractelement
3624 // instructions.
3625 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3626
3627 setDebugLocFromInst(Builder, LoopExitInst);
3628
3629 // If the vector reduction can be performed in a smaller type, we truncate
3630 // then extend the loop exit value to enable InstCombine to evaluate the
3631 // entire expression in the smaller type.
3632 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
3633 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3634 Builder.SetInsertPoint(
3635 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
3636 VectorParts RdxParts(UF);
3637 for (unsigned Part = 0; Part < UF; ++Part) {
3638 RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3639 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3640 Value *Extnd = RdxDesc.isSigned() ?
Builder.CreateSExt(Trunc, VecTy) 3641 : Builder.CreateZExt(Trunc, VecTy); 3642 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 3643 UI != RdxParts[Part]->user_end();) 3644 if (*UI != Trunc) { 3645 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 3646 RdxParts[Part] = Extnd; 3647 } else { 3648 ++UI; 3649 } 3650 } 3651 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3652 for (unsigned Part = 0; Part < UF; ++Part) { 3653 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3654 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]); 3655 } 3656 } 3657 3658 // Reduce all of the unrolled parts into a single vector. 3659 Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0); 3660 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK); 3661 3662 // The middle block terminator has already been assigned a DebugLoc here (the 3663 // OrigLoop's single latch terminator). We want the whole middle block to 3664 // appear to execute on this line because: (a) it is all compiler generated, 3665 // (b) these instructions are always executed after evaluating the latch 3666 // conditional branch, and (c) other passes may add new predecessors which 3667 // terminate on this line. This is the easiest way to ensure we don't 3668 // accidentally cause an extra step back into the loop while debugging. 3669 setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator()); 3670 for (unsigned Part = 1; Part < UF; ++Part) { 3671 Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3672 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3673 // Floating point operations had to be 'fast' to enable the reduction. 3674 ReducedPartRdx = addFastMathFlag( 3675 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart, 3676 ReducedPartRdx, "bin.rdx"), 3677 RdxDesc.getFastMathFlags()); 3678 else 3679 ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx, 3680 RdxPart); 3681 } 3682 3683 if (VF > 1) { 3684 bool NoNaN = Legal->hasFunNoNaNAttr(); 3685 ReducedPartRdx = 3686 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN); 3687 // If the reduction can be performed in a smaller type, we need to extend 3688 // the reduction to the wider type before we branch to the original loop. 3689 if (Phi->getType() != RdxDesc.getRecurrenceType()) 3690 ReducedPartRdx = 3691 RdxDesc.isSigned() 3692 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 3693 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 3694 } 3695 3696 // Create a phi node that merges control-flow from the backedge-taken check 3697 // block and the middle block. 3698 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 3699 LoopScalarPreHeader->getTerminator()); 3700 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 3701 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 3702 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3703 3704 // Now, we need to fix the users of the reduction variable 3705 // inside and outside of the scalar remainder loop. 3706 // We know that the loop is in LCSSA form. We need to update the 3707 // PHI nodes in the exit blocks. 3708 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3709 // All PHINodes need to have a single entry edge, or two if 3710 // we already fixed them. 3711 assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 3712 3713 // We found a reduction value exit-PHI. Update it with the 3714 // incoming bypass edge. 
3715 if (LCSSAPhi.getIncomingValue(0) == LoopExitInst) 3716 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 3717 } // end of the LCSSA phi scan. 3718 3719 // Fix the scalar loop reduction variable with the incoming reduction sum 3720 // from the vector body and from the backedge value. 3721 int IncomingEdgeBlockIdx = 3722 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 3723 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 3724 // Pick the other block. 3725 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 3726 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 3727 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 3728 } 3729 3730 void InnerLoopVectorizer::fixLCSSAPHIs() { 3731 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3732 if (LCSSAPhi.getNumIncomingValues() == 1) { 3733 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 3734 // Non-instruction incoming values will have only one value. 3735 unsigned LastLane = 0; 3736 if (isa<Instruction>(IncomingValue)) 3737 LastLane = Cost->isUniformAfterVectorization( 3738 cast<Instruction>(IncomingValue), VF) 3739 ? 0 3740 : VF - 1; 3741 // Can be a loop invariant incoming value or the last scalar value to be 3742 // extracted from the vectorized loop. 3743 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3744 Value *lastIncomingValue = 3745 getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane }); 3746 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 3747 } 3748 } 3749 } 3750 3751 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 3752 // The basic block and loop containing the predicated instruction. 3753 auto *PredBB = PredInst->getParent(); 3754 auto *VectorLoop = LI->getLoopFor(PredBB); 3755 3756 // Initialize a worklist with the operands of the predicated instruction. 3757 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 3758 3759 // Holds instructions that we need to analyze again. An instruction may be 3760 // reanalyzed if we don't yet know if we can sink it or not. 3761 SmallVector<Instruction *, 8> InstsToReanalyze; 3762 3763 // Returns true if a given use occurs in the predicated block. Phi nodes use 3764 // their operands in their corresponding predecessor blocks. 3765 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 3766 auto *I = cast<Instruction>(U.getUser()); 3767 BasicBlock *BB = I->getParent(); 3768 if (auto *Phi = dyn_cast<PHINode>(I)) 3769 BB = Phi->getIncomingBlock( 3770 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 3771 return BB == PredBB; 3772 }; 3773 3774 // Iteratively sink the scalarized operands of the predicated instruction 3775 // into the block we created for it. When an instruction is sunk, it's 3776 // operands are then added to the worklist. The algorithm ends after one pass 3777 // through the worklist doesn't sink a single instruction. 3778 bool Changed; 3779 do { 3780 // Add the instructions that need to be reanalyzed to the worklist, and 3781 // reset the changed indicator. 3782 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 3783 InstsToReanalyze.clear(); 3784 Changed = false; 3785 3786 while (!Worklist.empty()) { 3787 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 3788 3789 // We can't sink an instruction if it is a phi node, is already in the 3790 // predicated block, is not in the loop, or may have side effects. 
3791 if (!I || isa<PHINode>(I) || I->getParent() == PredBB || 3792 !VectorLoop->contains(I) || I->mayHaveSideEffects()) 3793 continue; 3794 3795 // It's legal to sink the instruction if all its uses occur in the 3796 // predicated block. Otherwise, there's nothing to do yet, and we may 3797 // need to reanalyze the instruction. 3798 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 3799 InstsToReanalyze.push_back(I); 3800 continue; 3801 } 3802 3803 // Move the instruction to the beginning of the predicated block, and add 3804 // it's operands to the worklist. 3805 I->moveBefore(&*PredBB->getFirstInsertionPt()); 3806 Worklist.insert(I->op_begin(), I->op_end()); 3807 3808 // The sinking may have enabled other instructions to be sunk, so we will 3809 // need to iterate. 3810 Changed = true; 3811 } 3812 } while (Changed); 3813 } 3814 3815 void InnerLoopVectorizer::fixNonInductionPHIs() { 3816 for (PHINode *OrigPhi : OrigPHIsToFix) { 3817 PHINode *NewPhi = 3818 cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0)); 3819 unsigned NumIncomingValues = OrigPhi->getNumIncomingValues(); 3820 3821 SmallVector<BasicBlock *, 2> ScalarBBPredecessors( 3822 predecessors(OrigPhi->getParent())); 3823 SmallVector<BasicBlock *, 2> VectorBBPredecessors( 3824 predecessors(NewPhi->getParent())); 3825 assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() && 3826 "Scalar and Vector BB should have the same number of predecessors"); 3827 3828 // The insertion point in Builder may be invalidated by the time we get 3829 // here. Force the Builder insertion point to something valid so that we do 3830 // not run into issues during insertion point restore in 3831 // getOrCreateVectorValue calls below. 3832 Builder.SetInsertPoint(NewPhi); 3833 3834 // The predecessor order is preserved and we can rely on mapping between 3835 // scalar and vector block predecessors. 3836 for (unsigned i = 0; i < NumIncomingValues; ++i) { 3837 BasicBlock *NewPredBB = VectorBBPredecessors[i]; 3838 3839 // When looking up the new scalar/vector values to fix up, use incoming 3840 // values from original phi. 3841 Value *ScIncV = 3842 OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]); 3843 3844 // Scalar incoming value may need a broadcast 3845 Value *NewIncV = getOrCreateVectorValue(ScIncV, 0); 3846 NewPhi->addIncoming(NewIncV, NewPredBB); 3847 } 3848 } 3849 } 3850 3851 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF, 3852 unsigned VF) { 3853 PHINode *P = cast<PHINode>(PN); 3854 if (EnableVPlanNativePath) { 3855 // Currently we enter here in the VPlan-native path for non-induction 3856 // PHIs where all control flow is uniform. We simply widen these PHIs. 3857 // Create a vector phi with no operands - the vector phi operands will be 3858 // set at the end of vector code generation. 3859 Type *VecTy = 3860 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF); 3861 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 3862 VectorLoopValueMap.setVectorValue(P, 0, VecPhi); 3863 OrigPHIsToFix.push_back(P); 3864 3865 return; 3866 } 3867 3868 assert(PN->getParent() == OrigLoop->getHeader() && 3869 "Non-header phis should have been handled elsewhere"); 3870 3871 // In order to support recurrences we need to be able to vectorize Phi nodes. 3872 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3873 // stage #1: We create a new vector PHI node with no incoming edges. 
We'll use 3874 // this value when we vectorize all of the instructions that use the PHI. 3875 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) { 3876 for (unsigned Part = 0; Part < UF; ++Part) { 3877 // This is phase one of vectorizing PHIs. 3878 Type *VecTy = 3879 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF); 3880 Value *EntryPart = PHINode::Create( 3881 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 3882 VectorLoopValueMap.setVectorValue(P, Part, EntryPart); 3883 } 3884 return; 3885 } 3886 3887 setDebugLocFromInst(Builder, P); 3888 3889 // This PHINode must be an induction variable. 3890 // Make sure that we know about it. 3891 assert(Legal->getInductionVars()->count(P) && "Not an induction variable"); 3892 3893 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 3894 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 3895 3896 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 3897 // which can be found from the original scalar operations. 3898 switch (II.getKind()) { 3899 case InductionDescriptor::IK_NoInduction: 3900 llvm_unreachable("Unknown induction"); 3901 case InductionDescriptor::IK_IntInduction: 3902 case InductionDescriptor::IK_FpInduction: 3903 llvm_unreachable("Integer/fp induction is handled elsewhere."); 3904 case InductionDescriptor::IK_PtrInduction: { 3905 // Handle the pointer induction variable case. 3906 assert(P->getType()->isPointerTy() && "Unexpected type."); 3907 // This is the normalized GEP that starts counting at zero. 3908 Value *PtrInd = Induction; 3909 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType()); 3910 // Determine the number of scalars we need to generate for each unroll 3911 // iteration. If the instruction is uniform, we only need to generate the 3912 // first lane. Otherwise, we generate all VF values. 3913 unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF; 3914 // These are the scalar results. Notice that we don't generate vector GEPs 3915 // because scalar GEPs result in better code. 3916 for (unsigned Part = 0; Part < UF; ++Part) { 3917 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 3918 Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF); 3919 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 3920 Value *SclrGep = 3921 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 3922 SclrGep->setName("next.gep"); 3923 VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep); 3924 } 3925 } 3926 return; 3927 } 3928 } 3929 } 3930 3931 /// A helper function for checking whether an integer division-related 3932 /// instruction may divide by zero (in which case it must be predicated if 3933 /// executed conditionally in the scalar code). 3934 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 3935 /// Non-zero divisors that are non compile-time constants will not be 3936 /// converted into multiplication, so we will still end up scalarizing 3937 /// the division, but can do so w/o predication. 
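/// E.g. (illustrative): a conditional `udiv %x, %n` with a non-constant %n must
/// be predicated, while `udiv %x, 7` never divides by zero and need not be.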
3938 static bool mayDivideByZero(Instruction &I) { 3939 assert((I.getOpcode() == Instruction::UDiv || 3940 I.getOpcode() == Instruction::SDiv || 3941 I.getOpcode() == Instruction::URem || 3942 I.getOpcode() == Instruction::SRem) && 3943 "Unexpected instruction"); 3944 Value *Divisor = I.getOperand(1); 3945 auto *CInt = dyn_cast<ConstantInt>(Divisor); 3946 return !CInt || CInt->isZero(); 3947 } 3948 3949 void InnerLoopVectorizer::widenInstruction(Instruction &I) { 3950 switch (I.getOpcode()) { 3951 case Instruction::Br: 3952 case Instruction::PHI: 3953 llvm_unreachable("This instruction is handled by a different recipe."); 3954 case Instruction::GetElementPtr: { 3955 // Construct a vector GEP by widening the operands of the scalar GEP as 3956 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 3957 // results in a vector of pointers when at least one operand of the GEP 3958 // is vector-typed. Thus, to keep the representation compact, we only use 3959 // vector-typed operands for loop-varying values. 3960 auto *GEP = cast<GetElementPtrInst>(&I); 3961 3962 if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) { 3963 // If we are vectorizing, but the GEP has only loop-invariant operands, 3964 // the GEP we build (by only using vector-typed operands for 3965 // loop-varying values) would be a scalar pointer. Thus, to ensure we 3966 // produce a vector of pointers, we need to either arbitrarily pick an 3967 // operand to broadcast, or broadcast a clone of the original GEP. 3968 // Here, we broadcast a clone of the original. 3969 // 3970 // TODO: If at some point we decide to scalarize instructions having 3971 // loop-invariant operands, this special case will no longer be 3972 // required. We would add the scalarization decision to 3973 // collectLoopScalars() and teach getVectorValue() to broadcast 3974 // the lane-zero scalar value. 3975 auto *Clone = Builder.Insert(GEP->clone()); 3976 for (unsigned Part = 0; Part < UF; ++Part) { 3977 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 3978 VectorLoopValueMap.setVectorValue(&I, Part, EntryPart); 3979 addMetadata(EntryPart, GEP); 3980 } 3981 } else { 3982 // If the GEP has at least one loop-varying operand, we are sure to 3983 // produce a vector of pointers. But if we are only unrolling, we want 3984 // to produce a scalar GEP for each unroll part. Thus, the GEP we 3985 // produce with the code below will be scalar (if VF == 1) or vector 3986 // (otherwise). Note that for the unroll-only case, we still maintain 3987 // values in the vector mapping with initVector, as we do for other 3988 // instructions. 3989 for (unsigned Part = 0; Part < UF; ++Part) { 3990 // The pointer operand of the new GEP. If it's loop-invariant, we 3991 // won't broadcast it. 3992 auto *Ptr = 3993 OrigLoop->isLoopInvariant(GEP->getPointerOperand()) 3994 ? GEP->getPointerOperand() 3995 : getOrCreateVectorValue(GEP->getPointerOperand(), Part); 3996 3997 // Collect all the indices for the new GEP. If any index is 3998 // loop-invariant, we won't broadcast it. 3999 SmallVector<Value *, 4> Indices; 4000 for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) { 4001 if (OrigLoop->isLoopInvariant(U.get())) 4002 Indices.push_back(U.get()); 4003 else 4004 Indices.push_back(getOrCreateVectorValue(U.get(), Part)); 4005 } 4006 4007 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 4008 // but it should be a vector, otherwise. 4009 auto *NewGEP = 4010 GEP->isInBounds() 4011 ? 
Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, 4012 Indices) 4013 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); 4014 assert((VF == 1 || NewGEP->getType()->isVectorTy()) && 4015 "NewGEP is not a pointer vector"); 4016 VectorLoopValueMap.setVectorValue(&I, Part, NewGEP); 4017 addMetadata(NewGEP, GEP); 4018 } 4019 } 4020 4021 break; 4022 } 4023 case Instruction::UDiv: 4024 case Instruction::SDiv: 4025 case Instruction::SRem: 4026 case Instruction::URem: 4027 case Instruction::Add: 4028 case Instruction::FAdd: 4029 case Instruction::Sub: 4030 case Instruction::FSub: 4031 case Instruction::FNeg: 4032 case Instruction::Mul: 4033 case Instruction::FMul: 4034 case Instruction::FDiv: 4035 case Instruction::FRem: 4036 case Instruction::Shl: 4037 case Instruction::LShr: 4038 case Instruction::AShr: 4039 case Instruction::And: 4040 case Instruction::Or: 4041 case Instruction::Xor: { 4042 // Just widen unops and binops. 4043 setDebugLocFromInst(Builder, &I); 4044 4045 for (unsigned Part = 0; Part < UF; ++Part) { 4046 SmallVector<Value *, 2> Ops; 4047 for (Value *Op : I.operands()) 4048 Ops.push_back(getOrCreateVectorValue(Op, Part)); 4049 4050 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 4051 4052 if (auto *VecOp = dyn_cast<Instruction>(V)) 4053 VecOp->copyIRFlags(&I); 4054 4055 // Use this vector value for all users of the original instruction. 4056 VectorLoopValueMap.setVectorValue(&I, Part, V); 4057 addMetadata(V, &I); 4058 } 4059 4060 break; 4061 } 4062 case Instruction::Select: { 4063 // Widen selects. 4064 // If the selector is loop invariant we can create a select 4065 // instruction with a scalar condition. Otherwise, use vector-select. 4066 auto *SE = PSE.getSE(); 4067 bool InvariantCond = 4068 SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop); 4069 setDebugLocFromInst(Builder, &I); 4070 4071 // The condition can be loop invariant but still defined inside the 4072 // loop. This means that we can't just use the original 'cond' value. 4073 // We have to take the 'vectorized' value and pick the first lane. 4074 // Instcombine will make this a no-op. 4075 4076 auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0}); 4077 4078 for (unsigned Part = 0; Part < UF; ++Part) { 4079 Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part); 4080 Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part); 4081 Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part); 4082 Value *Sel = 4083 Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1); 4084 VectorLoopValueMap.setVectorValue(&I, Part, Sel); 4085 addMetadata(Sel, &I); 4086 } 4087 4088 break; 4089 } 4090 4091 case Instruction::ICmp: 4092 case Instruction::FCmp: { 4093 // Widen compares. Generate vector compares. 4094 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4095 auto *Cmp = dyn_cast<CmpInst>(&I); 4096 setDebugLocFromInst(Builder, Cmp); 4097 for (unsigned Part = 0; Part < UF; ++Part) { 4098 Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part); 4099 Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part); 4100 Value *C = nullptr; 4101 if (FCmp) { 4102 // Propagate fast math flags. 
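// (The FastMathFlagGuard restores the builder's previous fast-math flags when
// it goes out of scope.)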
4103 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4104 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4105 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4106 } else { 4107 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4108 } 4109 VectorLoopValueMap.setVectorValue(&I, Part, C); 4110 addMetadata(C, &I); 4111 } 4112 4113 break; 4114 } 4115 4116 case Instruction::ZExt: 4117 case Instruction::SExt: 4118 case Instruction::FPToUI: 4119 case Instruction::FPToSI: 4120 case Instruction::FPExt: 4121 case Instruction::PtrToInt: 4122 case Instruction::IntToPtr: 4123 case Instruction::SIToFP: 4124 case Instruction::UIToFP: 4125 case Instruction::Trunc: 4126 case Instruction::FPTrunc: 4127 case Instruction::BitCast: { 4128 auto *CI = dyn_cast<CastInst>(&I); 4129 setDebugLocFromInst(Builder, CI); 4130 4131 /// Vectorize casts. 4132 Type *DestTy = 4133 (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF); 4134 4135 for (unsigned Part = 0; Part < UF; ++Part) { 4136 Value *A = getOrCreateVectorValue(CI->getOperand(0), Part); 4137 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4138 VectorLoopValueMap.setVectorValue(&I, Part, Cast); 4139 addMetadata(Cast, &I); 4140 } 4141 break; 4142 } 4143 4144 case Instruction::Call: { 4145 // Ignore dbg intrinsics. 4146 if (isa<DbgInfoIntrinsic>(I)) 4147 break; 4148 setDebugLocFromInst(Builder, &I); 4149 4150 Module *M = I.getParent()->getParent()->getParent(); 4151 auto *CI = cast<CallInst>(&I); 4152 4153 StringRef FnName = CI->getCalledFunction()->getName(); 4154 Function *F = CI->getCalledFunction(); 4155 Type *RetTy = ToVectorTy(CI->getType(), VF); 4156 SmallVector<Type *, 4> Tys; 4157 for (Value *ArgOperand : CI->arg_operands()) 4158 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 4159 4160 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4161 4162 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4163 // version of the instruction. 4164 // Is it beneficial to perform intrinsic call compared to lib call? 4165 bool NeedToScalarize; 4166 unsigned CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4167 bool UseVectorIntrinsic = 4168 ID && Cost->getVectorIntrinsicCost(CI, VF) <= CallCost; 4169 assert((UseVectorIntrinsic || !NeedToScalarize) && 4170 "Instruction should be scalarized elsewhere."); 4171 4172 for (unsigned Part = 0; Part < UF; ++Part) { 4173 SmallVector<Value *, 4> Args; 4174 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) { 4175 Value *Arg = CI->getArgOperand(i); 4176 // Some intrinsics have a scalar argument - don't replace it with a 4177 // vector. 4178 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) 4179 Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part); 4180 Args.push_back(Arg); 4181 } 4182 4183 Function *VectorF; 4184 if (UseVectorIntrinsic) { 4185 // Use vector version of the intrinsic. 4186 Type *TysForDecl[] = {CI->getType()}; 4187 if (VF > 1) 4188 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4189 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4190 } else { 4191 // Use vector version of the library call. 
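      // As a rough illustration (the mapped name below is hypothetical and
      // depends entirely on the configured TargetLibraryInfo / vector library):
      //   scalar:   %r   = call float @sinf(float %x)
      //   VF == 4:  %r.v = call <4 x float> @vec_sinf_f32x4(<4 x float> %x.v)
      // If the module does not yet declare the mapped function, a declaration
      // with the widened signature is created below.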
4192 StringRef VFnName = TLI->getVectorizedFunction(FnName, VF); 4193 assert(!VFnName.empty() && "Vector function name is empty."); 4194 VectorF = M->getFunction(VFnName); 4195 if (!VectorF) { 4196 // Generate a declaration 4197 FunctionType *FTy = FunctionType::get(RetTy, Tys, false); 4198 VectorF = 4199 Function::Create(FTy, Function::ExternalLinkage, VFnName, M); 4200 VectorF->copyAttributesFrom(F); 4201 } 4202 } 4203 assert(VectorF && "Can't create vector function."); 4204 4205 SmallVector<OperandBundleDef, 1> OpBundles; 4206 CI->getOperandBundlesAsDefs(OpBundles); 4207 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4208 4209 if (isa<FPMathOperator>(V)) 4210 V->copyFastMathFlags(CI); 4211 4212 VectorLoopValueMap.setVectorValue(&I, Part, V); 4213 addMetadata(V, &I); 4214 } 4215 4216 break; 4217 } 4218 4219 default: 4220 // This instruction is not vectorized by simple widening. 4221 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4222 llvm_unreachable("Unhandled instruction!"); 4223 } // end of switch. 4224 } 4225 4226 void InnerLoopVectorizer::updateAnalysis() { 4227 // Forget the original basic block. 4228 PSE.getSE()->forgetLoop(OrigLoop); 4229 4230 // DT is not kept up-to-date for outer loop vectorization 4231 if (EnableVPlanNativePath) 4232 return; 4233 4234 // Update the dominator tree information. 4235 assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) && 4236 "Entry does not dominate exit."); 4237 4238 DT->addNewBlock(LoopMiddleBlock, 4239 LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4240 DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]); 4241 DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader); 4242 DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]); 4243 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 4244 } 4245 4246 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) { 4247 // We should not collect Scalars more than once per VF. Right now, this 4248 // function is called from collectUniformsAndScalars(), which already does 4249 // this check. Collecting Scalars for VF=1 does not make any sense. 4250 assert(VF >= 2 && Scalars.find(VF) == Scalars.end() && 4251 "This function should not be visited twice for the same VF"); 4252 4253 SmallSetVector<Instruction *, 8> Worklist; 4254 4255 // These sets are used to seed the analysis with pointers used by memory 4256 // accesses that will remain scalar. 4257 SmallSetVector<Instruction *, 8> ScalarPtrs; 4258 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4259 4260 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4261 // The pointer operands of loads and stores will be scalar as long as the 4262 // memory access is not a gather or scatter operation. The value operand of a 4263 // store will remain scalar if the store is scalarized. 
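  // For example (sketch): a pointer such as
  //   %p = getelementptr inbounds i32, i32* %base, i64 %iv
  // that is used only as the address of loads/stores which are not widened
  // into gathers/scatters counts as a scalar use here, whereas a pointer fed
  // to a gather/scatter does not.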
4264 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4265 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4266 assert(WideningDecision != CM_Unknown && 4267 "Widening decision should be ready at this moment"); 4268 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4269 if (Ptr == Store->getValueOperand()) 4270 return WideningDecision == CM_Scalarize; 4271 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4272 "Ptr is neither a value nor a pointer operand"); 4273 return WideningDecision != CM_GatherScatter; 4274 }; 4275 4276 // A helper that returns true if the given value is a bitcast or 4277 // getelementptr instruction contained in the loop. 4278 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4279 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4280 isa<GetElementPtrInst>(V)) && 4281 !TheLoop->isLoopInvariant(V); 4282 }; 4283 4284 // A helper that evaluates a memory access's use of a pointer. If the use 4285 // will be a scalar use, and the pointer is only used by memory accesses, we 4286 // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in 4287 // PossibleNonScalarPtrs. 4288 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4289 // We only care about bitcast and getelementptr instructions contained in 4290 // the loop. 4291 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4292 return; 4293 4294 // If the pointer has already been identified as scalar (e.g., if it was 4295 // also identified as uniform), there's nothing to do. 4296 auto *I = cast<Instruction>(Ptr); 4297 if (Worklist.count(I)) 4298 return; 4299 4300 // If the use of the pointer will be a scalar use, and all users of the 4301 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 4302 // place the pointer in PossibleNonScalarPtrs. 4303 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 4304 return isa<LoadInst>(U) || isa<StoreInst>(U); 4305 })) 4306 ScalarPtrs.insert(I); 4307 else 4308 PossibleNonScalarPtrs.insert(I); 4309 }; 4310 4311 // We seed the scalars analysis with three classes of instructions: (1) 4312 // instructions marked uniform-after-vectorization, (2) bitcast and 4313 // getelementptr instructions used by memory accesses requiring a scalar use, 4314 // and (3) pointer induction variables and their update instructions (we 4315 // currently only scalarize these). 4316 // 4317 // (1) Add to the worklist all instructions that have been identified as 4318 // uniform-after-vectorization. 4319 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4320 4321 // (2) Add to the worklist all bitcast and getelementptr instructions used by 4322 // memory accesses requiring a scalar use. The pointer operands of loads and 4323 // stores will be scalar as long as the memory access is not a gather or 4324 // scatter operation. The value operand of a store will remain scalar if the 4325 // store is scalarized.
4326 for (auto *BB : TheLoop->blocks()) 4327 for (auto &I : *BB) { 4328 if (auto *Load = dyn_cast<LoadInst>(&I)) { 4329 evaluatePtrUse(Load, Load->getPointerOperand()); 4330 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 4331 evaluatePtrUse(Store, Store->getPointerOperand()); 4332 evaluatePtrUse(Store, Store->getValueOperand()); 4333 } 4334 } 4335 for (auto *I : ScalarPtrs) 4336 if (PossibleNonScalarPtrs.find(I) == PossibleNonScalarPtrs.end()) { 4337 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 4338 Worklist.insert(I); 4339 } 4340 4341 // (3) Add to the worklist all pointer induction variables and their update 4342 // instructions. 4343 // 4344 // TODO: Once we are able to vectorize pointer induction variables we should 4345 // no longer insert them into the worklist here. 4346 auto *Latch = TheLoop->getLoopLatch(); 4347 for (auto &Induction : *Legal->getInductionVars()) { 4348 auto *Ind = Induction.first; 4349 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4350 if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction) 4351 continue; 4352 Worklist.insert(Ind); 4353 Worklist.insert(IndUpdate); 4354 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4355 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4356 << "\n"); 4357 } 4358 4359 // Insert the forced scalars. 4360 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4361 // induction variable when the PHI user is scalarized. 4362 auto ForcedScalar = ForcedScalars.find(VF); 4363 if (ForcedScalar != ForcedScalars.end()) 4364 for (auto *I : ForcedScalar->second) 4365 Worklist.insert(I); 4366 4367 // Expand the worklist by looking through any bitcasts and getelementptr 4368 // instructions we've already identified as scalar. This is similar to the 4369 // expansion step in collectLoopUniforms(); however, here we're only 4370 // expanding to include additional bitcasts and getelementptr instructions. 4371 unsigned Idx = 0; 4372 while (Idx != Worklist.size()) { 4373 Instruction *Dst = Worklist[Idx++]; 4374 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4375 continue; 4376 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4377 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4378 auto *J = cast<Instruction>(U); 4379 return !TheLoop->contains(J) || Worklist.count(J) || 4380 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4381 isScalarUse(J, Src)); 4382 })) { 4383 Worklist.insert(Src); 4384 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4385 } 4386 } 4387 4388 // An induction variable will remain scalar if all users of the induction 4389 // variable and induction variable update remain scalar. 4390 for (auto &Induction : *Legal->getInductionVars()) { 4391 auto *Ind = Induction.first; 4392 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4393 4394 // We already considered pointer induction variables, so there's no reason 4395 // to look at their users again. 4396 // 4397 // TODO: Once we are able to vectorize pointer induction variables we 4398 // should no longer skip over them here. 4399 if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction) 4400 continue; 4401 4402 // Determine if all users of the induction variable are scalar after 4403 // vectorization. 
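    // For example (sketch): an integer IV whose only in-loop users are its
    // own update and address computations that have already been marked
    // scalar has no vector users, so both the PHI and its update may remain
    // scalar.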
4404 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4405 auto *I = cast<Instruction>(U); 4406 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 4407 }); 4408 if (!ScalarInd) 4409 continue; 4410 4411 // Determine if all users of the induction variable update instruction are 4412 // scalar after vectorization. 4413 auto ScalarIndUpdate = 4414 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4415 auto *I = cast<Instruction>(U); 4416 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 4417 }); 4418 if (!ScalarIndUpdate) 4419 continue; 4420 4421 // The induction variable and its update instruction will remain scalar. 4422 Worklist.insert(Ind); 4423 Worklist.insert(IndUpdate); 4424 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4425 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4426 << "\n"); 4427 } 4428 4429 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4430 } 4431 4432 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, unsigned VF) { 4433 if (!blockNeedsPredication(I->getParent())) 4434 return false; 4435 switch(I->getOpcode()) { 4436 default: 4437 break; 4438 case Instruction::Load: 4439 case Instruction::Store: { 4440 if (!Legal->isMaskRequired(I)) 4441 return false; 4442 auto *Ptr = getLoadStorePointerOperand(I); 4443 auto *Ty = getMemInstValueType(I); 4444 // We have already decided how to vectorize this instruction, get that 4445 // result. 4446 if (VF > 1) { 4447 InstWidening WideningDecision = getWideningDecision(I, VF); 4448 assert(WideningDecision != CM_Unknown && 4449 "Widening decision should be ready at this moment"); 4450 return WideningDecision == CM_Scalarize; 4451 } 4452 return isa<LoadInst>(I) ? 4453 !(isLegalMaskedLoad(Ty, Ptr) || isLegalMaskedGather(Ty)) 4454 : !(isLegalMaskedStore(Ty, Ptr) || isLegalMaskedScatter(Ty)); 4455 } 4456 case Instruction::UDiv: 4457 case Instruction::SDiv: 4458 case Instruction::SRem: 4459 case Instruction::URem: 4460 return mayDivideByZero(*I); 4461 } 4462 return false; 4463 } 4464 4465 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I, 4466 unsigned VF) { 4467 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4468 assert(getWideningDecision(I, VF) == CM_Unknown && 4469 "Decision should not be set yet."); 4470 auto *Group = getInterleavedAccessGroup(I); 4471 assert(Group && "Must have a group."); 4472 4473 // If the instruction's allocated size doesn't equal it's type size, it 4474 // requires padding and will be scalarized. 4475 auto &DL = I->getModule()->getDataLayout(); 4476 auto *ScalarTy = getMemInstValueType(I); 4477 if (hasIrregularType(ScalarTy, DL, VF)) 4478 return false; 4479 4480 // Check if masking is required. 4481 // A Group may need masking for one of two reasons: it resides in a block that 4482 // needs predication, or it was decided to use masking to deal with gaps. 4483 bool PredicatedAccessRequiresMasking = 4484 Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); 4485 bool AccessWithGapsRequiresMasking = 4486 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 4487 if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking) 4488 return true; 4489 4490 // If masked interleaving is required, we expect that the user/target had 4491 // enabled it, because otherwise it either wouldn't have been created or 4492 // it should have been invalidated by the CostModel. 
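  // For example (sketch): a factor-2 load group whose last member is missing
  // would read past the final full vector iteration; if a scalar epilogue is
  // not allowed, the group can only be kept when the target supports masked
  // loads for the accessed type, which is what is checked below.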
4493 assert(useMaskedInterleavedAccesses(TTI) && 4494 "Masked interleave-groups for predicated accesses are not enabled."); 4495 4496 auto *Ty = getMemInstValueType(I); 4497 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty) 4498 : TTI.isLegalMaskedStore(Ty); 4499 } 4500 4501 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I, 4502 unsigned VF) { 4503 // Get and ensure we have a valid memory instruction. 4504 LoadInst *LI = dyn_cast<LoadInst>(I); 4505 StoreInst *SI = dyn_cast<StoreInst>(I); 4506 assert((LI || SI) && "Invalid memory instruction"); 4507 4508 auto *Ptr = getLoadStorePointerOperand(I); 4509 4510 // In order to be widened, the pointer should be consecutive, first of all. 4511 if (!Legal->isConsecutivePtr(Ptr)) 4512 return false; 4513 4514 // If the instruction is a store located in a predicated block, it will be 4515 // scalarized. 4516 if (isScalarWithPredication(I)) 4517 return false; 4518 4519 // If the instruction's allocated size doesn't equal it's type size, it 4520 // requires padding and will be scalarized. 4521 auto &DL = I->getModule()->getDataLayout(); 4522 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 4523 if (hasIrregularType(ScalarTy, DL, VF)) 4524 return false; 4525 4526 return true; 4527 } 4528 4529 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) { 4530 // We should not collect Uniforms more than once per VF. Right now, 4531 // this function is called from collectUniformsAndScalars(), which 4532 // already does this check. Collecting Uniforms for VF=1 does not make any 4533 // sense. 4534 4535 assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() && 4536 "This function should not be visited twice for the same VF"); 4537 4538 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 4539 // not analyze again. Uniforms.count(VF) will return 1. 4540 Uniforms[VF].clear(); 4541 4542 // We now know that the loop is vectorizable! 4543 // Collect instructions inside the loop that will remain uniform after 4544 // vectorization. 4545 4546 // Global values, params and instructions outside of current loop are out of 4547 // scope. 4548 auto isOutOfScope = [&](Value *V) -> bool { 4549 Instruction *I = dyn_cast<Instruction>(V); 4550 return (!I || !TheLoop->contains(I)); 4551 }; 4552 4553 SetVector<Instruction *> Worklist; 4554 BasicBlock *Latch = TheLoop->getLoopLatch(); 4555 4556 // Start with the conditional branch. If the branch condition is an 4557 // instruction contained in the loop that is only used by the branch, it is 4558 // uniform. 4559 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4560 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) { 4561 Worklist.insert(Cmp); 4562 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n"); 4563 } 4564 4565 // Holds consecutive and consecutive-like pointers. Consecutive-like pointers 4566 // are pointers that are treated like consecutive pointers during 4567 // vectorization. The pointer operands of interleaved accesses are an 4568 // example. 4569 SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs; 4570 4571 // Holds pointer operands of instructions that are possibly non-uniform. 
4572 SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs; 4573 4574 auto isUniformDecision = [&](Instruction *I, unsigned VF) { 4575 InstWidening WideningDecision = getWideningDecision(I, VF); 4576 assert(WideningDecision != CM_Unknown && 4577 "Widening decision should be ready at this moment"); 4578 4579 return (WideningDecision == CM_Widen || 4580 WideningDecision == CM_Widen_Reverse || 4581 WideningDecision == CM_Interleave); 4582 }; 4583 // Iterate over the instructions in the loop, and collect all 4584 // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible 4585 // that a consecutive-like pointer operand will be scalarized, we collect it 4586 // in PossibleNonUniformPtrs instead. We use two sets here because a single 4587 // getelementptr instruction can be used by both vectorized and scalarized 4588 // memory instructions. For example, if a loop loads and stores from the same 4589 // location, but the store is conditional, the store will be scalarized, and 4590 // the getelementptr won't remain uniform. 4591 for (auto *BB : TheLoop->blocks()) 4592 for (auto &I : *BB) { 4593 // If there's no pointer operand, there's nothing to do. 4594 auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 4595 if (!Ptr) 4596 continue; 4597 4598 // True if all users of Ptr are memory accesses that have Ptr as their 4599 // pointer operand. 4600 auto UsersAreMemAccesses = 4601 llvm::all_of(Ptr->users(), [&](User *U) -> bool { 4602 return getLoadStorePointerOperand(U) == Ptr; 4603 }); 4604 4605 // Ensure the memory instruction will not be scalarized or used by 4606 // gather/scatter, making its pointer operand non-uniform. If the pointer 4607 // operand is used by any instruction other than a memory access, we 4608 // conservatively assume the pointer operand may be non-uniform. 4609 if (!UsersAreMemAccesses || !isUniformDecision(&I, VF)) 4610 PossibleNonUniformPtrs.insert(Ptr); 4611 4612 // If the memory instruction will be vectorized and its pointer operand 4613 // is consecutive-like, or interleaving - the pointer operand should 4614 // remain uniform. 4615 else 4616 ConsecutiveLikePtrs.insert(Ptr); 4617 } 4618 4619 // Add to the Worklist all consecutive and consecutive-like pointers that 4620 // aren't also identified as possibly non-uniform. 4621 for (auto *V : ConsecutiveLikePtrs) 4622 if (PossibleNonUniformPtrs.find(V) == PossibleNonUniformPtrs.end()) { 4623 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n"); 4624 Worklist.insert(V); 4625 } 4626 4627 // Expand Worklist in topological order: whenever a new instruction 4628 // is added , its users should be already inside Worklist. It ensures 4629 // a uniform instruction will only be used by uniform instructions. 4630 unsigned idx = 0; 4631 while (idx != Worklist.size()) { 4632 Instruction *I = Worklist[idx++]; 4633 4634 for (auto OV : I->operand_values()) { 4635 // isOutOfScope operands cannot be uniform instructions. 4636 if (isOutOfScope(OV)) 4637 continue; 4638 // First order recurrence Phi's should typically be considered 4639 // non-uniform. 4640 auto *OP = dyn_cast<PHINode>(OV); 4641 if (OP && Legal->isFirstOrderRecurrence(OP)) 4642 continue; 4643 // If all the users of the operand are uniform, then add the 4644 // operand into the uniform worklist. 
4645 auto *OI = cast<Instruction>(OV); 4646 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 4647 auto *J = cast<Instruction>(U); 4648 return Worklist.count(J) || 4649 (OI == getLoadStorePointerOperand(J) && 4650 isUniformDecision(J, VF)); 4651 })) { 4652 Worklist.insert(OI); 4653 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n"); 4654 } 4655 } 4656 } 4657 4658 // Returns true if Ptr is the pointer operand of a memory access instruction 4659 // I, and I is known to not require scalarization. 4660 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 4661 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 4662 }; 4663 4664 // For an instruction to be added into Worklist above, all its users inside 4665 // the loop should also be in Worklist. However, this condition cannot be 4666 // true for phi nodes that form a cyclic dependence. We must process phi 4667 // nodes separately. An induction variable will remain uniform if all users 4668 // of the induction variable and induction variable update remain uniform. 4669 // The code below handles both pointer and non-pointer induction variables. 4670 for (auto &Induction : *Legal->getInductionVars()) { 4671 auto *Ind = Induction.first; 4672 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4673 4674 // Determine if all users of the induction variable are uniform after 4675 // vectorization. 4676 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4677 auto *I = cast<Instruction>(U); 4678 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4679 isVectorizedMemAccessUse(I, Ind); 4680 }); 4681 if (!UniformInd) 4682 continue; 4683 4684 // Determine if all users of the induction variable update instruction are 4685 // uniform after vectorization. 4686 auto UniformIndUpdate = 4687 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4688 auto *I = cast<Instruction>(U); 4689 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4690 isVectorizedMemAccessUse(I, IndUpdate); 4691 }); 4692 if (!UniformIndUpdate) 4693 continue; 4694 4695 // The induction variable and its update instruction will remain uniform. 4696 Worklist.insert(Ind); 4697 Worklist.insert(IndUpdate); 4698 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n"); 4699 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate 4700 << "\n"); 4701 } 4702 4703 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 4704 } 4705 4706 bool LoopVectorizationCostModel::runtimeChecksRequired() { 4707 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 4708 4709 if (Legal->getRuntimePointerChecking()->Need) { 4710 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4711 << "runtime pointer checks needed. Enable vectorization of this " 4712 "loop with '#pragma clang loop vectorize(enable)' when " 4713 "compiling with -Os/-Oz"); 4714 LLVM_DEBUG( 4715 dbgs() 4716 << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); 4717 return true; 4718 } 4719 4720 if (!PSE.getUnionPredicate().getPredicates().empty()) { 4721 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4722 << "runtime SCEV checks needed. Enable vectorization of this " 4723 "loop with '#pragma clang loop vectorize(enable)' when " 4724 "compiling with -Os/-Oz"); 4725 LLVM_DEBUG( 4726 dbgs() 4727 << "LV: Aborting. 
Runtime SCEV check is required with -Os/-Oz.\n"); 4728 return true; 4729 } 4730 4731 // FIXME: Avoid specializing for stride==1 instead of bailing out. 4732 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 4733 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4734 << "runtime stride == 1 checks needed. Enable vectorization of " 4735 "this loop with '#pragma clang loop vectorize(enable)' when " 4736 "compiling with -Os/-Oz"); 4737 LLVM_DEBUG( 4738 dbgs() 4739 << "LV: Aborting. Runtime stride check is required with -Os/-Oz.\n"); 4740 return true; 4741 } 4742 4743 return false; 4744 } 4745 4746 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF() { 4747 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 4748 // TODO: It may be useful to do this, since it's still likely to be dynamically 4749 // uniform if the target can skip. 4750 LLVM_DEBUG( 4751 dbgs() << "LV: Not inserting runtime ptr check for divergent target"); 4752 4753 ORE->emit( 4754 createMissedAnalysis("CantVersionLoopWithDivergentTarget") 4755 << "runtime pointer checks needed. Not enabled for divergent target"); 4756 4757 return None; 4758 } 4759 4760 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 4761 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 4762 if (TC == 1) { 4763 ORE->emit(createMissedAnalysis("SingleIterationLoop") 4764 << "loop trip count is one, irrelevant for vectorization"); 4765 LLVM_DEBUG(dbgs() << "LV: Aborting, single iteration (non) loop.\n"); 4766 return None; 4767 } 4768 4769 switch (ScalarEpilogueStatus) { 4770 case CM_ScalarEpilogueAllowed: 4771 return computeFeasibleMaxVF(TC); 4772 case CM_ScalarEpilogueNotNeededPredicatePragma: 4773 LLVM_DEBUG( 4774 dbgs() << "LV: vector predicate hint found.\n" 4775 << "LV: Not allowing scalar epilogue, creating predicated " 4776 << "vector loop.\n"); 4777 break; 4778 case CM_ScalarEpilogueNotAllowedLowTripLoop: 4779 // fallthrough as a special case of OptForSize 4780 case CM_ScalarEpilogueNotAllowedOptSize: 4781 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize) 4782 LLVM_DEBUG( 4783 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 4784 else 4785 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip " 4786 << "count.\n"); 4787 4788 // Bail if runtime checks are required, which are not good when optimising 4789 // for size. 4790 if (runtimeChecksRequired()) 4791 return None; 4792 break; 4793 } 4794 4795 // Now try tail folding. 4796 4797 // Invalidate interleave groups that require an epilogue if we can't mask 4798 // the interleave-group. 4799 if (!useMaskedInterleavedAccesses(TTI)) 4800 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 4801 4802 unsigned MaxVF = computeFeasibleMaxVF(TC); 4803 if (TC > 0 && TC % MaxVF == 0) { 4804 // Accept MaxVF if we do not have a tail. 4805 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 4806 return MaxVF; 4807 } 4808 4809 // If we don't know the precise trip count, or if the trip count that we 4810 // found modulo the vectorization factor is not zero, try to fold the tail 4811 // by masking. 4812 // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
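  // Illustrative example (hypothetical numbers): with TC == 20 and
  // MaxVF == 8, 20 % 8 == 4 leaves a four-iteration tail. If the target can
  // fold that tail by masking, we keep MaxVF == 8 and predicate the final
  // vector iteration instead of emitting a scalar epilogue.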
4813 if (Legal->canFoldTailByMasking()) { 4814 FoldTailByMasking = true; 4815 return MaxVF; 4816 } 4817 4818 if (TC == 0) { 4819 ORE->emit( 4820 createMissedAnalysis("UnknownLoopCountComplexCFG") 4821 << "unable to calculate the loop count due to complex control flow"); 4822 return None; 4823 } 4824 4825 ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize") 4826 << "cannot optimize for size and vectorize at the same time. " 4827 "Enable vectorization of this loop with '#pragma clang loop " 4828 "vectorize(enable)' when compiling with -Os/-Oz"); 4829 return None; 4830 } 4831 4832 unsigned 4833 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount) { 4834 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 4835 unsigned SmallestType, WidestType; 4836 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 4837 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 4838 4839 // Get the maximum safe dependence distance in bits computed by LAA. 4840 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 4841 // the memory accesses that is most restrictive (involved in the smallest 4842 // dependence distance). 4843 unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth(); 4844 4845 WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth); 4846 4847 unsigned MaxVectorSize = WidestRegister / WidestType; 4848 4849 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 4850 << " / " << WidestType << " bits.\n"); 4851 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 4852 << WidestRegister << " bits.\n"); 4853 4854 assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements" 4855 " into one vector!"); 4856 if (MaxVectorSize == 0) { 4857 LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 4858 MaxVectorSize = 1; 4859 return MaxVectorSize; 4860 } else if (ConstTripCount && ConstTripCount < MaxVectorSize && 4861 isPowerOf2_32(ConstTripCount)) { 4862 // We need to clamp the VF to be the ConstTripCount. There is no point in 4863 // choosing a higher viable VF as done in the loop below. 4864 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 4865 << ConstTripCount << "\n"); 4866 MaxVectorSize = ConstTripCount; 4867 return MaxVectorSize; 4868 } 4869 4870 unsigned MaxVF = MaxVectorSize; 4871 if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) || 4872 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 4873 // Collect all viable vectorization factors larger than the default MaxVF 4874 // (i.e. MaxVectorSize). 4875 SmallVector<unsigned, 8> VFs; 4876 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 4877 for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2) 4878 VFs.push_back(VS); 4879 4880 // For each VF calculate its register usage. 4881 auto RUs = calculateRegisterUsage(VFs); 4882 4883 // Select the largest VF which doesn't require more registers than existing 4884 // ones. 
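    // Illustrative example (hypothetical numbers): with a 256-bit widest
    // register, WidestType == 32 gives a default MaxVectorSize of 8, while
    // SmallestType == 8 gives NewMaxVectorSize == 32, so VFs of 16 and 32 are
    // also evaluated; the largest candidate whose register usage fits the
    // target's register file is selected below.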
4885 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true); 4886 for (int i = RUs.size() - 1; i >= 0; --i) { 4887 if (RUs[i].MaxLocalUsers <= TargetNumRegisters) { 4888 MaxVF = VFs[i]; 4889 break; 4890 } 4891 } 4892 if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) { 4893 if (MaxVF < MinVF) { 4894 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 4895 << ") with target's minimum: " << MinVF << '\n'); 4896 MaxVF = MinVF; 4897 } 4898 } 4899 } 4900 return MaxVF; 4901 } 4902 4903 VectorizationFactor 4904 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) { 4905 float Cost = expectedCost(1).first; 4906 const float ScalarCost = Cost; 4907 unsigned Width = 1; 4908 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 4909 4910 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 4911 if (ForceVectorization && MaxVF > 1) { 4912 // Ignore scalar width, because the user explicitly wants vectorization. 4913 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 4914 // evaluation. 4915 Cost = std::numeric_limits<float>::max(); 4916 } 4917 4918 for (unsigned i = 2; i <= MaxVF; i *= 2) { 4919 // Notice that the vector loop needs to be executed less times, so 4920 // we need to divide the cost of the vector loops by the width of 4921 // the vector elements. 4922 VectorizationCostTy C = expectedCost(i); 4923 float VectorCost = C.first / (float)i; 4924 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 4925 << " costs: " << (int)VectorCost << ".\n"); 4926 if (!C.second && !ForceVectorization) { 4927 LLVM_DEBUG( 4928 dbgs() << "LV: Not considering vector loop of width " << i 4929 << " because it will not generate any vector instructions.\n"); 4930 continue; 4931 } 4932 if (VectorCost < Cost) { 4933 Cost = VectorCost; 4934 Width = i; 4935 } 4936 } 4937 4938 if (!EnableCondStoresVectorization && NumPredStores) { 4939 ORE->emit(createMissedAnalysis("ConditionalStore") 4940 << "store that is conditionally executed prevents vectorization"); 4941 LLVM_DEBUG( 4942 dbgs() << "LV: No vectorization. There are conditional stores.\n"); 4943 Width = 1; 4944 Cost = ScalarCost; 4945 } 4946 4947 LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 4948 << "LV: Vectorization seems to be not beneficial, " 4949 << "but was forced by a user.\n"); 4950 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 4951 VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)}; 4952 return Factor; 4953 } 4954 4955 std::pair<unsigned, unsigned> 4956 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 4957 unsigned MinWidth = -1U; 4958 unsigned MaxWidth = 8; 4959 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 4960 4961 // For each block. 4962 for (BasicBlock *BB : TheLoop->blocks()) { 4963 // For each instruction in the loop. 4964 for (Instruction &I : BB->instructionsWithoutDebug()) { 4965 Type *T = I.getType(); 4966 4967 // Skip ignored values. 4968 if (ValuesToIgnore.find(&I) != ValuesToIgnore.end()) 4969 continue; 4970 4971 // Only examine Loads, Stores and PHINodes. 4972 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 4973 continue; 4974 4975 // Examine PHI nodes that are reduction variables. Update the type to 4976 // account for the recurrence type. 
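      // For example (sketch): a reduction PHI declared as i32 whose
      // recurrence is known to need only 8 bits contributes 8 rather than 32
      // to the width bounds below, which can permit a larger VF.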
4977 if (auto *PN = dyn_cast<PHINode>(&I)) { 4978 if (!Legal->isReductionVariable(PN)) 4979 continue; 4980 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN]; 4981 T = RdxDesc.getRecurrenceType(); 4982 } 4983 4984 // Examine the stored values. 4985 if (auto *ST = dyn_cast<StoreInst>(&I)) 4986 T = ST->getValueOperand()->getType(); 4987 4988 // Ignore loaded pointer types and stored pointer types that are not 4989 // vectorizable. 4990 // 4991 // FIXME: The check here attempts to predict whether a load or store will 4992 // be vectorized. We only know this for certain after a VF has 4993 // been selected. Here, we assume that if an access can be 4994 // vectorized, it will be. We should also look at extending this 4995 // optimization to non-pointer types. 4996 // 4997 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 4998 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 4999 continue; 5000 5001 MinWidth = std::min(MinWidth, 5002 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5003 MaxWidth = std::max(MaxWidth, 5004 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5005 } 5006 } 5007 5008 return {MinWidth, MaxWidth}; 5009 } 5010 5011 unsigned LoopVectorizationCostModel::selectInterleaveCount(unsigned VF, 5012 unsigned LoopCost) { 5013 // -- The interleave heuristics -- 5014 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5015 // There are many micro-architectural considerations that we can't predict 5016 // at this level. For example, frontend pressure (on decode or fetch) due to 5017 // code size, or the number and capabilities of the execution ports. 5018 // 5019 // We use the following heuristics to select the interleave count: 5020 // 1. If the code has reductions, then we interleave to break the cross 5021 // iteration dependency. 5022 // 2. If the loop is really small, then we interleave to reduce the loop 5023 // overhead. 5024 // 3. We don't interleave if we think that we will spill registers to memory 5025 // due to the increased register pressure. 5026 5027 if (!isScalarEpilogueAllowed()) 5028 return 1; 5029 5030 // We used the distance for the interleave count. 5031 if (Legal->getMaxSafeDepDistBytes() != -1U) 5032 return 1; 5033 5034 // Do not interleave loops with a relatively small trip count. 5035 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5036 if (TC > 1 && TC < TinyTripCountInterleaveThreshold) 5037 return 1; 5038 5039 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1); 5040 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 5041 << " registers\n"); 5042 5043 if (VF == 1) { 5044 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 5045 TargetNumRegisters = ForceTargetNumScalarRegs; 5046 } else { 5047 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 5048 TargetNumRegisters = ForceTargetNumVectorRegs; 5049 } 5050 5051 RegisterUsage R = calculateRegisterUsage({VF})[0]; 5052 // We divide by these constants so assume that we have at least one 5053 // instruction that uses at least one register. 5054 R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U); 5055 5056 // We calculate the interleave count using the following formula. 5057 // Subtract the number of loop invariants from the number of available 5058 // registers. These registers are used by all of the interleaved instances. 
5059 // Next, divide the remaining registers by the number of registers that is 5060 // required by the loop, in order to estimate how many parallel instances 5061 // fit without causing spills. All of this is rounded down if necessary to be 5062 // a power of two. We want power of two interleave count to simplify any 5063 // addressing operations or alignment considerations. 5064 // We also want power of two interleave counts to ensure that the induction 5065 // variable of the vector loop wraps to zero, when tail is folded by masking; 5066 // this currently happens when OptForSize, in which case IC is set to 1 above. 5067 unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) / 5068 R.MaxLocalUsers); 5069 5070 // Don't count the induction variable as interleaved. 5071 if (EnableIndVarRegisterHeur) 5072 IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) / 5073 std::max(1U, (R.MaxLocalUsers - 1))); 5074 5075 // Clamp the interleave ranges to reasonable counts. 5076 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF); 5077 5078 // Check if the user has overridden the max. 5079 if (VF == 1) { 5080 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5081 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5082 } else { 5083 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5084 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5085 } 5086 5087 // If we did not calculate the cost for VF (because the user selected the VF) 5088 // then we calculate the cost of VF here. 5089 if (LoopCost == 0) 5090 LoopCost = expectedCost(VF).first; 5091 5092 assert(LoopCost && "Non-zero loop cost expected"); 5093 5094 // Clamp the calculated IC to be between the 1 and the max interleave count 5095 // that the target allows. 5096 if (IC > MaxInterleaveCount) 5097 IC = MaxInterleaveCount; 5098 else if (IC < 1) 5099 IC = 1; 5100 5101 // Interleave if we vectorized this loop and there is a reduction that could 5102 // benefit from interleaving. 5103 if (VF > 1 && !Legal->getReductionVars()->empty()) { 5104 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5105 return IC; 5106 } 5107 5108 // Note that if we've already vectorized the loop we will have done the 5109 // runtime check and so interleaving won't require further checks. 5110 bool InterleavingRequiresRuntimePointerCheck = 5111 (VF == 1 && Legal->getRuntimePointerChecking()->Need); 5112 5113 // We want to interleave small loops in order to reduce the loop overhead and 5114 // potentially expose ILP opportunities. 5115 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'); 5116 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 5117 // We assume that the cost overhead is 1 and we use the cost model 5118 // to estimate the cost of the loop and interleave until the cost of the 5119 // loop overhead is about 5% of the cost of the loop. 5120 unsigned SmallIC = 5121 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5122 5123 // Interleave until store/load ports (estimated by max interleave count) are 5124 // saturated. 5125 unsigned NumStores = Legal->getNumStores(); 5126 unsigned NumLoads = Legal->getNumLoads(); 5127 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5128 unsigned LoadsIC = IC / (NumLoads ? 
NumLoads : 1); 5129 5130 // If we have a scalar reduction (vector reductions are already dealt with 5131 // by this point), we can increase the critical path length if the loop 5132 // we're interleaving is inside another loop. Limit this, by default, to 2, so the 5133 // critical path only gets increased by one reduction operation. 5134 if (!Legal->getReductionVars()->empty() && TheLoop->getLoopDepth() > 1) { 5135 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 5136 SmallIC = std::min(SmallIC, F); 5137 StoresIC = std::min(StoresIC, F); 5138 LoadsIC = std::min(LoadsIC, F); 5139 } 5140 5141 if (EnableLoadStoreRuntimeInterleave && 5142 std::max(StoresIC, LoadsIC) > SmallIC) { 5143 LLVM_DEBUG( 5144 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 5145 return std::max(StoresIC, LoadsIC); 5146 } 5147 5148 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 5149 return SmallIC; 5150 } 5151 5152 // Interleave if this is a large loop (small loops are already dealt with by 5153 // this point) that could benefit from interleaving. 5154 bool HasReductions = !Legal->getReductionVars()->empty(); 5155 if (TTI.enableAggressiveInterleaving(HasReductions)) { 5156 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 5157 return IC; 5158 } 5159 5160 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 5161 return 1; 5162 } 5163 5164 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 5165 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) { 5166 // This function calculates the register usage by measuring the highest number 5167 // of values that are alive at a single location. Obviously, this is a very 5168 // rough estimation. We scan the loop in topological order and 5169 // assign a number to each instruction. We use RPO to ensure that defs are 5170 // met before their users. We assume that each instruction that has in-loop 5171 // users starts an interval. We record every time that an in-loop value is 5172 // used, so we have a list of the first and last occurrences of each 5173 // instruction. Next, we transpose this data structure into a multi-map that 5174 // holds the list of intervals that *end* at a specific location. This multi-5175 // map allows us to perform a linear search. We scan the instructions linearly 5176 // and record each time that a new interval starts, by placing it in a set. 5177 // If we find this value in the multi-map then we remove it from the set. 5178 // The max register usage is the maximum size of the set. 5179 // We also search for instructions that are defined outside the loop, but are 5180 // used inside the loop. We need this number separately from the max-interval 5181 // usage number because when we unroll, loop-invariant values do not take 5182 // more registers. 5183 LoopBlocksDFS DFS(TheLoop); 5184 DFS.perform(LI); 5185 5186 RegisterUsage RU; 5187 5188 // Each 'key' in the map opens a new interval. The values 5189 // of the map are the index of the 'last seen' usage of the 5190 // instruction that is the key. 5191 using IntervalMap = DenseMap<Instruction *, unsigned>; 5192 5193 // Maps instruction to its index. 5194 SmallVector<Instruction *, 64> IdxToInstr; 5195 // Marks the end of each interval. 5196 IntervalMap EndPoint; 5197 // Saves the list of instruction indices that are used in the loop. 5198 SmallPtrSet<Instruction *, 8> Ends; 5199 // Saves the list of values that are used in the loop but are 5200 // defined outside the loop, such as arguments and constants.
5201 SmallPtrSet<Value *, 8> LoopInvariants; 5202 5203 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 5204 for (Instruction &I : BB->instructionsWithoutDebug()) { 5205 IdxToInstr.push_back(&I); 5206 5207 // Save the end location of each USE. 5208 for (Value *U : I.operands()) { 5209 auto *Instr = dyn_cast<Instruction>(U); 5210 5211 // Ignore non-instruction values such as arguments, constants, etc. 5212 if (!Instr) 5213 continue; 5214 5215 // If this instruction is outside the loop then record it and continue. 5216 if (!TheLoop->contains(Instr)) { 5217 LoopInvariants.insert(Instr); 5218 continue; 5219 } 5220 5221 // Overwrite previous end points. 5222 EndPoint[Instr] = IdxToInstr.size(); 5223 Ends.insert(Instr); 5224 } 5225 } 5226 } 5227 5228 // Saves the list of intervals that end with the index in 'key'. 5229 using InstrList = SmallVector<Instruction *, 2>; 5230 DenseMap<unsigned, InstrList> TransposeEnds; 5231 5232 // Transpose the EndPoints to a list of values that end at each index. 5233 for (auto &Interval : EndPoint) 5234 TransposeEnds[Interval.second].push_back(Interval.first); 5235 5236 SmallPtrSet<Instruction *, 8> OpenIntervals; 5237 5238 // Get the size of the widest register. 5239 unsigned MaxSafeDepDist = -1U; 5240 if (Legal->getMaxSafeDepDistBytes() != -1U) 5241 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 5242 unsigned WidestRegister = 5243 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist); 5244 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5245 5246 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5247 SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0); 5248 5249 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5250 5251 // A lambda that gets the register usage for the given type and VF. 5252 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 5253 if (Ty->isTokenTy()) 5254 return 0U; 5255 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 5256 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 5257 }; 5258 5259 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 5260 Instruction *I = IdxToInstr[i]; 5261 5262 // Remove all of the instructions that end at this location. 5263 InstrList &List = TransposeEnds[i]; 5264 for (Instruction *ToRemove : List) 5265 OpenIntervals.erase(ToRemove); 5266 5267 // Ignore instructions that are never used within the loop. 5268 if (Ends.find(I) == Ends.end()) 5269 continue; 5270 5271 // Skip ignored values. 5272 if (ValuesToIgnore.find(I) != ValuesToIgnore.end()) 5273 continue; 5274 5275 // For each VF find the maximum usage of registers. 5276 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 5277 if (VFs[j] == 1) { 5278 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 5279 continue; 5280 } 5281 collectUniformsAndScalars(VFs[j]); 5282 // Count the number of live intervals. 5283 unsigned RegUsage = 0; 5284 for (auto Inst : OpenIntervals) { 5285 // Skip ignored values for VF > 1. 5286 if (VecValuesToIgnore.find(Inst) != VecValuesToIgnore.end() || 5287 isScalarAfterVectorization(Inst, VFs[j])) 5288 continue; 5289 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 5290 } 5291 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 5292 } 5293 5294 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 5295 << OpenIntervals.size() << '\n'); 5296 5297 // Add the current instruction to the list of open intervals. 
5298 OpenIntervals.insert(I); 5299 } 5300 5301 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 5302 unsigned Invariant = 0; 5303 if (VFs[i] == 1) 5304 Invariant = LoopInvariants.size(); 5305 else { 5306 for (auto Inst : LoopInvariants) 5307 Invariant += GetRegUsage(Inst->getType(), VFs[i]); 5308 } 5309 5310 LLVM_DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); 5311 LLVM_DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); 5312 LLVM_DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant 5313 << '\n'); 5314 5315 RU.LoopInvariantRegs = Invariant; 5316 RU.MaxLocalUsers = MaxUsages[i]; 5317 RUs[i] = RU; 5318 } 5319 5320 return RUs; 5321 } 5322 5323 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 5324 // TODO: Cost model for emulated masked load/store is completely 5325 // broken. This hack guides the cost model to use an artificially 5326 // high enough value to practically disable vectorization with such 5327 // operations, except where previously deployed legality hack allowed 5328 // using very low cost values. This is to avoid regressions coming simply 5329 // from moving "masked load/store" check from legality to cost model. 5330 // Masked Load/Gather emulation was previously never allowed. 5331 // Limited number of Masked Store/Scatter emulation was allowed. 5332 assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction"); 5333 return isa<LoadInst>(I) || 5334 (isa<StoreInst>(I) && 5335 NumPredStores > NumberOfStoresToPredicate); 5336 } 5337 5338 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) { 5339 // If we aren't vectorizing the loop, or if we've already collected the 5340 // instructions to scalarize, there's nothing to do. Collection may already 5341 // have occurred if we have a user-selected VF and are now computing the 5342 // expected cost for interleaving. 5343 if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end()) 5344 return; 5345 5346 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 5347 // not profitable to scalarize any instructions, the presence of VF in the 5348 // map will indicate that we've analyzed it already. 5349 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 5350 5351 // Find all the instructions that are scalar with predication in the loop and 5352 // determine if it would be better to not if-convert the blocks they are in. 5353 // If so, we also record the instructions to scalarize. 5354 for (BasicBlock *BB : TheLoop->blocks()) { 5355 if (!blockNeedsPredication(BB)) 5356 continue; 5357 for (Instruction &I : *BB) 5358 if (isScalarWithPredication(&I)) { 5359 ScalarCostsTy ScalarCosts; 5360 // Do not apply discount logic if hacked cost is needed 5361 // for emulated masked memrefs. 5362 if (!useEmulatedMaskMemRefHack(&I) && 5363 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 5364 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 5365 // Remember that BB will remain after vectorization. 5366 PredicatedBBsAfterVectorization.insert(BB); 5367 } 5368 } 5369 } 5370 5371 int LoopVectorizationCostModel::computePredInstDiscount( 5372 Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts, 5373 unsigned VF) { 5374 assert(!isUniformAfterVectorization(PredInst, VF) && 5375 "Instruction marked uniform-after-vectorization will be predicated"); 5376 5377 // Initialize the discount to zero, meaning that the scalar version and the 5378 // vector version cost the same. 
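  // For example (hypothetical costs): if the vectorized form of the chain
  // costs 12 and its scalarized form, after scaling by the block probability,
  // costs 8, the accumulated discount is +4 and scalarizing that chain is
  // deemed profitable.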
5379 int Discount = 0; 5380 5381 // Holds instructions to analyze. The instructions we visit are mapped in 5382 // ScalarCosts. Those instructions are the ones that would be scalarized if 5383 // we find that the scalar version costs less. 5384 SmallVector<Instruction *, 8> Worklist; 5385 5386 // Returns true if the given instruction can be scalarized. 5387 auto canBeScalarized = [&](Instruction *I) -> bool { 5388 // We only attempt to scalarize instructions forming a single-use chain 5389 // from the original predicated block that would otherwise be vectorized. 5390 // Although not strictly necessary, we give up on instructions we know will 5391 // already be scalar to avoid traversing chains that are unlikely to be 5392 // beneficial. 5393 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 5394 isScalarAfterVectorization(I, VF)) 5395 return false; 5396 5397 // If the instruction is scalar with predication, it will be analyzed 5398 // separately. We ignore it within the context of PredInst. 5399 if (isScalarWithPredication(I)) 5400 return false; 5401 5402 // If any of the instruction's operands are uniform after vectorization, 5403 // the instruction cannot be scalarized. This prevents, for example, a 5404 // masked load from being scalarized. 5405 // 5406 // We assume we will only emit a value for lane zero of an instruction 5407 // marked uniform after vectorization, rather than VF identical values. 5408 // Thus, if we scalarize an instruction that uses a uniform, we would 5409 // create uses of values corresponding to the lanes we aren't emitting code 5410 // for. This behavior can be changed by allowing getScalarValue to clone 5411 // the lane zero values for uniforms rather than asserting. 5412 for (Use &U : I->operands()) 5413 if (auto *J = dyn_cast<Instruction>(U.get())) 5414 if (isUniformAfterVectorization(J, VF)) 5415 return false; 5416 5417 // Otherwise, we can scalarize the instruction. 5418 return true; 5419 }; 5420 5421 // Compute the expected cost discount from scalarizing the entire expression 5422 // feeding the predicated instruction. We currently only consider expressions 5423 // that are single-use instruction chains. 5424 Worklist.push_back(PredInst); 5425 while (!Worklist.empty()) { 5426 Instruction *I = Worklist.pop_back_val(); 5427 5428 // If we've already analyzed the instruction, there's nothing to do. 5429 if (ScalarCosts.find(I) != ScalarCosts.end()) 5430 continue; 5431 5432 // Compute the cost of the vector instruction. Note that this cost already 5433 // includes the scalarization overhead of the predicated instruction. 5434 unsigned VectorCost = getInstructionCost(I, VF).first; 5435 5436 // Compute the cost of the scalarized instruction. This cost is the cost of 5437 // the instruction as if it wasn't if-converted and instead remained in the 5438 // predicated block. We will scale this cost by block probability after 5439 // computing the scalarization overhead. 5440 unsigned ScalarCost = VF * getInstructionCost(I, 1).first; 5441 5442 // Compute the scalarization overhead of needed insertelement instructions 5443 // and phi nodes. 5444 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 5445 ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF), 5446 true, false); 5447 ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI); 5448 } 5449 5450 // Compute the scalarization overhead of needed extractelement 5451 // instructions. 
For each of the instruction's operands, if the operand can 5452 // be scalarized, add it to the worklist; otherwise, account for the 5453 // overhead. 5454 for (Use &U : I->operands()) 5455 if (auto *J = dyn_cast<Instruction>(U.get())) { 5456 assert(VectorType::isValidElementType(J->getType()) && 5457 "Instruction has non-scalar type"); 5458 if (canBeScalarized(J)) 5459 Worklist.push_back(J); 5460 else if (needsExtract(J, VF)) 5461 ScalarCost += TTI.getScalarizationOverhead( 5462 ToVectorTy(J->getType(),VF), false, true); 5463 } 5464 5465 // Scale the total scalar cost by block probability. 5466 ScalarCost /= getReciprocalPredBlockProb(); 5467 5468 // Compute the discount. A non-negative discount means the vector version 5469 // of the instruction costs more, and scalarizing would be beneficial. 5470 Discount += VectorCost - ScalarCost; 5471 ScalarCosts[I] = ScalarCost; 5472 } 5473 5474 return Discount; 5475 } 5476 5477 LoopVectorizationCostModel::VectorizationCostTy 5478 LoopVectorizationCostModel::expectedCost(unsigned VF) { 5479 VectorizationCostTy Cost; 5480 5481 // For each block. 5482 for (BasicBlock *BB : TheLoop->blocks()) { 5483 VectorizationCostTy BlockCost; 5484 5485 // For each instruction in the old loop. 5486 for (Instruction &I : BB->instructionsWithoutDebug()) { 5487 // Skip ignored values. 5488 if (ValuesToIgnore.find(&I) != ValuesToIgnore.end() || 5489 (VF > 1 && VecValuesToIgnore.find(&I) != VecValuesToIgnore.end())) 5490 continue; 5491 5492 VectorizationCostTy C = getInstructionCost(&I, VF); 5493 5494 // Check if we should override the cost. 5495 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 5496 C.first = ForceTargetInstructionCost; 5497 5498 BlockCost.first += C.first; 5499 BlockCost.second |= C.second; 5500 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 5501 << " for VF " << VF << " For instruction: " << I 5502 << '\n'); 5503 } 5504 5505 // If we are vectorizing a predicated block, it will have been 5506 // if-converted. This means that the block's instructions (aside from 5507 // stores and instructions that may divide by zero) will now be 5508 // unconditionally executed. For the scalar case, we may not always execute 5509 // the predicated block. Thus, scale the block's cost by the probability of 5510 // executing it. 5511 if (VF == 1 && blockNeedsPredication(BB)) 5512 BlockCost.first /= getReciprocalPredBlockProb(); 5513 5514 Cost.first += BlockCost.first; 5515 Cost.second |= BlockCost.second; 5516 } 5517 5518 return Cost; 5519 } 5520 5521 /// Gets Address Access SCEV after verifying that the access pattern 5522 /// is loop invariant except the induction variable dependence. 5523 /// 5524 /// This SCEV can be sent to the Target in order to estimate the address 5525 /// calculation cost. 5526 static const SCEV *getAddressAccessSCEV( 5527 Value *Ptr, 5528 LoopVectorizationLegality *Legal, 5529 PredicatedScalarEvolution &PSE, 5530 const Loop *TheLoop) { 5531 5532 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 5533 if (!Gep) 5534 return nullptr; 5535 5536 // We are looking for a gep with all loop invariant indices except for one 5537 // which should be an induction variable. 5538 auto SE = PSE.getSE(); 5539 unsigned NumOperands = Gep->getNumOperands(); 5540 for (unsigned i = 1; i < NumOperands; ++i) { 5541 Value *Opd = Gep->getOperand(i); 5542 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 5543 !Legal->isInductionVariable(Opd)) 5544 return nullptr; 5545 } 5546 5547 // Now we know we have a GEP ptr, %inv, %ind, %inv. 
Return the Ptr SCEV.
  return PSE.getSCEV(Ptr);
}

static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
  return Legal->hasStride(I->getOperand(0)) ||
         Legal->hasStride(I->getOperand(1));
}

unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
                                                                 unsigned VF) {
  assert(VF > 1 && "Scalarization cost of instruction implies vectorization.");
  Type *ValTy = getMemInstValueType(I);
  auto SE = PSE.getSE();

  unsigned Alignment = getLoadStoreAlignment(I);
  unsigned AS = getLoadStoreAddressSpace(I);
  Value *Ptr = getLoadStorePointerOperand(I);
  Type *PtrTy = ToVectorTy(Ptr->getType(), VF);

  // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
  const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);

  // Get the cost of the scalar memory instruction and address computation.
  unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);

  // Don't pass *I here, since it is scalar but will actually be part of a
  // vectorized loop where the user of it is a vectorized instruction.
  Cost += VF *
          TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
                              AS);

  // Get the overhead of the extractelement and insertelement instructions
  // we might create due to scalarization.
  Cost += getScalarizationOverhead(I, VF);

  // If we have a predicated store, it may not be executed for each vector
  // lane. Scale the cost by the probability of executing the predicated
  // block.
  if (isPredicatedInst(I)) {
    Cost /= getReciprocalPredBlockProb();

    if (useEmulatedMaskMemRefHack(I))
      // Artificially setting to a high enough value to practically disable
      // vectorization with such operations.
5593 Cost = 3000000; 5594 } 5595 5596 return Cost; 5597 } 5598 5599 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 5600 unsigned VF) { 5601 Type *ValTy = getMemInstValueType(I); 5602 Type *VectorTy = ToVectorTy(ValTy, VF); 5603 unsigned Alignment = getLoadStoreAlignment(I); 5604 Value *Ptr = getLoadStorePointerOperand(I); 5605 unsigned AS = getLoadStoreAddressSpace(I); 5606 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 5607 5608 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 5609 "Stride should be 1 or -1 for consecutive memory access"); 5610 unsigned Cost = 0; 5611 if (Legal->isMaskRequired(I)) 5612 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 5613 else 5614 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I); 5615 5616 bool Reverse = ConsecutiveStride < 0; 5617 if (Reverse) 5618 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5619 return Cost; 5620 } 5621 5622 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 5623 unsigned VF) { 5624 Type *ValTy = getMemInstValueType(I); 5625 Type *VectorTy = ToVectorTy(ValTy, VF); 5626 unsigned Alignment = getLoadStoreAlignment(I); 5627 unsigned AS = getLoadStoreAddressSpace(I); 5628 if (isa<LoadInst>(I)) { 5629 return TTI.getAddressComputationCost(ValTy) + 5630 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) + 5631 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 5632 } 5633 StoreInst *SI = cast<StoreInst>(I); 5634 5635 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 5636 return TTI.getAddressComputationCost(ValTy) + 5637 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS) + 5638 (isLoopInvariantStoreValue ? 0 : TTI.getVectorInstrCost( 5639 Instruction::ExtractElement, 5640 VectorTy, VF - 1)); 5641 } 5642 5643 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 5644 unsigned VF) { 5645 Type *ValTy = getMemInstValueType(I); 5646 Type *VectorTy = ToVectorTy(ValTy, VF); 5647 unsigned Alignment = getLoadStoreAlignment(I); 5648 Value *Ptr = getLoadStorePointerOperand(I); 5649 5650 return TTI.getAddressComputationCost(VectorTy) + 5651 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 5652 Legal->isMaskRequired(I), Alignment); 5653 } 5654 5655 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 5656 unsigned VF) { 5657 Type *ValTy = getMemInstValueType(I); 5658 Type *VectorTy = ToVectorTy(ValTy, VF); 5659 unsigned AS = getLoadStoreAddressSpace(I); 5660 5661 auto Group = getInterleavedAccessGroup(I); 5662 assert(Group && "Fail to get an interleaved access group."); 5663 5664 unsigned InterleaveFactor = Group->getFactor(); 5665 Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 5666 5667 // Holds the indices of existing members in an interleaved load group. 5668 // An interleaved store group doesn't need this as it doesn't allow gaps. 5669 SmallVector<unsigned, 4> Indices; 5670 if (isa<LoadInst>(I)) { 5671 for (unsigned i = 0; i < InterleaveFactor; i++) 5672 if (Group->getMember(i)) 5673 Indices.push_back(i); 5674 } 5675 5676 // Calculate the cost of the whole interleaved group. 
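// As an illustrative (not exhaustive) example: with VF = 4, a load group of
// factor 2 and element type i32, WideVecTy is <8 x i32>; if member 1 of the
// group is missing, Indices = {0} and the target prices one wide load plus
// whatever shuffling it needs to extract the single member. UseMaskForGaps
// below additionally requests masking of the unused lanes when a scalar
// epilogue is not allowed to cover them.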
5677 bool UseMaskForGaps = 5678 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 5679 unsigned Cost = TTI.getInterleavedMemoryOpCost( 5680 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, 5681 Group->getAlignment(), AS, Legal->isMaskRequired(I), UseMaskForGaps); 5682 5683 if (Group->isReverse()) { 5684 // TODO: Add support for reversed masked interleaved access. 5685 assert(!Legal->isMaskRequired(I) && 5686 "Reverse masked interleaved access not supported."); 5687 Cost += Group->getNumMembers() * 5688 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5689 } 5690 return Cost; 5691 } 5692 5693 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 5694 unsigned VF) { 5695 // Calculate scalar cost only. Vectorization cost should be ready at this 5696 // moment. 5697 if (VF == 1) { 5698 Type *ValTy = getMemInstValueType(I); 5699 unsigned Alignment = getLoadStoreAlignment(I); 5700 unsigned AS = getLoadStoreAddressSpace(I); 5701 5702 return TTI.getAddressComputationCost(ValTy) + 5703 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I); 5704 } 5705 return getWideningCost(I, VF); 5706 } 5707 5708 LoopVectorizationCostModel::VectorizationCostTy 5709 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 5710 // If we know that this instruction will remain uniform, check the cost of 5711 // the scalar version. 5712 if (isUniformAfterVectorization(I, VF)) 5713 VF = 1; 5714 5715 if (VF > 1 && isProfitableToScalarize(I, VF)) 5716 return VectorizationCostTy(InstsToScalarize[VF][I], false); 5717 5718 // Forced scalars do not have any scalarization overhead. 5719 auto ForcedScalar = ForcedScalars.find(VF); 5720 if (VF > 1 && ForcedScalar != ForcedScalars.end()) { 5721 auto InstSet = ForcedScalar->second; 5722 if (InstSet.find(I) != InstSet.end()) 5723 return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false); 5724 } 5725 5726 Type *VectorTy; 5727 unsigned C = getInstructionCost(I, VF, VectorTy); 5728 5729 bool TypeNotScalarized = 5730 VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF; 5731 return VectorizationCostTy(C, TypeNotScalarized); 5732 } 5733 5734 unsigned LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 5735 unsigned VF) { 5736 5737 if (VF == 1) 5738 return 0; 5739 5740 unsigned Cost = 0; 5741 Type *RetTy = ToVectorTy(I->getType(), VF); 5742 if (!RetTy->isVoidTy() && 5743 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 5744 Cost += TTI.getScalarizationOverhead(RetTy, true, false); 5745 5746 // Some targets keep addresses scalar. 5747 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 5748 return Cost; 5749 5750 // Some targets support efficient element stores. 5751 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 5752 return Cost; 5753 5754 // Collect operands to consider. 5755 CallInst *CI = dyn_cast<CallInst>(I); 5756 Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); 5757 5758 // Skip operands that do not require extraction/scalarization and do not incur 5759 // any overhead. 5760 return Cost + TTI.getOperandsScalarizationOverhead( 5761 filterExtractingOperands(Ops, VF), VF); 5762 } 5763 5764 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) { 5765 if (VF == 1) 5766 return; 5767 NumPredStores = 0; 5768 for (BasicBlock *BB : TheLoop->blocks()) { 5769 // For each instruction in the old loop. 
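// The per-instruction decision below is made in order of preference:
// keep a uniform address scalar (CM_Scalarize with a cheap broadcast or
// extract), widen a consecutive access (CM_Widen / CM_Widen_Reverse), and
// otherwise take the cheapest of interleaving, gather/scatter and
// scalarization.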
5770 for (Instruction &I : *BB) { 5771 Value *Ptr = getLoadStorePointerOperand(&I); 5772 if (!Ptr) 5773 continue; 5774 5775 // TODO: We should generate better code and update the cost model for 5776 // predicated uniform stores. Today they are treated as any other 5777 // predicated store (see added test cases in 5778 // invariant-store-vectorization.ll). 5779 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 5780 NumPredStores++; 5781 5782 if (Legal->isUniform(Ptr) && 5783 // Conditional loads and stores should be scalarized and predicated. 5784 // isScalarWithPredication cannot be used here since masked 5785 // gather/scatters are not considered scalar with predication. 5786 !Legal->blockNeedsPredication(I.getParent())) { 5787 // TODO: Avoid replicating loads and stores instead of 5788 // relying on instcombine to remove them. 5789 // Load: Scalar load + broadcast 5790 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 5791 unsigned Cost = getUniformMemOpCost(&I, VF); 5792 setWideningDecision(&I, VF, CM_Scalarize, Cost); 5793 continue; 5794 } 5795 5796 // We assume that widening is the best solution when possible. 5797 if (memoryInstructionCanBeWidened(&I, VF)) { 5798 unsigned Cost = getConsecutiveMemOpCost(&I, VF); 5799 int ConsecutiveStride = 5800 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 5801 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 5802 "Expected consecutive stride."); 5803 InstWidening Decision = 5804 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 5805 setWideningDecision(&I, VF, Decision, Cost); 5806 continue; 5807 } 5808 5809 // Choose between Interleaving, Gather/Scatter or Scalarization. 5810 unsigned InterleaveCost = std::numeric_limits<unsigned>::max(); 5811 unsigned NumAccesses = 1; 5812 if (isAccessInterleaved(&I)) { 5813 auto Group = getInterleavedAccessGroup(&I); 5814 assert(Group && "Fail to get an interleaved access group."); 5815 5816 // Make one decision for the whole group. 5817 if (getWideningDecision(&I, VF) != CM_Unknown) 5818 continue; 5819 5820 NumAccesses = Group->getNumMembers(); 5821 if (interleavedAccessCanBeWidened(&I, VF)) 5822 InterleaveCost = getInterleaveGroupCost(&I, VF); 5823 } 5824 5825 unsigned GatherScatterCost = 5826 isLegalGatherOrScatter(&I) 5827 ? getGatherScatterCost(&I, VF) * NumAccesses 5828 : std::numeric_limits<unsigned>::max(); 5829 5830 unsigned ScalarizationCost = 5831 getMemInstScalarizationCost(&I, VF) * NumAccesses; 5832 5833 // Choose better solution for the current VF, 5834 // write down this decision and use it during vectorization. 5835 unsigned Cost; 5836 InstWidening Decision; 5837 if (InterleaveCost <= GatherScatterCost && 5838 InterleaveCost < ScalarizationCost) { 5839 Decision = CM_Interleave; 5840 Cost = InterleaveCost; 5841 } else if (GatherScatterCost < ScalarizationCost) { 5842 Decision = CM_GatherScatter; 5843 Cost = GatherScatterCost; 5844 } else { 5845 Decision = CM_Scalarize; 5846 Cost = ScalarizationCost; 5847 } 5848 // If the instructions belongs to an interleave group, the whole group 5849 // receives the same decision. The whole group receives the cost, but 5850 // the cost will actually be assigned to one instruction. 5851 if (auto Group = getInterleavedAccessGroup(&I)) 5852 setWideningDecision(Group, VF, Decision, Cost); 5853 else 5854 setWideningDecision(&I, VF, Decision, Cost); 5855 } 5856 } 5857 5858 // Make sure that any load of address and any other address computation 5859 // remains scalar unless there is gather/scatter support. 
This avoids 5860 // inevitable extracts into address registers, and also has the benefit of 5861 // activating LSR more, since that pass can't optimize vectorized 5862 // addresses. 5863 if (TTI.prefersVectorizedAddressing()) 5864 return; 5865 5866 // Start with all scalar pointer uses. 5867 SmallPtrSet<Instruction *, 8> AddrDefs; 5868 for (BasicBlock *BB : TheLoop->blocks()) 5869 for (Instruction &I : *BB) { 5870 Instruction *PtrDef = 5871 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 5872 if (PtrDef && TheLoop->contains(PtrDef) && 5873 getWideningDecision(&I, VF) != CM_GatherScatter) 5874 AddrDefs.insert(PtrDef); 5875 } 5876 5877 // Add all instructions used to generate the addresses. 5878 SmallVector<Instruction *, 4> Worklist; 5879 for (auto *I : AddrDefs) 5880 Worklist.push_back(I); 5881 while (!Worklist.empty()) { 5882 Instruction *I = Worklist.pop_back_val(); 5883 for (auto &Op : I->operands()) 5884 if (auto *InstOp = dyn_cast<Instruction>(Op)) 5885 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 5886 AddrDefs.insert(InstOp).second) 5887 Worklist.push_back(InstOp); 5888 } 5889 5890 for (auto *I : AddrDefs) { 5891 if (isa<LoadInst>(I)) { 5892 // Setting the desired widening decision should ideally be handled in 5893 // by cost functions, but since this involves the task of finding out 5894 // if the loaded register is involved in an address computation, it is 5895 // instead changed here when we know this is the case. 5896 InstWidening Decision = getWideningDecision(I, VF); 5897 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 5898 // Scalarize a widened load of address. 5899 setWideningDecision(I, VF, CM_Scalarize, 5900 (VF * getMemoryInstructionCost(I, 1))); 5901 else if (auto Group = getInterleavedAccessGroup(I)) { 5902 // Scalarize an interleave group of address loads. 5903 for (unsigned I = 0; I < Group->getFactor(); ++I) { 5904 if (Instruction *Member = Group->getMember(I)) 5905 setWideningDecision(Member, VF, CM_Scalarize, 5906 (VF * getMemoryInstructionCost(Member, 1))); 5907 } 5908 } 5909 } else 5910 // Make sure I gets scalarized and a cost estimate without 5911 // scalarization overhead. 5912 ForcedScalars[VF].insert(I); 5913 } 5914 } 5915 5916 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 5917 unsigned VF, 5918 Type *&VectorTy) { 5919 Type *RetTy = I->getType(); 5920 if (canTruncateToMinimalBitwidth(I, VF)) 5921 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 5922 VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF); 5923 auto SE = PSE.getSE(); 5924 5925 // TODO: We need to estimate the cost of intrinsic calls. 5926 switch (I->getOpcode()) { 5927 case Instruction::GetElementPtr: 5928 // We mark this instruction as zero-cost because the cost of GEPs in 5929 // vectorized code depends on whether the corresponding memory instruction 5930 // is scalarized or not. Therefore, we handle GEPs with the memory 5931 // instruction cost. 5932 return 0; 5933 case Instruction::Br: { 5934 // In cases of scalarized and predicated instructions, there will be VF 5935 // predicated blocks in the vectorized loop. Each branch around these 5936 // blocks requires also an extract of its vector compare i1 element. 
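// In that case the cost charged below is the extraction overhead of a
// <VF x i1> compare vector plus VF copies of a scalar branch cost; see the
// ScalarPredicatedBB handling that follows.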
5937 bool ScalarPredicatedBB = false; 5938 BranchInst *BI = cast<BranchInst>(I); 5939 if (VF > 1 && BI->isConditional() && 5940 (PredicatedBBsAfterVectorization.find(BI->getSuccessor(0)) != 5941 PredicatedBBsAfterVectorization.end() || 5942 PredicatedBBsAfterVectorization.find(BI->getSuccessor(1)) != 5943 PredicatedBBsAfterVectorization.end())) 5944 ScalarPredicatedBB = true; 5945 5946 if (ScalarPredicatedBB) { 5947 // Return cost for branches around scalarized and predicated blocks. 5948 Type *Vec_i1Ty = 5949 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 5950 return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) + 5951 (TTI.getCFInstrCost(Instruction::Br) * VF)); 5952 } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1) 5953 // The back-edge branch will remain, as will all scalar branches. 5954 return TTI.getCFInstrCost(Instruction::Br); 5955 else 5956 // This branch will be eliminated by if-conversion. 5957 return 0; 5958 // Note: We currently assume zero cost for an unconditional branch inside 5959 // a predicated block since it will become a fall-through, although we 5960 // may decide in the future to call TTI for all branches. 5961 } 5962 case Instruction::PHI: { 5963 auto *Phi = cast<PHINode>(I); 5964 5965 // First-order recurrences are replaced by vector shuffles inside the loop. 5966 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 5967 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 5968 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 5969 VectorTy, VF - 1, VectorType::get(RetTy, 1)); 5970 5971 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 5972 // converted into select instructions. We require N - 1 selects per phi 5973 // node, where N is the number of incoming values. 5974 if (VF > 1 && Phi->getParent() != TheLoop->getHeader()) 5975 return (Phi->getNumIncomingValues() - 1) * 5976 TTI.getCmpSelInstrCost( 5977 Instruction::Select, ToVectorTy(Phi->getType(), VF), 5978 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF)); 5979 5980 return TTI.getCFInstrCost(Instruction::PHI); 5981 } 5982 case Instruction::UDiv: 5983 case Instruction::SDiv: 5984 case Instruction::URem: 5985 case Instruction::SRem: 5986 // If we have a predicated instruction, it may not be executed for each 5987 // vector lane. Get the scalarization cost and scale this amount by the 5988 // probability of executing the predicated block. If the instruction is not 5989 // predicated, we fall through to the next case. 5990 if (VF > 1 && isScalarWithPredication(I)) { 5991 unsigned Cost = 0; 5992 5993 // These instructions have a non-void type, so account for the phi nodes 5994 // that we will create. This cost is likely to be zero. The phi node 5995 // cost, if any, should be scaled by the block probability because it 5996 // models a copy at the end of each predicated block. 5997 Cost += VF * TTI.getCFInstrCost(Instruction::PHI); 5998 5999 // The cost of the non-predicated instruction. 6000 Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy); 6001 6002 // The cost of insertelement and extractelement instructions needed for 6003 // scalarization. 6004 Cost += getScalarizationOverhead(I, VF); 6005 6006 // Scale the cost by the probability of executing the predicated blocks. 6007 // This assumes the predicated block for each vector lane is equally 6008 // likely. 
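// For example, if getReciprocalPredBlockProb() is 2 (i.e. the predicated
// block is assumed to execute on half of the iterations), the scalarized
// cost accumulated above is halved before being returned.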
6009 return Cost / getReciprocalPredBlockProb(); 6010 } 6011 LLVM_FALLTHROUGH; 6012 case Instruction::Add: 6013 case Instruction::FAdd: 6014 case Instruction::Sub: 6015 case Instruction::FSub: 6016 case Instruction::Mul: 6017 case Instruction::FMul: 6018 case Instruction::FDiv: 6019 case Instruction::FRem: 6020 case Instruction::Shl: 6021 case Instruction::LShr: 6022 case Instruction::AShr: 6023 case Instruction::And: 6024 case Instruction::Or: 6025 case Instruction::Xor: { 6026 // Since we will replace the stride by 1 the multiplication should go away. 6027 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 6028 return 0; 6029 // Certain instructions can be cheaper to vectorize if they have a constant 6030 // second vector operand. One example of this are shifts on x86. 6031 Value *Op2 = I->getOperand(1); 6032 TargetTransformInfo::OperandValueProperties Op2VP; 6033 TargetTransformInfo::OperandValueKind Op2VK = 6034 TTI.getOperandInfo(Op2, Op2VP); 6035 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 6036 Op2VK = TargetTransformInfo::OK_UniformValue; 6037 6038 SmallVector<const Value *, 4> Operands(I->operand_values()); 6039 unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1; 6040 return N * TTI.getArithmeticInstrCost( 6041 I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue, 6042 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands); 6043 } 6044 case Instruction::FNeg: { 6045 unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1; 6046 return N * TTI.getArithmeticInstrCost( 6047 I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue, 6048 TargetTransformInfo::OK_AnyValue, 6049 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None, 6050 I->getOperand(0)); 6051 } 6052 case Instruction::Select: { 6053 SelectInst *SI = cast<SelectInst>(I); 6054 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 6055 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 6056 Type *CondTy = SI->getCondition()->getType(); 6057 if (!ScalarCond) 6058 CondTy = VectorType::get(CondTy, VF); 6059 6060 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I); 6061 } 6062 case Instruction::ICmp: 6063 case Instruction::FCmp: { 6064 Type *ValTy = I->getOperand(0)->getType(); 6065 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 6066 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 6067 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 6068 VectorTy = ToVectorTy(ValTy, VF); 6069 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I); 6070 } 6071 case Instruction::Store: 6072 case Instruction::Load: { 6073 unsigned Width = VF; 6074 if (Width > 1) { 6075 InstWidening Decision = getWideningDecision(I, Width); 6076 assert(Decision != CM_Unknown && 6077 "CM decision should be taken at this point"); 6078 if (Decision == CM_Scalarize) 6079 Width = 1; 6080 } 6081 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 6082 return getMemoryInstructionCost(I, VF); 6083 } 6084 case Instruction::ZExt: 6085 case Instruction::SExt: 6086 case Instruction::FPToUI: 6087 case Instruction::FPToSI: 6088 case Instruction::FPExt: 6089 case Instruction::PtrToInt: 6090 case Instruction::IntToPtr: 6091 case Instruction::SIToFP: 6092 case Instruction::UIToFP: 6093 case Instruction::Trunc: 6094 case Instruction::FPTrunc: 6095 case Instruction::BitCast: { 6096 // We optimize the truncation of induction variables having constant 6097 // integer steps. 
The cost of these truncations is the same as the scalar 6098 // operation. 6099 if (isOptimizableIVTruncate(I, VF)) { 6100 auto *Trunc = cast<TruncInst>(I); 6101 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 6102 Trunc->getSrcTy(), Trunc); 6103 } 6104 6105 Type *SrcScalarTy = I->getOperand(0)->getType(); 6106 Type *SrcVecTy = 6107 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 6108 if (canTruncateToMinimalBitwidth(I, VF)) { 6109 // This cast is going to be shrunk. This may remove the cast or it might 6110 // turn it into slightly different cast. For example, if MinBW == 16, 6111 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 6112 // 6113 // Calculate the modified src and dest types. 6114 Type *MinVecTy = VectorTy; 6115 if (I->getOpcode() == Instruction::Trunc) { 6116 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 6117 VectorTy = 6118 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6119 } else if (I->getOpcode() == Instruction::ZExt || 6120 I->getOpcode() == Instruction::SExt) { 6121 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 6122 VectorTy = 6123 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6124 } 6125 } 6126 6127 unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1; 6128 return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I); 6129 } 6130 case Instruction::Call: { 6131 bool NeedToScalarize; 6132 CallInst *CI = cast<CallInst>(I); 6133 unsigned CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 6134 if (getVectorIntrinsicIDForCall(CI, TLI)) 6135 return std::min(CallCost, getVectorIntrinsicCost(CI, VF)); 6136 return CallCost; 6137 } 6138 default: 6139 // The cost of executing VF copies of the scalar instruction. This opcode 6140 // is unknown. Assume that it is the same as 'mul'. 6141 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) + 6142 getScalarizationOverhead(I, VF); 6143 } // end of switch. 6144 } 6145 6146 char LoopVectorize::ID = 0; 6147 6148 static const char lv_name[] = "Loop Vectorization"; 6149 6150 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 6151 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 6152 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 6153 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 6154 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 6155 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 6156 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 6157 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 6158 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 6159 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 6160 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 6161 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 6162 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 6163 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 6164 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 6165 6166 namespace llvm { 6167 6168 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 6169 6170 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 6171 bool VectorizeOnlyWhenForced) { 6172 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 6173 } 6174 6175 } // end namespace llvm 6176 6177 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 6178 // Check if the pointer operand of a load or store instruction is 6179 // consecutive. 
6180 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 6181 return Legal->isConsecutivePtr(Ptr); 6182 return false; 6183 } 6184 6185 void LoopVectorizationCostModel::collectValuesToIgnore() { 6186 // Ignore ephemeral values. 6187 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 6188 6189 // Ignore type-promoting instructions we identified during reduction 6190 // detection. 6191 for (auto &Reduction : *Legal->getReductionVars()) { 6192 RecurrenceDescriptor &RedDes = Reduction.second; 6193 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 6194 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6195 } 6196 // Ignore type-casting instructions we identified during induction 6197 // detection. 6198 for (auto &Induction : *Legal->getInductionVars()) { 6199 InductionDescriptor &IndDes = Induction.second; 6200 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6201 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6202 } 6203 } 6204 6205 // TODO: we could return a pair of values that specify the max VF and 6206 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 6207 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment 6208 // doesn't have a cost model that can choose which plan to execute if 6209 // more than one is generated. 6210 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 6211 LoopVectorizationCostModel &CM) { 6212 unsigned WidestType; 6213 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 6214 return WidestVectorRegBits / WidestType; 6215 } 6216 6217 VectorizationFactor 6218 LoopVectorizationPlanner::planInVPlanNativePath(unsigned UserVF) { 6219 unsigned VF = UserVF; 6220 // Outer loop handling: They may require CFG and instruction level 6221 // transformations before even evaluating whether vectorization is profitable. 6222 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 6223 // the vectorization pipeline. 6224 if (!OrigLoop->empty()) { 6225 // If the user doesn't provide a vectorization factor, determine a 6226 // reasonable one. 6227 if (!UserVF) { 6228 VF = determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM); 6229 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 6230 6231 // Make sure we have a VF > 1 for stress testing. 6232 if (VPlanBuildStressTest && VF < 2) { 6233 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 6234 << "overriding computed VF.\n"); 6235 VF = 4; 6236 } 6237 } 6238 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 6239 assert(isPowerOf2_32(VF) && "VF needs to be a power of two"); 6240 LLVM_DEBUG(dbgs() << "LV: Using " << (UserVF ? "user " : "") << "VF " << VF 6241 << " to build VPlans.\n"); 6242 buildVPlans(VF, VF); 6243 6244 // For VPlan build stress testing, we bail out after VPlan construction. 6245 if (VPlanBuildStressTest) 6246 return VectorizationFactor::Disabled(); 6247 6248 return {VF, 0}; 6249 } 6250 6251 LLVM_DEBUG( 6252 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 6253 "VPlan-native path.\n"); 6254 return VectorizationFactor::Disabled(); 6255 } 6256 6257 Optional<VectorizationFactor> LoopVectorizationPlanner::plan(unsigned UserVF) { 6258 assert(OrigLoop->empty() && "Inner loop expected."); 6259 Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(); 6260 if (!MaybeMaxVF) // Cases that should not to be vectorized nor interleaved. 6261 return None; 6262 6263 // Invalidate interleave groups if all blocks of loop will be predicated. 
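// Predication of the header here implies the tail is being folded by
// masking, so every interleaved access would require a mask; without target
// support for masked interleaved accesses the groups cannot be kept, hence
// the reset below.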
6264 if (CM.blockNeedsPredication(OrigLoop->getHeader()) && 6265 !useMaskedInterleavedAccesses(*TTI)) { 6266 LLVM_DEBUG( 6267 dbgs() 6268 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 6269 "which requires masked-interleaved support.\n"); 6270 CM.InterleaveInfo.reset(); 6271 } 6272 6273 if (UserVF) { 6274 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 6275 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 6276 // Collect the instructions (and their associated costs) that will be more 6277 // profitable to scalarize. 6278 CM.selectUserVectorizationFactor(UserVF); 6279 buildVPlansWithVPRecipes(UserVF, UserVF); 6280 LLVM_DEBUG(printPlans(dbgs())); 6281 return {{UserVF, 0}}; 6282 } 6283 6284 unsigned MaxVF = MaybeMaxVF.getValue(); 6285 assert(MaxVF != 0 && "MaxVF is zero."); 6286 6287 for (unsigned VF = 1; VF <= MaxVF; VF *= 2) { 6288 // Collect Uniform and Scalar instructions after vectorization with VF. 6289 CM.collectUniformsAndScalars(VF); 6290 6291 // Collect the instructions (and their associated costs) that will be more 6292 // profitable to scalarize. 6293 if (VF > 1) 6294 CM.collectInstsToScalarize(VF); 6295 } 6296 6297 buildVPlansWithVPRecipes(1, MaxVF); 6298 LLVM_DEBUG(printPlans(dbgs())); 6299 if (MaxVF == 1) 6300 return VectorizationFactor::Disabled(); 6301 6302 // Select the optimal vectorization factor. 6303 return CM.selectVectorizationFactor(MaxVF); 6304 } 6305 6306 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) { 6307 LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF 6308 << '\n'); 6309 BestVF = VF; 6310 BestUF = UF; 6311 6312 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 6313 return !Plan->hasVF(VF); 6314 }); 6315 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 6316 } 6317 6318 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 6319 DominatorTree *DT) { 6320 // Perform the actual loop transformation. 6321 6322 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 6323 VPCallbackILV CallbackILV(ILV); 6324 6325 VPTransformState State{BestVF, BestUF, LI, 6326 DT, ILV.Builder, ILV.VectorLoopValueMap, 6327 &ILV, CallbackILV}; 6328 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 6329 State.TripCount = ILV.getOrCreateTripCount(nullptr); 6330 6331 //===------------------------------------------------===// 6332 // 6333 // Notice: any optimization or new instruction that go 6334 // into the code below should also be implemented in 6335 // the cost-model. 6336 // 6337 //===------------------------------------------------===// 6338 6339 // 2. Copy and widen instructions from the old loop into the new loop. 6340 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 6341 VPlans.front()->execute(&State); 6342 6343 // 3. Fix the vectorized code: take care of header phi's, live-outs, 6344 // predication, updating analyses. 6345 ILV.fixVectorizedLoop(); 6346 } 6347 6348 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 6349 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 6350 BasicBlock *Latch = OrigLoop->getLoopLatch(); 6351 6352 // We create new control-flow for the vectorized loop, so the original 6353 // condition will be dead after vectorization if it's only used by the 6354 // branch. 
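// For example, in a typical latch of the form
//   %cmp = icmp eq i64 %iv.next, %n
//   br i1 %cmp, label %exit, label %loop
// the compare has the branch as its only user and is recorded as dead here
// (the IR names above are purely illustrative).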
6355 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 6356 if (Cmp && Cmp->hasOneUse()) 6357 DeadInstructions.insert(Cmp); 6358 6359 // We create new "steps" for induction variable updates to which the original 6360 // induction variables map. An original update instruction will be dead if 6361 // all its users except the induction variable are dead. 6362 for (auto &Induction : *Legal->getInductionVars()) { 6363 PHINode *Ind = Induction.first; 6364 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 6365 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 6366 return U == Ind || DeadInstructions.find(cast<Instruction>(U)) != 6367 DeadInstructions.end(); 6368 })) 6369 DeadInstructions.insert(IndUpdate); 6370 6371 // We record as "Dead" also the type-casting instructions we had identified 6372 // during induction analysis. We don't need any handling for them in the 6373 // vectorized loop because we have proven that, under a proper runtime 6374 // test guarding the vectorized loop, the value of the phi, and the casted 6375 // value of the phi, are the same. The last instruction in this casting chain 6376 // will get its scalar/vector/widened def from the scalar/vector/widened def 6377 // of the respective phi node. Any other casts in the induction def-use chain 6378 // have no other uses outside the phi update chain, and will be ignored. 6379 InductionDescriptor &IndDes = Induction.second; 6380 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6381 DeadInstructions.insert(Casts.begin(), Casts.end()); 6382 } 6383 } 6384 6385 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 6386 6387 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 6388 6389 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 6390 Instruction::BinaryOps BinOp) { 6391 // When unrolling and the VF is 1, we only need to add a simple scalar. 6392 Type *Ty = Val->getType(); 6393 assert(!Ty->isVectorTy() && "Val must be a scalar"); 6394 6395 if (Ty->isFloatingPointTy()) { 6396 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 6397 6398 // Floating point operations had to be 'fast' to enable the unrolling. 6399 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 6400 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 6401 } 6402 Constant *C = ConstantInt::get(Ty, StartIdx); 6403 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 6404 } 6405 6406 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 6407 SmallVector<Metadata *, 4> MDs; 6408 // Reserve first location for self reference to the LoopID metadata node. 6409 MDs.push_back(nullptr); 6410 bool IsUnrollMetadata = false; 6411 MDNode *LoopID = L->getLoopID(); 6412 if (LoopID) { 6413 // First find existing loop unrolling disable metadata. 6414 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 6415 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 6416 if (MD) { 6417 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 6418 IsUnrollMetadata = 6419 S && S->getString().startswith("llvm.loop.unroll.disable"); 6420 } 6421 MDs.push_back(LoopID->getOperand(i)); 6422 } 6423 } 6424 6425 if (!IsUnrollMetadata) { 6426 // Add runtime unroll disable metadata. 
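// After this, the loop id metadata looks roughly like
//   !0 = !{!0, <existing operands>, !1}
//   !1 = !{!"llvm.loop.unroll.runtime.disable"}
// with operand 0 pointing back at the node itself, as set up below.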
6427 LLVMContext &Context = L->getHeader()->getContext(); 6428 SmallVector<Metadata *, 1> DisableOperands; 6429 DisableOperands.push_back( 6430 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 6431 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 6432 MDs.push_back(DisableNode); 6433 MDNode *NewLoopID = MDNode::get(Context, MDs); 6434 // Set operand 0 to refer to the loop id itself. 6435 NewLoopID->replaceOperandWith(0, NewLoopID); 6436 L->setLoopID(NewLoopID); 6437 } 6438 } 6439 6440 bool LoopVectorizationPlanner::getDecisionAndClampRange( 6441 const std::function<bool(unsigned)> &Predicate, VFRange &Range) { 6442 assert(Range.End > Range.Start && "Trying to test an empty VF range."); 6443 bool PredicateAtRangeStart = Predicate(Range.Start); 6444 6445 for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2) 6446 if (Predicate(TmpVF) != PredicateAtRangeStart) { 6447 Range.End = TmpVF; 6448 break; 6449 } 6450 6451 return PredicateAtRangeStart; 6452 } 6453 6454 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 6455 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 6456 /// of VF's starting at a given VF and extending it as much as possible. Each 6457 /// vectorization decision can potentially shorten this sub-range during 6458 /// buildVPlan(). 6459 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) { 6460 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 6461 VFRange SubRange = {VF, MaxVF + 1}; 6462 VPlans.push_back(buildVPlan(SubRange)); 6463 VF = SubRange.End; 6464 } 6465 } 6466 6467 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 6468 VPlanPtr &Plan) { 6469 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 6470 6471 // Look for cached value. 6472 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 6473 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 6474 if (ECEntryIt != EdgeMaskCache.end()) 6475 return ECEntryIt->second; 6476 6477 VPValue *SrcMask = createBlockInMask(Src, Plan); 6478 6479 // The terminator has to be a branch inst! 6480 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 6481 assert(BI && "Unexpected terminator found"); 6482 6483 if (!BI->isConditional()) 6484 return EdgeMaskCache[Edge] = SrcMask; 6485 6486 VPValue *EdgeMask = Plan->getVPValue(BI->getCondition()); 6487 assert(EdgeMask && "No Edge Mask found for condition"); 6488 6489 if (BI->getSuccessor(0) != Dst) 6490 EdgeMask = Builder.createNot(EdgeMask); 6491 6492 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND. 6493 EdgeMask = Builder.createAnd(EdgeMask, SrcMask); 6494 6495 return EdgeMaskCache[Edge] = EdgeMask; 6496 } 6497 6498 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 6499 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 6500 6501 // Look for cached value. 6502 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 6503 if (BCEntryIt != BlockMaskCache.end()) 6504 return BCEntryIt->second; 6505 6506 // All-one mask is modelled as no-mask following the convention for masked 6507 // load/store/gather/scatter. Initialize BlockMask to no-mask. 6508 VPValue *BlockMask = nullptr; 6509 6510 if (OrigLoop->getHeader() == BB) { 6511 if (!CM.blockNeedsPredication(BB)) 6512 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 6513 6514 // Introduce the early-exit compare IV <= BTC to form header block mask. 
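// For example, with a trip count of 7 and VF = 4, BTC = 6: the second
// vector iteration has lane IVs {4, 5, 6, 7}, so lanes 4-6 stay active and
// the lane with IV 7 is masked off.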
6515 // This is used instead of IV < TC because TC may wrap, unlike BTC. 6516 VPValue *IV = Plan->getVPValue(Legal->getPrimaryInduction()); 6517 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 6518 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 6519 return BlockMaskCache[BB] = BlockMask; 6520 } 6521 6522 // This is the block mask. We OR all incoming edges. 6523 for (auto *Predecessor : predecessors(BB)) { 6524 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 6525 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 6526 return BlockMaskCache[BB] = EdgeMask; 6527 6528 if (!BlockMask) { // BlockMask has its initialized nullptr value. 6529 BlockMask = EdgeMask; 6530 continue; 6531 } 6532 6533 BlockMask = Builder.createOr(BlockMask, EdgeMask); 6534 } 6535 6536 return BlockMaskCache[BB] = BlockMask; 6537 } 6538 6539 VPInterleaveRecipe *VPRecipeBuilder::tryToInterleaveMemory(Instruction *I, 6540 VFRange &Range, 6541 VPlanPtr &Plan) { 6542 const InterleaveGroup<Instruction> *IG = CM.getInterleavedAccessGroup(I); 6543 if (!IG) 6544 return nullptr; 6545 6546 // Now check if IG is relevant for VF's in the given range. 6547 auto isIGMember = [&](Instruction *I) -> std::function<bool(unsigned)> { 6548 return [=](unsigned VF) -> bool { 6549 return (VF >= 2 && // Query is illegal for VF == 1 6550 CM.getWideningDecision(I, VF) == 6551 LoopVectorizationCostModel::CM_Interleave); 6552 }; 6553 }; 6554 if (!LoopVectorizationPlanner::getDecisionAndClampRange(isIGMember(I), Range)) 6555 return nullptr; 6556 6557 // I is a member of an InterleaveGroup for VF's in the (possibly trimmed) 6558 // range. If it's the primary member of the IG construct a VPInterleaveRecipe. 6559 // Otherwise, it's an adjunct member of the IG, do not construct any Recipe. 6560 assert(I == IG->getInsertPos() && 6561 "Generating a recipe for an adjunct member of an interleave group"); 6562 6563 VPValue *Mask = nullptr; 6564 if (Legal->isMaskRequired(I)) 6565 Mask = createBlockInMask(I->getParent(), Plan); 6566 6567 return new VPInterleaveRecipe(IG, Mask); 6568 } 6569 6570 VPWidenMemoryInstructionRecipe * 6571 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range, 6572 VPlanPtr &Plan) { 6573 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 6574 return nullptr; 6575 6576 auto willWiden = [&](unsigned VF) -> bool { 6577 if (VF == 1) 6578 return false; 6579 if (CM.isScalarAfterVectorization(I, VF) || 6580 CM.isProfitableToScalarize(I, VF)) 6581 return false; 6582 LoopVectorizationCostModel::InstWidening Decision = 6583 CM.getWideningDecision(I, VF); 6584 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 6585 "CM decision should be taken at this point."); 6586 assert(Decision != LoopVectorizationCostModel::CM_Interleave && 6587 "Interleave memory opportunity should be caught earlier."); 6588 return Decision != LoopVectorizationCostModel::CM_Scalarize; 6589 }; 6590 6591 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 6592 return nullptr; 6593 6594 VPValue *Mask = nullptr; 6595 if (Legal->isMaskRequired(I)) 6596 Mask = createBlockInMask(I->getParent(), Plan); 6597 6598 return new VPWidenMemoryInstructionRecipe(*I, Mask); 6599 } 6600 6601 VPWidenIntOrFpInductionRecipe * 6602 VPRecipeBuilder::tryToOptimizeInduction(Instruction *I, VFRange &Range) { 6603 if (PHINode *Phi = dyn_cast<PHINode>(I)) { 6604 // Check if this is an integer or fp induction. If so, build the recipe that 6605 // produces its scalar and vector values. 
6606 InductionDescriptor II = Legal->getInductionVars()->lookup(Phi); 6607 if (II.getKind() == InductionDescriptor::IK_IntInduction || 6608 II.getKind() == InductionDescriptor::IK_FpInduction) 6609 return new VPWidenIntOrFpInductionRecipe(Phi); 6610 6611 return nullptr; 6612 } 6613 6614 // Optimize the special case where the source is a constant integer 6615 // induction variable. Notice that we can only optimize the 'trunc' case 6616 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 6617 // (c) other casts depend on pointer size. 6618 6619 // Determine whether \p K is a truncation based on an induction variable that 6620 // can be optimized. 6621 auto isOptimizableIVTruncate = 6622 [&](Instruction *K) -> std::function<bool(unsigned)> { 6623 return 6624 [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); }; 6625 }; 6626 6627 if (isa<TruncInst>(I) && LoopVectorizationPlanner::getDecisionAndClampRange( 6628 isOptimizableIVTruncate(I), Range)) 6629 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 6630 cast<TruncInst>(I)); 6631 return nullptr; 6632 } 6633 6634 VPBlendRecipe *VPRecipeBuilder::tryToBlend(Instruction *I, VPlanPtr &Plan) { 6635 PHINode *Phi = dyn_cast<PHINode>(I); 6636 if (!Phi || Phi->getParent() == OrigLoop->getHeader()) 6637 return nullptr; 6638 6639 // We know that all PHIs in non-header blocks are converted into selects, so 6640 // we don't have to worry about the insertion order and we can just use the 6641 // builder. At this point we generate the predication tree. There may be 6642 // duplications since this is a simple recursive scan, but future 6643 // optimizations will clean it up. 6644 6645 SmallVector<VPValue *, 2> Masks; 6646 unsigned NumIncoming = Phi->getNumIncomingValues(); 6647 for (unsigned In = 0; In < NumIncoming; In++) { 6648 VPValue *EdgeMask = 6649 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 6650 assert((EdgeMask || NumIncoming == 1) && 6651 "Multiple predecessors with one having a full mask"); 6652 if (EdgeMask) 6653 Masks.push_back(EdgeMask); 6654 } 6655 return new VPBlendRecipe(Phi, Masks); 6656 } 6657 6658 bool VPRecipeBuilder::tryToWiden(Instruction *I, VPBasicBlock *VPBB, 6659 VFRange &Range) { 6660 6661 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 6662 [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range); 6663 6664 if (IsPredicated) 6665 return false; 6666 6667 auto IsVectorizableOpcode = [](unsigned Opcode) { 6668 switch (Opcode) { 6669 case Instruction::Add: 6670 case Instruction::And: 6671 case Instruction::AShr: 6672 case Instruction::BitCast: 6673 case Instruction::Br: 6674 case Instruction::Call: 6675 case Instruction::FAdd: 6676 case Instruction::FCmp: 6677 case Instruction::FDiv: 6678 case Instruction::FMul: 6679 case Instruction::FNeg: 6680 case Instruction::FPExt: 6681 case Instruction::FPToSI: 6682 case Instruction::FPToUI: 6683 case Instruction::FPTrunc: 6684 case Instruction::FRem: 6685 case Instruction::FSub: 6686 case Instruction::GetElementPtr: 6687 case Instruction::ICmp: 6688 case Instruction::IntToPtr: 6689 case Instruction::Load: 6690 case Instruction::LShr: 6691 case Instruction::Mul: 6692 case Instruction::Or: 6693 case Instruction::PHI: 6694 case Instruction::PtrToInt: 6695 case Instruction::SDiv: 6696 case Instruction::Select: 6697 case Instruction::SExt: 6698 case Instruction::Shl: 6699 case Instruction::SIToFP: 6700 case Instruction::SRem: 6701 case Instruction::Store: 6702 case 
Instruction::Sub: 6703 case Instruction::Trunc: 6704 case Instruction::UDiv: 6705 case Instruction::UIToFP: 6706 case Instruction::URem: 6707 case Instruction::Xor: 6708 case Instruction::ZExt: 6709 return true; 6710 } 6711 return false; 6712 }; 6713 6714 if (!IsVectorizableOpcode(I->getOpcode())) 6715 return false; 6716 6717 if (CallInst *CI = dyn_cast<CallInst>(I)) { 6718 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6719 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 6720 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect)) 6721 return false; 6722 } 6723 6724 auto willWiden = [&](unsigned VF) -> bool { 6725 if (!isa<PHINode>(I) && (CM.isScalarAfterVectorization(I, VF) || 6726 CM.isProfitableToScalarize(I, VF))) 6727 return false; 6728 if (CallInst *CI = dyn_cast<CallInst>(I)) { 6729 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6730 // The following case may be scalarized depending on the VF. 6731 // The flag shows whether we use Intrinsic or a usual Call for vectorized 6732 // version of the instruction. 6733 // Is it beneficial to perform intrinsic call compared to lib call? 6734 bool NeedToScalarize; 6735 unsigned CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 6736 bool UseVectorIntrinsic = 6737 ID && CM.getVectorIntrinsicCost(CI, VF) <= CallCost; 6738 return UseVectorIntrinsic || !NeedToScalarize; 6739 } 6740 if (isa<LoadInst>(I) || isa<StoreInst>(I)) { 6741 assert(CM.getWideningDecision(I, VF) == 6742 LoopVectorizationCostModel::CM_Scalarize && 6743 "Memory widening decisions should have been taken care by now"); 6744 return false; 6745 } 6746 return true; 6747 }; 6748 6749 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 6750 return false; 6751 6752 // Success: widen this instruction. We optimize the common case where 6753 // consecutive instructions can be represented by a single recipe. 6754 if (!VPBB->empty()) { 6755 VPWidenRecipe *LastWidenRecipe = dyn_cast<VPWidenRecipe>(&VPBB->back()); 6756 if (LastWidenRecipe && LastWidenRecipe->appendInstruction(I)) 6757 return true; 6758 } 6759 6760 VPBB->appendRecipe(new VPWidenRecipe(I)); 6761 return true; 6762 } 6763 6764 VPBasicBlock *VPRecipeBuilder::handleReplication( 6765 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 6766 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 6767 VPlanPtr &Plan) { 6768 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 6769 [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); }, 6770 Range); 6771 6772 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 6773 [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range); 6774 6775 auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated); 6776 6777 // Find if I uses a predicated instruction. If so, it will use its scalar 6778 // value. Avoid hoisting the insert-element which packs the scalar value into 6779 // a vector value, as that happens iff all users use the vector value. 6780 for (auto &Op : I->operands()) 6781 if (auto *PredInst = dyn_cast<Instruction>(Op)) 6782 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 6783 PredInst2Recipe[PredInst]->setAlsoPack(false); 6784 6785 // Finalize the recipe for Instr, first if it is not predicated. 
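// In the predicated case, handled further below, the recipe is instead
// placed inside a replicate region built by createReplicateRegion, and a
// new empty VPBasicBlock is returned so that subsequent recipes are emitted
// after that region.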
6786 if (!IsPredicated) { 6787 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 6788 VPBB->appendRecipe(Recipe); 6789 return VPBB; 6790 } 6791 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 6792 assert(VPBB->getSuccessors().empty() && 6793 "VPBB has successors when handling predicated replication."); 6794 // Record predicated instructions for above packing optimizations. 6795 PredInst2Recipe[I] = Recipe; 6796 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 6797 VPBlockUtils::insertBlockAfter(Region, VPBB); 6798 auto *RegSucc = new VPBasicBlock(); 6799 VPBlockUtils::insertBlockAfter(RegSucc, Region); 6800 return RegSucc; 6801 } 6802 6803 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 6804 VPRecipeBase *PredRecipe, 6805 VPlanPtr &Plan) { 6806 // Instructions marked for predication are replicated and placed under an 6807 // if-then construct to prevent side-effects. 6808 6809 // Generate recipes to compute the block mask for this region. 6810 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 6811 6812 // Build the triangular if-then region. 6813 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 6814 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 6815 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 6816 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 6817 auto *PHIRecipe = 6818 Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr); 6819 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 6820 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 6821 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 6822 6823 // Note: first set Entry as region entry and then connect successors starting 6824 // from it in order, to propagate the "parent" of each VPBasicBlock. 6825 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 6826 VPBlockUtils::connectBlocks(Pred, Exit); 6827 6828 return Region; 6829 } 6830 6831 bool VPRecipeBuilder::tryToCreateRecipe(Instruction *Instr, VFRange &Range, 6832 VPlanPtr &Plan, VPBasicBlock *VPBB) { 6833 VPRecipeBase *Recipe = nullptr; 6834 // Check if Instr should belong to an interleave memory recipe, or already 6835 // does. In the latter case Instr is irrelevant. 6836 if ((Recipe = tryToInterleaveMemory(Instr, Range, Plan))) { 6837 VPBB->appendRecipe(Recipe); 6838 return true; 6839 } 6840 6841 // Check if Instr is a memory operation that should be widened. 6842 if ((Recipe = tryToWidenMemory(Instr, Range, Plan))) { 6843 VPBB->appendRecipe(Recipe); 6844 return true; 6845 } 6846 6847 // Check if Instr should form some PHI recipe. 6848 if ((Recipe = tryToOptimizeInduction(Instr, Range))) { 6849 VPBB->appendRecipe(Recipe); 6850 return true; 6851 } 6852 if ((Recipe = tryToBlend(Instr, Plan))) { 6853 VPBB->appendRecipe(Recipe); 6854 return true; 6855 } 6856 if (PHINode *Phi = dyn_cast<PHINode>(Instr)) { 6857 VPBB->appendRecipe(new VPWidenPHIRecipe(Phi)); 6858 return true; 6859 } 6860 6861 // Check if Instr is to be widened by a general VPWidenRecipe, after 6862 // having first checked for specific widening recipes that deal with 6863 // Interleave Groups, Inductions and Phi nodes. 
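// If this also fails, tryToCreateRecipe returns false and the caller
// (buildVPlanWithVPRecipes) falls back to handleReplication, which
// scalarizes Instr per lane and, if needed, predicates it.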
6864 if (tryToWiden(Instr, VPBB, Range)) 6865 return true; 6866 6867 return false; 6868 } 6869 6870 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF, 6871 unsigned MaxVF) { 6872 assert(OrigLoop->empty() && "Inner loop expected."); 6873 6874 // Collect conditions feeding internal conditional branches; they need to be 6875 // represented in VPlan for it to model masking. 6876 SmallPtrSet<Value *, 1> NeedDef; 6877 6878 auto *Latch = OrigLoop->getLoopLatch(); 6879 for (BasicBlock *BB : OrigLoop->blocks()) { 6880 if (BB == Latch) 6881 continue; 6882 BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator()); 6883 if (Branch && Branch->isConditional()) 6884 NeedDef.insert(Branch->getCondition()); 6885 } 6886 6887 // If the tail is to be folded by masking, the primary induction variable 6888 // needs to be represented in VPlan for it to model early-exit masking. 6889 if (CM.foldTailByMasking()) 6890 NeedDef.insert(Legal->getPrimaryInduction()); 6891 6892 // Collect instructions from the original loop that will become trivially dead 6893 // in the vectorized loop. We don't need to vectorize these instructions. For 6894 // example, original induction update instructions can become dead because we 6895 // separately emit induction "steps" when generating code for the new loop. 6896 // Similarly, we create a new latch condition when setting up the structure 6897 // of the new loop, so the old one can become dead. 6898 SmallPtrSet<Instruction *, 4> DeadInstructions; 6899 collectTriviallyDeadInstructions(DeadInstructions); 6900 6901 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 6902 VFRange SubRange = {VF, MaxVF + 1}; 6903 VPlans.push_back( 6904 buildVPlanWithVPRecipes(SubRange, NeedDef, DeadInstructions)); 6905 VF = SubRange.End; 6906 } 6907 } 6908 6909 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( 6910 VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef, 6911 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 6912 // Hold a mapping from predicated instructions to their recipes, in order to 6913 // fix their AlsoPack behavior if a user is determined to replicate and use a 6914 // scalar instead of vector value. 6915 DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe; 6916 6917 DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 6918 DenseMap<Instruction *, Instruction *> SinkAfterInverse; 6919 6920 // Create a dummy pre-entry VPBasicBlock to start building the VPlan. 6921 VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry"); 6922 auto Plan = llvm::make_unique<VPlan>(VPBB); 6923 6924 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, Builder); 6925 // Represent values that will have defs inside VPlan. 6926 for (Value *V : NeedDef) 6927 Plan->addVPValue(V); 6928 6929 // Scan the body of the loop in a topological order to visit each basic block 6930 // after having visited its predecessor basic blocks. 6931 LoopBlocksDFS DFS(OrigLoop); 6932 DFS.perform(LI); 6933 6934 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6935 // Relevant instructions from basic block BB will be grouped into VPRecipe 6936 // ingredients and fill a new VPBasicBlock. 6937 unsigned VPBBsForBB = 0; 6938 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName()); 6939 VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB); 6940 VPBB = FirstVPBBForBB; 6941 Builder.setInsertPoint(VPBB); 6942 6943 std::vector<Instruction *> Ingredients; 6944 6945 // Organize the ingredients to vectorize from current basic block in the 6946 // right order. 
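// "Right order" here mostly means applying the sink-after reordering
// required for first-order recurrences: an instruction that legality has
// recorded in SinkAfter is held back and re-emitted immediately after the
// instruction it must follow, using SinkAfterInverse as the reverse map.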
6947 for (Instruction &I : BB->instructionsWithoutDebug()) { 6948 Instruction *Instr = &I; 6949 6950 // First filter out irrelevant instructions, to ensure no recipes are 6951 // built for them. 6952 if (isa<BranchInst>(Instr) || 6953 DeadInstructions.find(Instr) != DeadInstructions.end()) 6954 continue; 6955 6956 // I is a member of an InterleaveGroup for Range.Start. If it's an adjunct 6957 // member of the IG, do not construct any Recipe for it. 6958 const InterleaveGroup<Instruction> *IG = 6959 CM.getInterleavedAccessGroup(Instr); 6960 if (IG && Instr != IG->getInsertPos() && 6961 Range.Start >= 2 && // Query is illegal for VF == 1 6962 CM.getWideningDecision(Instr, Range.Start) == 6963 LoopVectorizationCostModel::CM_Interleave) { 6964 auto SinkCandidate = SinkAfterInverse.find(Instr); 6965 if (SinkCandidate != SinkAfterInverse.end()) 6966 Ingredients.push_back(SinkCandidate->second); 6967 continue; 6968 } 6969 6970 // Move instructions to handle first-order recurrences, step 1: avoid 6971 // handling this instruction until after we've handled the instruction it 6972 // should follow. 6973 auto SAIt = SinkAfter.find(Instr); 6974 if (SAIt != SinkAfter.end()) { 6975 LLVM_DEBUG(dbgs() << "Sinking" << *SAIt->first << " after" 6976 << *SAIt->second 6977 << " to vectorize a 1st order recurrence.\n"); 6978 SinkAfterInverse[SAIt->second] = Instr; 6979 continue; 6980 } 6981 6982 Ingredients.push_back(Instr); 6983 6984 // Move instructions to handle first-order recurrences, step 2: push the 6985 // instruction to be sunk at its insertion point. 6986 auto SAInvIt = SinkAfterInverse.find(Instr); 6987 if (SAInvIt != SinkAfterInverse.end()) 6988 Ingredients.push_back(SAInvIt->second); 6989 } 6990 6991 // Introduce each ingredient into VPlan. 6992 for (Instruction *Instr : Ingredients) { 6993 if (RecipeBuilder.tryToCreateRecipe(Instr, Range, Plan, VPBB)) 6994 continue; 6995 6996 // Otherwise, if all widening options failed, Instruction is to be 6997 // replicated. This may create a successor for VPBB. 6998 VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication( 6999 Instr, Range, VPBB, PredInst2Recipe, Plan); 7000 if (NextVPBB != VPBB) { 7001 VPBB = NextVPBB; 7002 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 7003 : ""); 7004 } 7005 } 7006 } 7007 7008 // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks 7009 // may also be empty, such as the last one VPBB, reflecting original 7010 // basic-blocks with no recipes. 7011 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry()); 7012 assert(PreEntry->empty() && "Expecting empty pre-entry block."); 7013 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor()); 7014 VPBlockUtils::disconnectBlocks(PreEntry, Entry); 7015 delete PreEntry; 7016 7017 std::string PlanName; 7018 raw_string_ostream RSO(PlanName); 7019 unsigned VF = Range.Start; 7020 Plan->addVF(VF); 7021 RSO << "Initial VPlan for VF={" << VF; 7022 for (VF *= 2; VF < Range.End; VF *= 2) { 7023 Plan->addVF(VF); 7024 RSO << "," << VF; 7025 } 7026 RSO << "},UF>=1"; 7027 RSO.flush(); 7028 Plan->setName(PlanName); 7029 7030 return Plan; 7031 } 7032 7033 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 7034 // Outer loop handling: They may require CFG and instruction level 7035 // transformations before even evaluating whether vectorization is profitable. 7036 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 7037 // the vectorization pipeline. 
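  // Illustrative example (assumed, not from this file): a loop nest such as
  //   for (i = 0; i < n; ++i)      // OrigLoop, the outer loop
  //     for (j = 0; j < m; ++j)
  //       a[i][j] = 0;
  // only reaches this point on the VPlan-native path; the whole nest is first
  // captured as a hierarchical CFG so that subsequent transformations operate
  // on VPlan without touching the input IR.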
7038 assert(!OrigLoop->empty()); 7039 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 7040 7041 // Create new empty VPlan 7042 auto Plan = llvm::make_unique<VPlan>(); 7043 7044 // Build hierarchical CFG 7045 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 7046 HCFGBuilder.buildHierarchicalCFG(); 7047 7048 for (unsigned VF = Range.Start; VF < Range.End; VF *= 2) 7049 Plan->addVF(VF); 7050 7051 if (EnableVPlanPredication) { 7052 VPlanPredicator VPP(*Plan); 7053 VPP.predicate(); 7054 7055 // Avoid running transformation to recipes until masked code generation in 7056 // VPlan-native path is in place. 7057 return Plan; 7058 } 7059 7060 SmallPtrSet<Instruction *, 1> DeadInstructions; 7061 VPlanHCFGTransforms::VPInstructionsToVPRecipes( 7062 Plan, Legal->getInductionVars(), DeadInstructions); 7063 7064 return Plan; 7065 } 7066 7067 Value* LoopVectorizationPlanner::VPCallbackILV:: 7068 getOrCreateVectorValues(Value *V, unsigned Part) { 7069 return ILV.getOrCreateVectorValue(V, Part); 7070 } 7071 7072 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const { 7073 O << " +\n" 7074 << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 7075 IG->getInsertPos()->printAsOperand(O, false); 7076 if (User) { 7077 O << ", "; 7078 User->getOperand(0)->printAsOperand(O); 7079 } 7080 O << "\\l\""; 7081 for (unsigned i = 0; i < IG->getFactor(); ++i) 7082 if (Instruction *I = IG->getMember(i)) 7083 O << " +\n" 7084 << Indent << "\" " << VPlanIngredient(I) << " " << i << "\\l\""; 7085 } 7086 7087 void VPWidenRecipe::execute(VPTransformState &State) { 7088 for (auto &Instr : make_range(Begin, End)) 7089 State.ILV->widenInstruction(Instr); 7090 } 7091 7092 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 7093 assert(!State.Instance && "Int or FP induction being replicated."); 7094 State.ILV->widenIntOrFpInduction(IV, Trunc); 7095 } 7096 7097 void VPWidenPHIRecipe::execute(VPTransformState &State) { 7098 State.ILV->widenPHIInstruction(Phi, State.UF, State.VF); 7099 } 7100 7101 void VPBlendRecipe::execute(VPTransformState &State) { 7102 State.ILV->setDebugLocFromInst(State.Builder, Phi); 7103 // We know that all PHIs in non-header blocks are converted into 7104 // selects, so we don't have to worry about the insertion order and we 7105 // can just use the builder. 7106 // At this point we generate the predication tree. There may be 7107 // duplications since this is a simple recursive scan, but future 7108 // optimizations will clean it up. 7109 7110 unsigned NumIncoming = Phi->getNumIncomingValues(); 7111 7112 assert((User || NumIncoming == 1) && 7113 "Multiple predecessors with predecessors having a full mask"); 7114 // Generate a sequence of selects of the form: 7115 // SELECT(Mask3, In3, 7116 // SELECT(Mask2, In2, 7117 // ( ...))) 7118 InnerLoopVectorizer::VectorParts Entry(State.UF); 7119 for (unsigned In = 0; In < NumIncoming; ++In) { 7120 for (unsigned Part = 0; Part < State.UF; ++Part) { 7121 // We might have single edge PHIs (blocks) - use an identity 7122 // 'select' for the first PHI operand. 7123 Value *In0 = 7124 State.ILV->getOrCreateVectorValue(Phi->getIncomingValue(In), Part); 7125 if (In == 0) 7126 Entry[Part] = In0; // Initialize with the first incoming value. 7127 else { 7128 // Select between the current value and the previous incoming edge 7129 // based on the incoming mask. 
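        // e.g. (illustrative only): after the third incoming value the chain
        // has the shape select(%mask2, %in2, select(%mask1, %in1, %in0)).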
        Value *Cond = State.get(User->getOperand(In), Part);
        Entry[Part] =
            State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
      }
    }
  }
  for (unsigned Part = 0; Part < State.UF; ++Part)
    State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
}

void VPInterleaveRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Interleave group being replicated.");
  if (!User)
    return State.ILV->vectorizeInterleaveGroup(IG->getInsertPos());

  // Last (and currently only) operand is a mask.
  InnerLoopVectorizer::VectorParts MaskValues(State.UF);
  VPValue *Mask = User->getOperand(User->getNumOperands() - 1);
  for (unsigned Part = 0; Part < State.UF; ++Part)
    MaskValues[Part] = State.get(Mask, Part);
  State.ILV->vectorizeInterleaveGroup(IG->getInsertPos(), &MaskValues);
}

void VPReplicateRecipe::execute(VPTransformState &State) {
  if (State.Instance) { // Generate a single instance.
    State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
    // Insert scalar instance packing it into a vector.
    if (AlsoPack && State.VF > 1) {
      // If we're constructing lane 0, initialize to start from undef.
      if (State.Instance->Lane == 0) {
        Value *Undef =
            UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
        State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
      }
      State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
    }
    return;
  }

  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
  unsigned EndLane = IsUniform ? 1 : State.VF;
  for (unsigned Part = 0; Part < State.UF; ++Part)
    for (unsigned Lane = 0; Lane < EndLane; ++Lane)
      State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
}

void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Branch on Mask works only on single instance.");

  unsigned Part = State.Instance->Part;
  unsigned Lane = State.Instance->Lane;

  Value *ConditionBit = nullptr;
  if (!User) // Block in mask is all-one.
    ConditionBit = State.Builder.getTrue();
  else {
    VPValue *BlockInMask = User->getOperand(0);
    ConditionBit = State.get(BlockInMask, Part);
    if (ConditionBit->getType()->isVectorTy())
      ConditionBit = State.Builder.CreateExtractElement(
          ConditionBit, State.Builder.getInt32(Lane));
  }

  // Replace the temporary unreachable terminator with a new conditional
  // branch, whose two destinations will be set later when they are created.
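  // Sketch of the intended result (illustrative only): the block ends with
  //   br i1 %cond, label %pred.store.if, label %pred.store.continue
  // (names as produced for a predicated store), with both labels filled in
  // once the sibling VPBasicBlocks have been emitted.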
7197 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 7198 assert(isa<UnreachableInst>(CurrentTerminator) && 7199 "Expected to replace unreachable terminator with conditional branch."); 7200 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 7201 CondBr->setSuccessor(0, nullptr); 7202 ReplaceInstWithInst(CurrentTerminator, CondBr); 7203 } 7204 7205 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 7206 assert(State.Instance && "Predicated instruction PHI works per instance."); 7207 Instruction *ScalarPredInst = cast<Instruction>( 7208 State.ValueMap.getScalarValue(PredInst, *State.Instance)); 7209 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 7210 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 7211 assert(PredicatingBB && "Predicated block has no single predecessor."); 7212 7213 // By current pack/unpack logic we need to generate only a single phi node: if 7214 // a vector value for the predicated instruction exists at this point it means 7215 // the instruction has vector users only, and a phi for the vector value is 7216 // needed. In this case the recipe of the predicated instruction is marked to 7217 // also do that packing, thereby "hoisting" the insert-element sequence. 7218 // Otherwise, a phi node for the scalar value is needed. 7219 unsigned Part = State.Instance->Part; 7220 if (State.ValueMap.hasVectorValue(PredInst, Part)) { 7221 Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part); 7222 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 7223 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 7224 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 7225 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 7226 State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache. 7227 } else { 7228 Type *PredInstType = PredInst->getType(); 7229 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 7230 Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB); 7231 Phi->addIncoming(ScalarPredInst, PredicatedBB); 7232 State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi); 7233 } 7234 } 7235 7236 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 7237 if (!User) 7238 return State.ILV->vectorizeMemoryInstruction(&Instr); 7239 7240 // Last (and currently only) operand is a mask. 7241 InnerLoopVectorizer::VectorParts MaskValues(State.UF); 7242 VPValue *Mask = User->getOperand(User->getNumOperands() - 1); 7243 for (unsigned Part = 0; Part < State.UF; ++Part) 7244 MaskValues[Part] = State.get(Mask, Part); 7245 State.ILV->vectorizeMemoryInstruction(&Instr, &MaskValues); 7246 } 7247 7248 static ScalarEpilogueLowering 7249 getScalarEpilogueLowering(Function *F, Loop *L, LoopVectorizeHints &Hints, 7250 ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) { 7251 ScalarEpilogueLowering SEL = CM_ScalarEpilogueAllowed; 7252 if (Hints.getForce() != LoopVectorizeHints::FK_Enabled && 7253 (F->hasOptSize() || 7254 llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI))) 7255 SEL = CM_ScalarEpilogueNotAllowedOptSize; 7256 else if (Hints.getPredicate()) 7257 SEL = CM_ScalarEpilogueNotNeededPredicatePragma; 7258 7259 return SEL; 7260 } 7261 7262 // Process the loop in the VPlan-native vectorization path. 
// This path builds VPlan upfront in the vectorization pipeline, which allows
// applying VPlan-to-VPlan transformations from the very beginning without
// modifying the input LLVM IR.
static bool processLoopInVPlanNativePath(
    Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
    LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
    TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
    OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
    ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {

  assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
  Function *F = L->getHeader()->getParent();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(F, L, Hints, PSI, BFI);

  LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  // Use the planner for outer loop vectorization.
  // TODO: CM is not used at this point inside the planner. Turn CM into an
  // optional argument if we don't need it in the future.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM);

  // Get user vectorization factor.
  const unsigned UserVF = Hints.getWidth();

  // Plan how to best vectorize, return the best VF and its cost.
  const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);

  // If we are stress testing VPlan builds, do not attempt to generate vector
  // code. Masked vector code generation support will follow soon.
  // Also, do not attempt to vectorize if no vector code will be produced.
  if (VPlanBuildStressTest || EnableVPlanPredication ||
      VectorizationFactor::Disabled() == VF)
    return false;

  LVP.setBestPlan(VF.Width, 1);

  InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
                         &CM);
  LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
                    << L->getHeader()->getParent()->getName() << "\"\n");
  LVP.executePlan(LB, DT);

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();

  LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::processLoop(Loop *L) {
  assert((EnableVPlanNativePath || L->empty()) &&
         "VPlan-native path is not enabled. Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
                    << L->getHeader()->getParent()->getName() << "\" from "
                    << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);

  LLVM_DEBUG(
      dbgs() << "LV: Loop hints:"
             << " force="
             << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                     ? "disabled"
                     : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                            ? "enabled"
                            : "?"))
             << " width=" << Hints.getWidth()
             << " unroll=" << Hints.getInterleave() << "\n");

  // Function containing loop
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it is
  // important to generate an optimization remark for each loop. Most of these
  // messages are generated as OptimizationRemarkAnalysis. Remarks generated as
  // OptimizationRemark and OptimizationRemarkMissed are used, respectively,
  // for the less verbose reporting of vectorized loops and of unvectorized
  // loops that may benefit from vectorization.

  if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements(*ORE);
  LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
                                &Requirements, &Hints, DB, AC);
  if (!LVL.canVectorize(EnableVPlanNativePath)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check the function attributes and profiles to find out if this function
  // should be optimized for size.
  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(F, L, Hints, PSI, BFI);

  // Entrance to the VPlan-native vectorization path. Outer loops are processed
  // here. They may require CFG and instruction level transformations before
  // even evaluating whether vectorization is profitable. Since we cannot
  // modify the incoming IR, we need to build VPlan upfront in the
  // vectorization pipeline.
  if (!L->empty())
    return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
                                        ORE, BFI, PSI, Hints);

  assert(L->empty() && "Inner loop expected.");
  // Check the loop for a trip count threshold: vectorize loops with a tiny
  // trip count by optimizing for size, to minimize overheads.
  // Prefer a constant trip count over profile data, and profile data over the
  // upper bound estimate.
  unsigned ExpectedTC = 0;
  bool HasExpectedTC = false;
  if (const SCEVConstant *ConstExits =
          dyn_cast<SCEVConstant>(SE->getBackedgeTakenCount(L))) {
    const APInt &ExitsCount = ConstExits->getAPInt();
    // We are interested in small values for ExpectedTC. Skip over those that
    // can't fit an unsigned.
    if (ExitsCount.ult(std::numeric_limits<unsigned>::max())) {
      ExpectedTC = static_cast<unsigned>(ExitsCount.getZExtValue()) + 1;
      HasExpectedTC = true;
    }
  }
  // ExpectedTC may be large because it's bound by a variable. Check
  // profiling information to validate we should vectorize.
  if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) {
    auto EstimatedTC = getLoopEstimatedTripCount(L);
    if (EstimatedTC) {
      ExpectedTC = *EstimatedTC;
      HasExpectedTC = true;
    }
  }
  if (!HasExpectedTC) {
    ExpectedTC = SE->getSmallConstantMaxTripCount(L);
    HasExpectedTC = (ExpectedTC > 0);
  }

  if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem right -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    LLVM_DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat "
                         "attribute is used.\n");
    ORE->emit(createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                     "NoImplicitFloat", L)
              << "loop not vectorized due to NoImplicitFloat attribute");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    LLVM_DEBUG(
        dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
    ORE->emit(
        createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
        << "loop not vectorized due to unsafe FP support.");
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved) {
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM);

  // Get user vectorization factor.
  unsigned UserVF = Hints.getWidth();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;
  unsigned UserIC = Hints.getInterleave();

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                         "requirements.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  if (VF.Width == 1) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being
    // explicitly requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;
  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not beneficial to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is *beneficial* to vectorize the loop, then do it.
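    // Reaching this branch implies the selected VF.Width is at least 2; the
    // vectorizer below widens by VF.Width and, when IC > 1, also interleaves
    // the resulting vector loop IC times.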
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LVP.executePlan(LB, DT);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling of the scalar loop when there
    // are no runtime checks about strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      DisableRuntimeUnroll = true;

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
    return false;

  bool Changed = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= processLoop(L);
  }

  // Done processing each loop nest in the function.
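  // Note that Changed also reflects the loop-simplify and LCSSA work performed
  // above, so the pass may report a change even when no loop was actually
  // vectorized or interleaved.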
7697 return Changed; 7698 } 7699 7700 PreservedAnalyses LoopVectorizePass::run(Function &F, 7701 FunctionAnalysisManager &AM) { 7702 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 7703 auto &LI = AM.getResult<LoopAnalysis>(F); 7704 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 7705 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 7706 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 7707 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 7708 auto &AA = AM.getResult<AAManager>(F); 7709 auto &AC = AM.getResult<AssumptionAnalysis>(F); 7710 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 7711 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 7712 MemorySSA *MSSA = EnableMSSALoopDependency 7713 ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() 7714 : nullptr; 7715 7716 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 7717 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 7718 [&](Loop &L) -> const LoopAccessInfo & { 7719 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, MSSA}; 7720 return LAM.getResult<LoopAccessAnalysis>(L, AR); 7721 }; 7722 const ModuleAnalysisManager &MAM = 7723 AM.getResult<ModuleAnalysisManagerFunctionProxy>(F).getManager(); 7724 ProfileSummaryInfo *PSI = 7725 MAM.getCachedResult<ProfileSummaryAnalysis>(*F.getParent()); 7726 bool Changed = 7727 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI); 7728 if (!Changed) 7729 return PreservedAnalyses::all(); 7730 PreservedAnalyses PA; 7731 7732 // We currently do not preserve loopinfo/dominator analyses with outer loop 7733 // vectorization. Until this is addressed, mark these analyses as preserved 7734 // only for non-VPlan-native path. 7735 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 7736 if (!EnableVPlanNativePath) { 7737 PA.preserve<LoopAnalysis>(); 7738 PA.preserve<DominatorTreeAnalysis>(); 7739 } 7740 PA.preserve<BasicAA>(); 7741 PA.preserve<GlobalsAA>(); 7742 return PA; 7743 } 7744
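// Usage sketch (illustrative; relies only on standard opt flags, nothing
// defined in this file):
//   opt -passes=loop-vectorize -S in.ll -o out.ll   (new pass manager)
//   opt -loop-vectorize -S in.ll -o out.ll          (legacy pass manager)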