//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one. (A rough illustration follows at
// the end of this header comment.)
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
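//
// As a rough illustration of the widening described above (VF = 4; this is
// conceptual pseudocode only, not the exact IR the pass emits, and the array
// names are made up for the example):
//
//   // original scalar loop               // vectorized loop (VF = 4)
//   for (i = 0; i < n; i += 1)            for (i = 0; i < n - n % 4; i += 4)
//     C[i] = A[i] + B[i];                   C[i..i+3] = A[i..i+3] + B[i..i+3];
//
// The remaining n % 4 iterations run in the scalar epilogue loop that the
// vectorizer emits after the vector loop (see InnerLoopVectorizer below).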
53 // 54 //===----------------------------------------------------------------------===// 55 56 #include "llvm/Transforms/Vectorize/LoopVectorize.h" 57 #include "LoopVectorizationPlanner.h" 58 #include "VPRecipeBuilder.h" 59 #include "VPlan.h" 60 #include "VPlanHCFGBuilder.h" 61 #include "VPlanPredicator.h" 62 #include "VPlanTransforms.h" 63 #include "llvm/ADT/APInt.h" 64 #include "llvm/ADT/ArrayRef.h" 65 #include "llvm/ADT/DenseMap.h" 66 #include "llvm/ADT/DenseMapInfo.h" 67 #include "llvm/ADT/Hashing.h" 68 #include "llvm/ADT/MapVector.h" 69 #include "llvm/ADT/None.h" 70 #include "llvm/ADT/Optional.h" 71 #include "llvm/ADT/STLExtras.h" 72 #include "llvm/ADT/SmallPtrSet.h" 73 #include "llvm/ADT/SmallSet.h" 74 #include "llvm/ADT/SmallVector.h" 75 #include "llvm/ADT/Statistic.h" 76 #include "llvm/ADT/StringRef.h" 77 #include "llvm/ADT/Twine.h" 78 #include "llvm/ADT/iterator_range.h" 79 #include "llvm/Analysis/AssumptionCache.h" 80 #include "llvm/Analysis/BasicAliasAnalysis.h" 81 #include "llvm/Analysis/BlockFrequencyInfo.h" 82 #include "llvm/Analysis/CFG.h" 83 #include "llvm/Analysis/CodeMetrics.h" 84 #include "llvm/Analysis/DemandedBits.h" 85 #include "llvm/Analysis/GlobalsModRef.h" 86 #include "llvm/Analysis/LoopAccessAnalysis.h" 87 #include "llvm/Analysis/LoopAnalysisManager.h" 88 #include "llvm/Analysis/LoopInfo.h" 89 #include "llvm/Analysis/LoopIterator.h" 90 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 91 #include "llvm/Analysis/ProfileSummaryInfo.h" 92 #include "llvm/Analysis/ScalarEvolution.h" 93 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 94 #include "llvm/Analysis/TargetLibraryInfo.h" 95 #include "llvm/Analysis/TargetTransformInfo.h" 96 #include "llvm/Analysis/VectorUtils.h" 97 #include "llvm/IR/Attributes.h" 98 #include "llvm/IR/BasicBlock.h" 99 #include "llvm/IR/CFG.h" 100 #include "llvm/IR/Constant.h" 101 #include "llvm/IR/Constants.h" 102 #include "llvm/IR/DataLayout.h" 103 #include "llvm/IR/DebugInfoMetadata.h" 104 #include "llvm/IR/DebugLoc.h" 105 #include "llvm/IR/DerivedTypes.h" 106 #include "llvm/IR/DiagnosticInfo.h" 107 #include "llvm/IR/Dominators.h" 108 #include "llvm/IR/Function.h" 109 #include "llvm/IR/IRBuilder.h" 110 #include "llvm/IR/InstrTypes.h" 111 #include "llvm/IR/Instruction.h" 112 #include "llvm/IR/Instructions.h" 113 #include "llvm/IR/IntrinsicInst.h" 114 #include "llvm/IR/Intrinsics.h" 115 #include "llvm/IR/LLVMContext.h" 116 #include "llvm/IR/Metadata.h" 117 #include "llvm/IR/Module.h" 118 #include "llvm/IR/Operator.h" 119 #include "llvm/IR/PatternMatch.h" 120 #include "llvm/IR/Type.h" 121 #include "llvm/IR/Use.h" 122 #include "llvm/IR/User.h" 123 #include "llvm/IR/Value.h" 124 #include "llvm/IR/ValueHandle.h" 125 #include "llvm/IR/Verifier.h" 126 #include "llvm/InitializePasses.h" 127 #include "llvm/Pass.h" 128 #include "llvm/Support/Casting.h" 129 #include "llvm/Support/CommandLine.h" 130 #include "llvm/Support/Compiler.h" 131 #include "llvm/Support/Debug.h" 132 #include "llvm/Support/ErrorHandling.h" 133 #include "llvm/Support/InstructionCost.h" 134 #include "llvm/Support/MathExtras.h" 135 #include "llvm/Support/raw_ostream.h" 136 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 137 #include "llvm/Transforms/Utils/InjectTLIMappings.h" 138 #include "llvm/Transforms/Utils/LoopSimplify.h" 139 #include "llvm/Transforms/Utils/LoopUtils.h" 140 #include "llvm/Transforms/Utils/LoopVersioning.h" 141 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h" 142 #include "llvm/Transforms/Utils/SizeOpts.h" 143 #include 
"llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 144 #include <algorithm> 145 #include <cassert> 146 #include <cstdint> 147 #include <cstdlib> 148 #include <functional> 149 #include <iterator> 150 #include <limits> 151 #include <memory> 152 #include <string> 153 #include <tuple> 154 #include <utility> 155 156 using namespace llvm; 157 158 #define LV_NAME "loop-vectorize" 159 #define DEBUG_TYPE LV_NAME 160 161 #ifndef NDEBUG 162 const char VerboseDebug[] = DEBUG_TYPE "-verbose"; 163 #endif 164 165 /// @{ 166 /// Metadata attribute names 167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all"; 168 const char LLVMLoopVectorizeFollowupVectorized[] = 169 "llvm.loop.vectorize.followup_vectorized"; 170 const char LLVMLoopVectorizeFollowupEpilogue[] = 171 "llvm.loop.vectorize.followup_epilogue"; 172 /// @} 173 174 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized"); 177 178 static cl::opt<bool> EnableEpilogueVectorization( 179 "enable-epilogue-vectorization", cl::init(true), cl::Hidden, 180 cl::desc("Enable vectorization of epilogue loops.")); 181 182 static cl::opt<unsigned> EpilogueVectorizationForceVF( 183 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, 184 cl::desc("When epilogue vectorization is enabled, and a value greater than " 185 "1 is specified, forces the given VF for all applicable epilogue " 186 "loops.")); 187 188 static cl::opt<unsigned> EpilogueVectorizationMinVF( 189 "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, 190 cl::desc("Only loops with vectorization factor equal to or larger than " 191 "the specified value are considered for epilogue vectorization.")); 192 193 /// Loops with a known constant trip count below this number are vectorized only 194 /// if no scalar iteration overheads are incurred. 195 static cl::opt<unsigned> TinyTripCountVectorThreshold( 196 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 197 cl::desc("Loops with a constant trip count that is smaller than this " 198 "value are vectorized only if no scalar iteration overheads " 199 "are incurred.")); 200 201 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold( 202 "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden, 203 cl::desc("The maximum allowed number of runtime memory checks with a " 204 "vectorize(enable) pragma.")); 205 206 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired, 207 // that predication is preferred, and this lists all options. I.e., the 208 // vectorizer will try to fold the tail-loop (epilogue) into the vector body 209 // and predicate the instructions accordingly. 
// If tail-folding fails, there are
// different fallback strategies depending on these values:
namespace PreferPredicateTy {
  enum Option {
    ScalarEpilogue = 0,
    PredicateElseScalarEpilogue,
    PredicateOrDontVectorize
  };
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in the loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value.
Mostly " 279 "useful for getting consistent testing.")); 280 281 static cl::opt<bool> ForceTargetSupportsScalableVectors( 282 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, 283 cl::desc( 284 "Pretend that scalable vectors are supported, even if the target does " 285 "not support them. This flag should only be used for testing.")); 286 287 static cl::opt<unsigned> SmallLoopCost( 288 "small-loop-cost", cl::init(20), cl::Hidden, 289 cl::desc( 290 "The cost of a loop that is considered 'small' by the interleaver.")); 291 292 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 293 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 294 cl::desc("Enable the use of the block frequency analysis to access PGO " 295 "heuristics minimizing code growth in cold regions and being more " 296 "aggressive in hot regions.")); 297 298 // Runtime interleave loops for load/store throughput. 299 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 300 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 301 cl::desc( 302 "Enable runtime interleaving until load/store ports are saturated")); 303 304 /// Interleave small loops with scalar reductions. 305 static cl::opt<bool> InterleaveSmallLoopScalarReduction( 306 "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, 307 cl::desc("Enable interleaving for loops with small iteration counts that " 308 "contain scalar reductions to expose ILP.")); 309 310 /// The number of stores in a loop that are allowed to need predication. 311 static cl::opt<unsigned> NumberOfStoresToPredicate( 312 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 313 cl::desc("Max number of stores to be predicated behind an if.")); 314 315 static cl::opt<bool> EnableIndVarRegisterHeur( 316 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 317 cl::desc("Count the induction variable only once when interleaving")); 318 319 static cl::opt<bool> EnableCondStoresVectorization( 320 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 321 cl::desc("Enable if predication of stores during vectorization.")); 322 323 static cl::opt<unsigned> MaxNestedScalarReductionIC( 324 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 325 cl::desc("The maximum interleave count to use when interleaving a scalar " 326 "reduction in a nested loop.")); 327 328 static cl::opt<bool> 329 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), 330 cl::Hidden, 331 cl::desc("Prefer in-loop vector reductions, " 332 "overriding the targets preference.")); 333 334 static cl::opt<bool> ForceOrderedReductions( 335 "force-ordered-reductions", cl::init(false), cl::Hidden, 336 cl::desc("Enable the vectorisation of loops with in-order (strict) " 337 "FP reductions")); 338 339 static cl::opt<bool> PreferPredicatedReductionSelect( 340 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, 341 cl::desc( 342 "Prefer predicating a reduction operation over an after loop select.")); 343 344 cl::opt<bool> EnableVPlanNativePath( 345 "enable-vplan-native-path", cl::init(false), cl::Hidden, 346 cl::desc("Enable VPlan-native vectorization path with " 347 "support for outer loop vectorization.")); 348 349 // FIXME: Remove this switch once we have divergence analysis. Currently we 350 // assume divergent non-backedge branches when this switch is true. 
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
492 bool areSafetyChecksAdded() { return AddedSafetyChecks; } 493 494 /// A type for vectorized values in the new loop. Each value from the 495 /// original loop, when vectorized, is represented by UF vector values in the 496 /// new unrolled loop, where UF is the unroll factor. 497 using VectorParts = SmallVector<Value *, 2>; 498 499 /// Vectorize a single GetElementPtrInst based on information gathered and 500 /// decisions taken during planning. 501 void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices, 502 unsigned UF, ElementCount VF, bool IsPtrLoopInvariant, 503 SmallBitVector &IsIndexLoopInvariant, VPTransformState &State); 504 505 /// Vectorize a single first-order recurrence or pointer induction PHINode in 506 /// a block. This method handles the induction variable canonicalization. It 507 /// supports both VF = 1 for unrolled loops and arbitrary length vectors. 508 void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR, 509 VPTransformState &State); 510 511 /// A helper function to scalarize a single Instruction in the innermost loop. 512 /// Generates a sequence of scalar instances for each lane between \p MinLane 513 /// and \p MaxLane, times each part between \p MinPart and \p MaxPart, 514 /// inclusive. Uses the VPValue operands from \p Operands instead of \p 515 /// Instr's operands. 516 void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands, 517 const VPIteration &Instance, bool IfPredicateInstr, 518 VPTransformState &State); 519 520 /// Widen an integer or floating-point induction variable \p IV. If \p Trunc 521 /// is provided, the integer induction variable will first be truncated to 522 /// the corresponding type. 523 void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc, 524 VPValue *Def, VPValue *CastDef, 525 VPTransformState &State); 526 527 /// Construct the vector value of a scalarized value \p V one lane at a time. 528 void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance, 529 VPTransformState &State); 530 531 /// Try to vectorize interleaved access group \p Group with the base address 532 /// given in \p Addr, optionally masking the vector operations if \p 533 /// BlockInMask is non-null. Use \p State to translate given VPValues to IR 534 /// values in the vectorized loop. 535 void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group, 536 ArrayRef<VPValue *> VPDefs, 537 VPTransformState &State, VPValue *Addr, 538 ArrayRef<VPValue *> StoredValues, 539 VPValue *BlockInMask = nullptr); 540 541 /// Vectorize Load and Store instructions with the base address given in \p 542 /// Addr, optionally masking the vector operations if \p BlockInMask is 543 /// non-null. Use \p State to translate given VPValues to IR values in the 544 /// vectorized loop. 545 void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State, 546 VPValue *Def, VPValue *Addr, 547 VPValue *StoredValue, VPValue *BlockInMask, 548 bool ConsecutiveStride, bool Reverse); 549 550 /// Set the debug location in the builder \p Ptr using the debug location in 551 /// \p V. If \p Ptr is None then it uses the class member's Builder. 552 void setDebugLocFromInst(const Value *V, 553 Optional<IRBuilder<> *> CustomBuilder = None); 554 555 /// Fix the non-induction PHIs in the OrigPHIsToFix vector. 
556 void fixNonInductionPHIs(VPTransformState &State); 557 558 /// Returns true if the reordering of FP operations is not allowed, but we are 559 /// able to vectorize with strict in-order reductions for the given RdxDesc. 560 bool useOrderedReductions(RecurrenceDescriptor &RdxDesc); 561 562 /// Create a broadcast instruction. This method generates a broadcast 563 /// instruction (shuffle) for loop invariant values and for the induction 564 /// value. If this is the induction variable then we extend it to N, N+1, ... 565 /// this is needed because each iteration in the loop corresponds to a SIMD 566 /// element. 567 virtual Value *getBroadcastInstrs(Value *V); 568 569 protected: 570 friend class LoopVectorizationPlanner; 571 572 /// A small list of PHINodes. 573 using PhiVector = SmallVector<PHINode *, 4>; 574 575 /// A type for scalarized values in the new loop. Each value from the 576 /// original loop, when scalarized, is represented by UF x VF scalar values 577 /// in the new unrolled loop, where UF is the unroll factor and VF is the 578 /// vectorization factor. 579 using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>; 580 581 /// Set up the values of the IVs correctly when exiting the vector loop. 582 void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, 583 Value *CountRoundDown, Value *EndValue, 584 BasicBlock *MiddleBlock); 585 586 /// Create a new induction variable inside L. 587 PHINode *createInductionVariable(Loop *L, Value *Start, Value *End, 588 Value *Step, Instruction *DL); 589 590 /// Handle all cross-iteration phis in the header. 591 void fixCrossIterationPHIs(VPTransformState &State); 592 593 /// Create the exit value of first order recurrences in the middle block and 594 /// update their users. 595 void fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, VPTransformState &State); 596 597 /// Create code for the loop exit value of the reduction. 598 void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State); 599 600 /// Clear NSW/NUW flags from reduction instructions if necessary. 601 void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc, 602 VPTransformState &State); 603 604 /// Fixup the LCSSA phi nodes in the unique exit block. This simply 605 /// means we need to add the appropriate incoming value from the middle 606 /// block as exiting edges from the scalar epilogue loop (if present) are 607 /// already in place, and we exit the vector loop exclusively to the middle 608 /// block. 609 void fixLCSSAPHIs(VPTransformState &State); 610 611 /// Iteratively sink the scalarized operands of a predicated instruction into 612 /// the block that was created for it. 613 void sinkScalarOperands(Instruction *PredInst); 614 615 /// Shrinks vector element sizes to the smallest bitwidth they can be legally 616 /// represented as. 617 void truncateToMinimalBitwidths(VPTransformState &State); 618 619 /// This function adds 620 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...) 621 /// to each vector element of Val. The sequence starts at StartIndex. 622 /// \p Opcode is relevant for FP induction variable. 623 virtual Value * 624 getStepVector(Value *Val, Value *StartIdx, Value *Step, 625 Instruction::BinaryOps Opcode = Instruction::BinaryOpsEnd); 626 627 /// Compute scalar induction steps. \p ScalarIV is the scalar induction 628 /// variable on which to base the steps, \p Step is the size of the step, and 629 /// \p EntryVal is the value from the original loop that maps to the steps. 
630 /// Note that \p EntryVal doesn't have to be an induction variable - it 631 /// can also be a truncate instruction. 632 void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal, 633 const InductionDescriptor &ID, VPValue *Def, 634 VPValue *CastDef, VPTransformState &State); 635 636 /// Create a vector induction phi node based on an existing scalar one. \p 637 /// EntryVal is the value from the original loop that maps to the vector phi 638 /// node, and \p Step is the loop-invariant step. If \p EntryVal is a 639 /// truncate instruction, instead of widening the original IV, we widen a 640 /// version of the IV truncated to \p EntryVal's type. 641 void createVectorIntOrFpInductionPHI(const InductionDescriptor &II, 642 Value *Step, Value *Start, 643 Instruction *EntryVal, VPValue *Def, 644 VPValue *CastDef, 645 VPTransformState &State); 646 647 /// Returns true if an instruction \p I should be scalarized instead of 648 /// vectorized for the chosen vectorization factor. 649 bool shouldScalarizeInstruction(Instruction *I) const; 650 651 /// Returns true if we should generate a scalar version of \p IV. 652 bool needsScalarInduction(Instruction *IV) const; 653 654 /// If there is a cast involved in the induction variable \p ID, which should 655 /// be ignored in the vectorized loop body, this function records the 656 /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the 657 /// cast. We had already proved that the casted Phi is equal to the uncasted 658 /// Phi in the vectorized loop (under a runtime guard), and therefore 659 /// there is no need to vectorize the cast - the same value can be used in the 660 /// vector loop for both the Phi and the cast. 661 /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified, 662 /// Otherwise, \p VectorLoopValue is a widened/vectorized value. 663 /// 664 /// \p EntryVal is the value from the original loop that maps to the vector 665 /// phi node and is used to distinguish what is the IV currently being 666 /// processed - original one (if \p EntryVal is a phi corresponding to the 667 /// original IV) or the "newly-created" one based on the proof mentioned above 668 /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the 669 /// latter case \p EntryVal is a TruncInst and we must not record anything for 670 /// that IV, but it's error-prone to expect callers of this routine to care 671 /// about that, hence this explicit parameter. 672 void recordVectorLoopValueForInductionCast( 673 const InductionDescriptor &ID, const Instruction *EntryVal, 674 Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State, 675 unsigned Part, unsigned Lane = UINT_MAX); 676 677 /// Generate a shuffle sequence that will reverse the vector Vec. 678 virtual Value *reverseVector(Value *Vec); 679 680 /// Returns (and creates if needed) the original loop trip count. 681 Value *getOrCreateTripCount(Loop *NewLoop); 682 683 /// Returns (and creates if needed) the trip count of the widened loop. 684 Value *getOrCreateVectorTripCount(Loop *NewLoop); 685 686 /// Returns a bitcasted value to the requested vector type. 687 /// Also handles bitcasts of vector<float> <-> vector<pointer> types. 688 Value *createBitOrPointerCast(Value *V, VectorType *DstVTy, 689 const DataLayout &DL); 690 691 /// Emit a bypass check to see if the vector trip count is zero, including if 692 /// it overflows. 
693 void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass); 694 695 /// Emit a bypass check to see if all of the SCEV assumptions we've 696 /// had to make are correct. Returns the block containing the checks or 697 /// nullptr if no checks have been added. 698 BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass); 699 700 /// Emit bypass checks to check any memory assumptions we may have made. 701 /// Returns the block containing the checks or nullptr if no checks have been 702 /// added. 703 BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass); 704 705 /// Compute the transformed value of Index at offset StartValue using step 706 /// StepValue. 707 /// For integer induction, returns StartValue + Index * StepValue. 708 /// For pointer induction, returns StartValue[Index * StepValue]. 709 /// FIXME: The newly created binary instructions should contain nsw/nuw 710 /// flags, which can be found from the original scalar operations. 711 Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE, 712 const DataLayout &DL, 713 const InductionDescriptor &ID) const; 714 715 /// Emit basic blocks (prefixed with \p Prefix) for the iteration check, 716 /// vector loop preheader, middle block and scalar preheader. Also 717 /// allocate a loop object for the new vector loop and return it. 718 Loop *createVectorLoopSkeleton(StringRef Prefix); 719 720 /// Create new phi nodes for the induction variables to resume iteration count 721 /// in the scalar epilogue, from where the vectorized loop left off (given by 722 /// \p VectorTripCount). 723 /// In cases where the loop skeleton is more complicated (eg. epilogue 724 /// vectorization) and the resume values can come from an additional bypass 725 /// block, the \p AdditionalBypass pair provides information about the bypass 726 /// block and the end value on the edge from bypass to this loop. 727 void createInductionResumeValues( 728 Loop *L, Value *VectorTripCount, 729 std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr}); 730 731 /// Complete the loop skeleton by adding debug MDs, creating appropriate 732 /// conditional branches in the middle block, preparing the builder and 733 /// running the verifier. Take in the vector loop \p L as argument, and return 734 /// the preheader of the completed vector loop. 735 BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID); 736 737 /// Add additional metadata to \p To that was not present on \p Orig. 738 /// 739 /// Currently this is used to add the noalias annotations based on the 740 /// inserted memchecks. Use this for instructions that are *cloned* into the 741 /// vector loop. 742 void addNewMetadata(Instruction *To, const Instruction *Orig); 743 744 /// Add metadata from one instruction to another. 745 /// 746 /// This includes both the original MDs from \p From and additional ones (\see 747 /// addNewMetadata). Use this for *newly created* instructions in the vector 748 /// loop. 749 void addMetadata(Instruction *To, Instruction *From); 750 751 /// Similar to the previous function but it adds the metadata to a 752 /// vector of instructions. 753 void addMetadata(ArrayRef<Value *> To, Instruction *From); 754 755 /// Allow subclasses to override and print debug traces before/after vplan 756 /// execution, when trace information is requested. 757 virtual void printDebugTracesAtStart(){}; 758 virtual void printDebugTracesAtEnd(){}; 759 760 /// The original loop. 
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
869 bool OptForSizeBasedOnProfile; 870 871 /// Structure to hold information about generated runtime checks, responsible 872 /// for cleaning the checks, if vectorization turns out unprofitable. 873 GeneratedRTChecks &RTChecks; 874 }; 875 876 class InnerLoopUnroller : public InnerLoopVectorizer { 877 public: 878 InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE, 879 LoopInfo *LI, DominatorTree *DT, 880 const TargetLibraryInfo *TLI, 881 const TargetTransformInfo *TTI, AssumptionCache *AC, 882 OptimizationRemarkEmitter *ORE, unsigned UnrollFactor, 883 LoopVectorizationLegality *LVL, 884 LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, 885 ProfileSummaryInfo *PSI, GeneratedRTChecks &Check) 886 : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 887 ElementCount::getFixed(1), UnrollFactor, LVL, CM, 888 BFI, PSI, Check) {} 889 890 private: 891 Value *getBroadcastInstrs(Value *V) override; 892 Value *getStepVector( 893 Value *Val, Value *StartIdx, Value *Step, 894 Instruction::BinaryOps Opcode = Instruction::BinaryOpsEnd) override; 895 Value *reverseVector(Value *Vec) override; 896 }; 897 898 /// Encapsulate information regarding vectorization of a loop and its epilogue. 899 /// This information is meant to be updated and used across two stages of 900 /// epilogue vectorization. 901 struct EpilogueLoopVectorizationInfo { 902 ElementCount MainLoopVF = ElementCount::getFixed(0); 903 unsigned MainLoopUF = 0; 904 ElementCount EpilogueVF = ElementCount::getFixed(0); 905 unsigned EpilogueUF = 0; 906 BasicBlock *MainLoopIterationCountCheck = nullptr; 907 BasicBlock *EpilogueIterationCountCheck = nullptr; 908 BasicBlock *SCEVSafetyCheck = nullptr; 909 BasicBlock *MemSafetyCheck = nullptr; 910 Value *TripCount = nullptr; 911 Value *VectorTripCount = nullptr; 912 913 EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, 914 ElementCount EVF, unsigned EUF) 915 : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) { 916 assert(EUF == 1 && 917 "A high UF for the epilogue loop is likely not beneficial."); 918 } 919 }; 920 921 /// An extension of the inner loop vectorizer that creates a skeleton for a 922 /// vectorized loop that has its epilogue (residual) also vectorized. 923 /// The idea is to run the vplan on a given loop twice, firstly to setup the 924 /// skeleton and vectorize the main loop, and secondly to complete the skeleton 925 /// from the first step and vectorize the epilogue. This is achieved by 926 /// deriving two concrete strategy classes from this base class and invoking 927 /// them in succession from the loop vectorizer planner. 928 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer { 929 public: 930 InnerLoopAndEpilogueVectorizer( 931 Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, 932 DominatorTree *DT, const TargetLibraryInfo *TLI, 933 const TargetTransformInfo *TTI, AssumptionCache *AC, 934 OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, 935 LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, 936 BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, 937 GeneratedRTChecks &Checks) 938 : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 939 EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI, 940 Checks), 941 EPI(EPI) {} 942 943 // Override this function to handle the more complex control flow around the 944 // three loops. 
945 BasicBlock *createVectorizedLoopSkeleton() final override { 946 return createEpilogueVectorizedLoopSkeleton(); 947 } 948 949 /// The interface for creating a vectorized skeleton using one of two 950 /// different strategies, each corresponding to one execution of the vplan 951 /// as described above. 952 virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0; 953 954 /// Holds and updates state information required to vectorize the main loop 955 /// and its epilogue in two separate passes. This setup helps us avoid 956 /// regenerating and recomputing runtime safety checks. It also helps us to 957 /// shorten the iteration-count-check path length for the cases where the 958 /// iteration count of the loop is so small that the main vector loop is 959 /// completely skipped. 960 EpilogueLoopVectorizationInfo &EPI; 961 }; 962 963 /// A specialized derived class of inner loop vectorizer that performs 964 /// vectorization of *main* loops in the process of vectorizing loops and their 965 /// epilogues. 966 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer { 967 public: 968 EpilogueVectorizerMainLoop( 969 Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, 970 DominatorTree *DT, const TargetLibraryInfo *TLI, 971 const TargetTransformInfo *TTI, AssumptionCache *AC, 972 OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, 973 LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, 974 BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, 975 GeneratedRTChecks &Check) 976 : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 977 EPI, LVL, CM, BFI, PSI, Check) {} 978 /// Implements the interface for creating a vectorized skeleton using the 979 /// *main loop* strategy (ie the first pass of vplan execution). 980 BasicBlock *createEpilogueVectorizedLoopSkeleton() final override; 981 982 protected: 983 /// Emits an iteration count bypass check once for the main loop (when \p 984 /// ForEpilogue is false) and once for the epilogue loop (when \p 985 /// ForEpilogue is true). 986 BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass, 987 bool ForEpilogue); 988 void printDebugTracesAtStart() override; 989 void printDebugTracesAtEnd() override; 990 }; 991 992 // A specialized derived class of inner loop vectorizer that performs 993 // vectorization of *epilogue* loops in the process of vectorizing loops and 994 // their epilogues. 995 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer { 996 public: 997 EpilogueVectorizerEpilogueLoop( 998 Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, 999 DominatorTree *DT, const TargetLibraryInfo *TLI, 1000 const TargetTransformInfo *TTI, AssumptionCache *AC, 1001 OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, 1002 LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, 1003 BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, 1004 GeneratedRTChecks &Checks) 1005 : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1006 EPI, LVL, CM, BFI, PSI, Checks) {} 1007 /// Implements the interface for creating a vectorized skeleton using the 1008 /// *epilogue loop* strategy (ie the second pass of vplan execution). 
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
  IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When a FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
    StringRef RemarkName, Loop *TheLoop, Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

/// Return a value for Step multiplied by VF.
1106 static Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF, 1107 int64_t Step) { 1108 assert(Ty->isIntegerTy() && "Expected an integer step"); 1109 Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue()); 1110 return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal; 1111 } 1112 1113 namespace llvm { 1114 1115 /// Return the runtime value for VF. 1116 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) { 1117 Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue()); 1118 return VF.isScalable() ? B.CreateVScale(EC) : EC; 1119 } 1120 1121 static Value *getRuntimeVFAsFloat(IRBuilder<> &B, Type *FTy, ElementCount VF) { 1122 assert(FTy->isFloatingPointTy() && "Expected floating point type!"); 1123 Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits()); 1124 Value *RuntimeVF = getRuntimeVF(B, IntTy, VF); 1125 return B.CreateUIToFP(RuntimeVF, FTy); 1126 } 1127 1128 void reportVectorizationFailure(const StringRef DebugMsg, 1129 const StringRef OREMsg, const StringRef ORETag, 1130 OptimizationRemarkEmitter *ORE, Loop *TheLoop, 1131 Instruction *I) { 1132 LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I)); 1133 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE); 1134 ORE->emit( 1135 createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I) 1136 << "loop not vectorized: " << OREMsg); 1137 } 1138 1139 void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, 1140 OptimizationRemarkEmitter *ORE, Loop *TheLoop, 1141 Instruction *I) { 1142 LLVM_DEBUG(debugVectorizationMessage("", Msg, I)); 1143 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE); 1144 ORE->emit( 1145 createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I) 1146 << Msg); 1147 } 1148 1149 } // end namespace llvm 1150 1151 #ifndef NDEBUG 1152 /// \return string containing a file name and a line # for the given loop. 1153 static std::string getDebugLocString(const Loop *L) { 1154 std::string Result; 1155 if (L) { 1156 raw_string_ostream OS(Result); 1157 if (const DebugLoc LoopDbgLoc = L->getStartLoc()) 1158 LoopDbgLoc.print(OS); 1159 else 1160 // Just print the module name. 1161 OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier(); 1162 OS.flush(); 1163 } 1164 return Result; 1165 } 1166 #endif 1167 1168 void InnerLoopVectorizer::addNewMetadata(Instruction *To, 1169 const Instruction *Orig) { 1170 // If the loop was versioned with memchecks, add the corresponding no-alias 1171 // metadata. 1172 if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig))) 1173 LVer->annotateInstWithNoAlias(To, Orig); 1174 } 1175 1176 void InnerLoopVectorizer::addMetadata(Instruction *To, 1177 Instruction *From) { 1178 propagateMetadata(To, From); 1179 addNewMetadata(To, From); 1180 } 1181 1182 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To, 1183 Instruction *From) { 1184 for (Value *V : To) { 1185 if (Instruction *I = dyn_cast<Instruction>(V)) 1186 addMetadata(I, From); 1187 } 1188 } 1189 1190 namespace llvm { 1191 1192 // Loop vectorization cost-model hints how the scalar epilogue loop should be 1193 // lowered. 1194 enum ScalarEpilogueLowering { 1195 1196 // The default: allowing scalar epilogues. 1197 CM_ScalarEpilogueAllowed, 1198 1199 // Vectorization with OptForSize: don't allow epilogues. 
1200 CM_ScalarEpilogueNotAllowedOptSize, 1201 1202 // A special case of vectorisation with OptForSize: loops with a very small 1203 // trip count are considered for vectorization under OptForSize, thereby 1204 // making sure the cost of their loop body is dominant, free of runtime 1205 // guards and scalar iteration overheads. 1206 CM_ScalarEpilogueNotAllowedLowTripLoop, 1207 1208 // Loop hint predicate indicating an epilogue is undesired. 1209 CM_ScalarEpilogueNotNeededUsePredicate, 1210 1211 // Directive indicating we must either tail fold or not vectorize 1212 CM_ScalarEpilogueNotAllowedUsePredicate 1213 }; 1214 1215 /// ElementCountComparator creates a total ordering for ElementCount 1216 /// for the purposes of using it in a set structure. 1217 struct ElementCountComparator { 1218 bool operator()(const ElementCount &LHS, const ElementCount &RHS) const { 1219 return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) < 1220 std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue()); 1221 } 1222 }; 1223 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>; 1224 1225 /// LoopVectorizationCostModel - estimates the expected speedups due to 1226 /// vectorization. 1227 /// In many cases vectorization is not profitable. This can happen because of 1228 /// a number of reasons. In this class we mainly attempt to predict the 1229 /// expected speedup/slowdowns due to the supported instruction set. We use the 1230 /// TargetTransformInfo to query the different backends for the cost of 1231 /// different operations. 1232 class LoopVectorizationCostModel { 1233 public: 1234 LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, 1235 PredicatedScalarEvolution &PSE, LoopInfo *LI, 1236 LoopVectorizationLegality *Legal, 1237 const TargetTransformInfo &TTI, 1238 const TargetLibraryInfo *TLI, DemandedBits *DB, 1239 AssumptionCache *AC, 1240 OptimizationRemarkEmitter *ORE, const Function *F, 1241 const LoopVectorizeHints *Hints, 1242 InterleavedAccessInfo &IAI) 1243 : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), 1244 TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F), 1245 Hints(Hints), InterleaveInfo(IAI) {} 1246 1247 /// \return An upper bound for the vectorization factors (both fixed and 1248 /// scalable). If the factors are 0, vectorization and interleaving should be 1249 /// avoided up front. 1250 FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC); 1251 1252 /// \return True if runtime checks are required for vectorization, and false 1253 /// otherwise. 1254 bool runtimeChecksRequired(); 1255 1256 /// \return The most profitable vectorization factor and the cost of that VF. 1257 /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO 1258 /// then this vectorization factor will be selected if vectorization is 1259 /// possible. 1260 VectorizationFactor 1261 selectVectorizationFactor(const ElementCountSet &CandidateVFs); 1262 1263 VectorizationFactor 1264 selectEpilogueVectorizationFactor(const ElementCount MaxVF, 1265 const LoopVectorizationPlanner &LVP); 1266 1267 /// Setup cost-based decisions for user vectorization factor. 1268 /// \return true if the UserVF is a feasible VF to be chosen. 
1269 bool selectUserVectorizationFactor(ElementCount UserVF) { 1270 collectUniformsAndScalars(UserVF); 1271 collectInstsToScalarize(UserVF); 1272 return expectedCost(UserVF).first.isValid(); 1273 } 1274 1275 /// \return The size (in bits) of the smallest and widest types in the code 1276 /// that needs to be vectorized. We ignore values that remain scalar such as 1277 /// 64 bit loop indices. 1278 std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); 1279 1280 /// \return The desired interleave count. 1281 /// If interleave count has been specified by metadata it will be returned. 1282 /// Otherwise, the interleave count is computed and returned. VF and LoopCost 1283 /// are the selected vectorization factor and the cost of the selected VF. 1284 unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost); 1285 1286 /// Memory access instruction may be vectorized in more than one way. 1287 /// Form of instruction after vectorization depends on cost. 1288 /// This function takes cost-based decisions for Load/Store instructions 1289 /// and collects them in a map. This decisions map is used for building 1290 /// the lists of loop-uniform and loop-scalar instructions. 1291 /// The calculated cost is saved with widening decision in order to 1292 /// avoid redundant calculations. 1293 void setCostBasedWideningDecision(ElementCount VF); 1294 1295 /// A struct that represents some properties of the register usage 1296 /// of a loop. 1297 struct RegisterUsage { 1298 /// Holds the number of loop invariant values that are used in the loop. 1299 /// The key is ClassID of target-provided register class. 1300 SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs; 1301 /// Holds the maximum number of concurrent live intervals in the loop. 1302 /// The key is ClassID of target-provided register class. 1303 SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers; 1304 }; 1305 1306 /// \return Returns information about the register usages of the loop for the 1307 /// given vectorization factors. 1308 SmallVector<RegisterUsage, 8> 1309 calculateRegisterUsage(ArrayRef<ElementCount> VFs); 1310 1311 /// Collect values we want to ignore in the cost model. 1312 void collectValuesToIgnore(); 1313 1314 /// Collect all element types in the loop for which widening is needed. 1315 void collectElementTypesForWidening(); 1316 1317 /// Split reductions into those that happen in the loop, and those that happen 1318 /// outside. In loop reductions are collected into InLoopReductionChains. 1319 void collectInLoopReductions(); 1320 1321 /// Returns true if we should use strict in-order reductions for the given 1322 /// RdxDesc. This is true if the -enable-strict-reductions flag is passed, 1323 /// the IsOrdered flag of RdxDesc is set and we do not allow reordering 1324 /// of FP operations. 1325 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) { 1326 return !Hints->allowReordering() && RdxDesc.isOrdered(); 1327 } 1328 1329 /// \returns The smallest bitwidth each instruction can be represented with. 1330 /// The vector equivalents of these instructions should be truncated to this 1331 /// type. 1332 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { 1333 return MinBWs; 1334 } 1335 1336 /// \returns True if it is more profitable to scalarize instruction \p I for 1337 /// vectorization factor \p VF. 
1338 bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1339 assert(VF.isVector() &&
1340 "Profitable to scalarize relevant only for VF > 1.");
1341
1342 // Cost model is not run in the VPlan-native path - return conservative
1343 // result until this changes.
1344 if (EnableVPlanNativePath)
1345 return false;
1346
1347 auto Scalars = InstsToScalarize.find(VF);
1348 assert(Scalars != InstsToScalarize.end() &&
1349 "VF not yet analyzed for scalarization profitability");
1350 return Scalars->second.find(I) != Scalars->second.end();
1351 }
1352
1353 /// Returns true if \p I is known to be uniform after vectorization.
1354 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1355 if (VF.isScalar())
1356 return true;
1357
1358 // Cost model is not run in the VPlan-native path - return conservative
1359 // result until this changes.
1360 if (EnableVPlanNativePath)
1361 return false;
1362
1363 auto UniformsPerVF = Uniforms.find(VF);
1364 assert(UniformsPerVF != Uniforms.end() &&
1365 "VF not yet analyzed for uniformity");
1366 return UniformsPerVF->second.count(I);
1367 }
1368
1369 /// Returns true if \p I is known to be scalar after vectorization.
1370 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1371 if (VF.isScalar())
1372 return true;
1373
1374 // Cost model is not run in the VPlan-native path - return conservative
1375 // result until this changes.
1376 if (EnableVPlanNativePath)
1377 return false;
1378
1379 auto ScalarsPerVF = Scalars.find(VF);
1380 assert(ScalarsPerVF != Scalars.end() &&
1381 "Scalar values are not calculated for VF");
1382 return ScalarsPerVF->second.count(I);
1383 }
1384
1385 /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1386 /// for vectorization factor \p VF.
1387 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1388 return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1389 !isProfitableToScalarize(I, VF) &&
1390 !isScalarAfterVectorization(I, VF);
1391 }
1392
1393 /// Decision that was taken during cost calculation for a memory instruction.
1394 enum InstWidening {
1395 CM_Unknown,
1396 CM_Widen, // For consecutive accesses with stride +1.
1397 CM_Widen_Reverse, // For consecutive accesses with stride -1.
1398 CM_Interleave,
1399 CM_GatherScatter,
1400 CM_Scalarize
1401 };
1402
1403 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1404 /// instruction \p I and vector width \p VF.
1405 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1406 InstructionCost Cost) {
1407 assert(VF.isVector() && "Expected VF >=2");
1408 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1409 }
1410
1411 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1412 /// interleaving group \p Grp and vector width \p VF.
1413 void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1414 ElementCount VF, InstWidening W,
1415 InstructionCost Cost) {
1416 assert(VF.isVector() && "Expected VF >=2");
1417 // Broadcast this decision to all instructions inside the group.
1418 // But the cost will be assigned to one instruction only.
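// For example (illustrative): given a factor-3 group {A[3*i], A[3*i+1],
// A[3*i+2]} whose insert position is A[3*i], the loop below records
//   (A[3*i],   VF) -> (W, Cost)
//   (A[3*i+1], VF) -> (W, 0)
//   (A[3*i+2], VF) -> (W, 0)
// so every member reports the same decision while the cost is counted once.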
1419 for (unsigned i = 0; i < Grp->getFactor(); ++i) { 1420 if (auto *I = Grp->getMember(i)) { 1421 if (Grp->getInsertPos() == I) 1422 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1423 else 1424 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); 1425 } 1426 } 1427 } 1428 1429 /// Return the cost model decision for the given instruction \p I and vector 1430 /// width \p VF. Return CM_Unknown if this instruction did not pass 1431 /// through the cost modeling. 1432 InstWidening getWideningDecision(Instruction *I, ElementCount VF) const { 1433 assert(VF.isVector() && "Expected VF to be a vector VF"); 1434 // Cost model is not run in the VPlan-native path - return conservative 1435 // result until this changes. 1436 if (EnableVPlanNativePath) 1437 return CM_GatherScatter; 1438 1439 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1440 auto Itr = WideningDecisions.find(InstOnVF); 1441 if (Itr == WideningDecisions.end()) 1442 return CM_Unknown; 1443 return Itr->second.first; 1444 } 1445 1446 /// Return the vectorization cost for the given instruction \p I and vector 1447 /// width \p VF. 1448 InstructionCost getWideningCost(Instruction *I, ElementCount VF) { 1449 assert(VF.isVector() && "Expected VF >=2"); 1450 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1451 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1452 "The cost is not calculated"); 1453 return WideningDecisions[InstOnVF].second; 1454 } 1455 1456 /// Return True if instruction \p I is an optimizable truncate whose operand 1457 /// is an induction variable. Such a truncate will be removed by adding a new 1458 /// induction variable with the destination type. 1459 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) { 1460 // If the instruction is not a truncate, return false. 1461 auto *Trunc = dyn_cast<TruncInst>(I); 1462 if (!Trunc) 1463 return false; 1464 1465 // Get the source and destination types of the truncate. 1466 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1467 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1468 1469 // If the truncate is free for the given types, return false. Replacing a 1470 // free truncate with an induction variable would add an induction variable 1471 // update instruction to each iteration of the loop. We exclude from this 1472 // check the primary induction variable since it will need an update 1473 // instruction regardless. 1474 Value *Op = Trunc->getOperand(0); 1475 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1476 return false; 1477 1478 // If the truncated value is not an induction variable, return false. 1479 return Legal->isInductionPhi(Op); 1480 } 1481 1482 /// Collects the instructions to scalarize for each predicated instruction in 1483 /// the loop. 1484 void collectInstsToScalarize(ElementCount VF); 1485 1486 /// Collect Uniform and Scalar values for the given \p VF. 1487 /// The sets depend on CM decision for Load/Store instructions 1488 /// that may be vectorized as interleave, gather-scatter or scalarized. 1489 void collectUniformsAndScalars(ElementCount VF) { 1490 // Do the analysis once. 
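// For example (illustrative; `CM` names a cost-model instance, not a variable
// in this file), a repeated query for the same VF is a no-op:
//   CM.collectUniformsAndScalars(ElementCount::getFixed(4)); // analyzes VF=4
//   CM.collectUniformsAndScalars(ElementCount::getFixed(4)); // early-returns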
1491 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) 1492 return; 1493 setCostBasedWideningDecision(VF); 1494 collectLoopUniforms(VF); 1495 collectLoopScalars(VF); 1496 } 1497 1498 /// Returns true if the target machine supports masked store operation 1499 /// for the given \p DataType and kind of access to \p Ptr. 1500 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const { 1501 return Legal->isConsecutivePtr(DataType, Ptr) && 1502 TTI.isLegalMaskedStore(DataType, Alignment); 1503 } 1504 1505 /// Returns true if the target machine supports masked load operation 1506 /// for the given \p DataType and kind of access to \p Ptr. 1507 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const { 1508 return Legal->isConsecutivePtr(DataType, Ptr) && 1509 TTI.isLegalMaskedLoad(DataType, Alignment); 1510 } 1511 1512 /// Returns true if the target machine can represent \p V as a masked gather 1513 /// or scatter operation. 1514 bool isLegalGatherOrScatter(Value *V) { 1515 bool LI = isa<LoadInst>(V); 1516 bool SI = isa<StoreInst>(V); 1517 if (!LI && !SI) 1518 return false; 1519 auto *Ty = getLoadStoreType(V); 1520 Align Align = getLoadStoreAlignment(V); 1521 return (LI && TTI.isLegalMaskedGather(Ty, Align)) || 1522 (SI && TTI.isLegalMaskedScatter(Ty, Align)); 1523 } 1524 1525 /// Returns true if the target machine supports all of the reduction 1526 /// variables found for the given VF. 1527 bool canVectorizeReductions(ElementCount VF) const { 1528 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 1529 const RecurrenceDescriptor &RdxDesc = Reduction.second; 1530 return TTI.isLegalToVectorizeReduction(RdxDesc, VF); 1531 })); 1532 } 1533 1534 /// Returns true if \p I is an instruction that will be scalarized with 1535 /// predication. Such instructions include conditional stores and 1536 /// instructions that may divide by zero. 1537 /// If a non-zero VF has been calculated, we check if I will be scalarized 1538 /// predication for that VF. 1539 bool isScalarWithPredication(Instruction *I) const; 1540 1541 // Returns true if \p I is an instruction that will be predicated either 1542 // through scalar predication or masked load/store or masked gather/scatter. 1543 // Superset of instructions that return true for isScalarWithPredication. 1544 bool isPredicatedInst(Instruction *I) { 1545 if (!blockNeedsPredication(I->getParent())) 1546 return false; 1547 // Loads and stores that need some form of masked operation are predicated 1548 // instructions. 1549 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1550 return Legal->isMaskRequired(I); 1551 return isScalarWithPredication(I); 1552 } 1553 1554 /// Returns true if \p I is a memory instruction with consecutive memory 1555 /// access that can be widened. 1556 bool 1557 memoryInstructionCanBeWidened(Instruction *I, 1558 ElementCount VF = ElementCount::getFixed(1)); 1559 1560 /// Returns true if \p I is a memory instruction in an interleaved-group 1561 /// of memory accesses that can be vectorized with wide vector loads/stores 1562 /// and shuffles. 1563 bool 1564 interleavedAccessCanBeWidened(Instruction *I, 1565 ElementCount VF = ElementCount::getFixed(1)); 1566 1567 /// Check if \p Instr belongs to any interleaved access group. 1568 bool isAccessInterleaved(Instruction *Instr) { 1569 return InterleaveInfo.isInterleaved(Instr); 1570 } 1571 1572 /// Get the interleaved access group that \p Instr belongs to. 
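/// For example (illustrative): for a factor-2 access pattern such as
///   %even = load i32, i32* %p   ; A[2*i],   member index 0
///   %odd  = load i32, i32* %q   ; A[2*i+1], member index 1
/// both loads map to the same InterleaveGroup, with their member indices
/// recoverable from the group.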
1573 const InterleaveGroup<Instruction> *
1574 getInterleavedAccessGroup(Instruction *Instr) {
1575 return InterleaveInfo.getInterleaveGroup(Instr);
1576 }
1577
1578 /// Returns true if we're required to use a scalar epilogue for at least
1579 /// the final iteration of the original loop.
1580 bool requiresScalarEpilogue(ElementCount VF) const {
1581 if (!isScalarEpilogueAllowed())
1582 return false;
1583 // If we might exit from anywhere but the latch, we must run the exiting
1584 // iteration in scalar form.
1585 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1586 return true;
1587 return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1588 }
1589
1590 /// Returns true if a scalar epilogue is allowed, i.e. it has not been
1591 /// disallowed due to optsize or a loop hint annotation.
1592 bool isScalarEpilogueAllowed() const {
1593 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1594 }
1595
1596 /// Returns true if all loop blocks should be masked to fold the loop tail.
1597 bool foldTailByMasking() const { return FoldTailByMasking; }
1598
1599 bool blockNeedsPredication(BasicBlock *BB) const {
1600 return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1601 }
1602
1603 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1604 /// nodes to the chain of instructions representing the reductions. Uses a
1605 /// MapVector to ensure deterministic iteration order.
1606 using ReductionChainMap =
1607 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1608
1609 /// Return the chain of instructions representing an inloop reduction.
1610 const ReductionChainMap &getInLoopReductionChains() const {
1611 return InLoopReductionChains;
1612 }
1613
1614 /// Returns true if the Phi is part of an inloop reduction.
1615 bool isInLoopReduction(PHINode *Phi) const {
1616 return InLoopReductionChains.count(Phi);
1617 }
1618
1619 /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1620 /// with factor VF. Return the cost of the instruction, including
1621 /// scalarization overhead if it's needed.
1622 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1623
1624 /// Estimate cost of a call instruction CI if it were vectorized with factor
1625 /// VF. Return the cost of the instruction, including scalarization overhead
1626 /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1627 /// scalarized, i.e. either a vector version of the call isn't available, or
1628 /// it is too expensive.
1629 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1630 bool &NeedToScalarize) const;
1631
1632 /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1633 /// that of B.
1634 bool isMoreProfitable(const VectorizationFactor &A,
1635 const VectorizationFactor &B) const;
1636
1637 /// Invalidates decisions already taken by the cost model.
1638 void invalidateCostModelingDecisions() {
1639 WideningDecisions.clear();
1640 Uniforms.clear();
1641 Scalars.clear();
1642 }
1643
1644 private:
1645 unsigned NumPredStores = 0;
1646
1647 /// \return An upper bound for the vectorization factors for both
1648 /// fixed and scalable vectorization, where the minimum-known number of
1649 /// elements is a power-of-2 larger than zero. If scalable vectorization is
1650 /// disabled or unsupported, then the scalable part will be equal to
1651 /// ElementCount::getScalable(0).
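/// For example (illustrative values only): on a target with 128-bit vector
/// registers and no scalable-vector support, a loop over i32 might yield
///   FixedScalableVFPair(ElementCount::getFixed(4), ElementCount::getScalable(0))
/// i.e. fixed candidate VFs up to 4 and no scalable candidates.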
1652 FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1653 ElementCount UserVF);
1654
1655 /// \return the maximized element count based on the target's vector
1656 /// registers and the loop trip-count, but limited to a maximum safe VF.
1657 /// This is a helper function of computeFeasibleMaxVF.
1658 /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1659 /// issue that occurred on one of the buildbots which cannot be reproduced
1660 /// without having access to the proprietary compiler (see comments on
1661 /// D98509). The issue is currently under investigation and this workaround
1662 /// will be removed as soon as possible.
1663 ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1664 unsigned SmallestType,
1665 unsigned WidestType,
1666 const ElementCount &MaxSafeVF);
1667
1668 /// \return the maximum legal scalable VF, based on the safe max number
1669 /// of elements.
1670 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1671
1672 /// The vectorization cost is a combination of the cost itself and a boolean
1673 /// indicating whether any of the contributing operations will actually
1674 /// operate on vector values after type legalization in the backend. If this
1675 /// latter value is false, then all operations will be scalarized (i.e. no
1676 /// vectorization has actually taken place).
1677 using VectorizationCostTy = std::pair<InstructionCost, bool>;
1678
1679 /// Returns the expected execution cost. The unit of the cost does
1680 /// not matter because we use the 'cost' units to compare different
1681 /// vector widths. The cost that is returned is *not* normalized by
1682 /// the factor width. If \p Invalid is not nullptr, this function
1683 /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1684 /// each instruction that has an Invalid cost for the given VF.
1685 using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1686 VectorizationCostTy
1687 expectedCost(ElementCount VF,
1688 SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1689
1690 /// Returns the execution time cost of an instruction for a given vector
1691 /// width. Vector width of one means scalar.
1692 VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1693
1694 /// The cost-computation logic from getInstructionCost which provides
1695 /// the vector type as an output parameter.
1696 InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1697 Type *&VectorTy);
1698
1699 /// Return the cost of instructions in an inloop reduction pattern, if I is
1700 /// part of that pattern.
1701 Optional<InstructionCost>
1702 getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1703 TTI::TargetCostKind CostKind);
1704
1705 /// Calculate vectorization cost of memory instruction \p I.
1706 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1707
1708 /// The cost computation for a scalarized memory instruction.
1709 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1710
1711 /// The cost computation for an interleaving group of memory instructions.
1712 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1713
1714 /// The cost computation for a Gather/Scatter instruction.
1715 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1716
1717 /// The cost computation for widening instruction \p I with consecutive
1718 /// memory access.
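/// For example (illustrative IR): a stride +1 load of i32 at a fixed VF of 4
/// is costed as a single wide load,
///   %wide.load = load <4 x i32>, <4 x i32>* %ptr, align 4
/// while a stride -1 access (CM_Widen_Reverse) additionally pays for the
/// reverse shuffle of the loaded value.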
1719 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1720
1721 /// The cost calculation for a Load/Store instruction \p I with a uniform pointer -
1722 /// Load: scalar load + broadcast.
1723 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1724 /// element)
1725 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1726
1727 /// Estimate the overhead of scalarizing an instruction. This is a
1728 /// convenience wrapper for the type-based getScalarizationOverhead API.
1729 InstructionCost getScalarizationOverhead(Instruction *I,
1730 ElementCount VF) const;
1731
1732 /// Returns whether the instruction is a load or store and will be emitted
1733 /// as a vector operation.
1734 bool isConsecutiveLoadOrStore(Instruction *I);
1735
1736 /// Returns true if an artificially high cost for emulated masked memrefs
1737 /// should be used.
1738 bool useEmulatedMaskMemRefHack(Instruction *I);
1739
1740 /// Map of scalar integer values to the smallest bitwidth they can be legally
1741 /// represented as. The vector equivalents of these values should be truncated
1742 /// to this type.
1743 MapVector<Instruction *, uint64_t> MinBWs;
1744
1745 /// A type representing the costs for instructions if they were to be
1746 /// scalarized rather than vectorized. The entries are Instruction-Cost
1747 /// pairs.
1748 using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1749
1750 /// A set containing all BasicBlocks that are known to be present after
1751 /// vectorization as predicated blocks.
1752 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1753
1754 /// Records whether it is allowed to have the original scalar loop execute at
1755 /// least once. This may be needed as a fallback loop in case runtime
1756 /// aliasing/dependence checks fail, or to handle the tail/remainder
1757 /// iterations when the trip count is unknown or doesn't divide by the VF,
1758 /// or as a peel-loop to handle gaps in interleave-groups.
1759 /// Under optsize and when the trip count is very small we don't allow any
1760 /// iterations to execute in the scalar loop.
1761 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1762
1763 /// All blocks of the loop are to be masked to fold the tail of the scalar iterations.
1764 bool FoldTailByMasking = false;
1765
1766 /// A map holding scalar costs for different vectorization factors. The
1767 /// presence of a cost for an instruction in the mapping indicates that the
1768 /// instruction will be scalarized when vectorizing with the associated
1769 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1770 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1771
1772 /// Holds the instructions known to be uniform after vectorization.
1773 /// The data is collected per VF.
1774 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1775
1776 /// Holds the instructions known to be scalar after vectorization.
1777 /// The data is collected per VF.
1778 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1779
1780 /// Holds the instructions (address computations) that are forced to be
1781 /// scalarized.
1782 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1783 1784 /// PHINodes of the reductions that should be expanded in-loop along with 1785 /// their associated chains of reduction operations, in program order from top 1786 /// (PHI) to bottom 1787 ReductionChainMap InLoopReductionChains; 1788 1789 /// A Map of inloop reduction operations and their immediate chain operand. 1790 /// FIXME: This can be removed once reductions can be costed correctly in 1791 /// vplan. This was added to allow quick lookup to the inloop operations, 1792 /// without having to loop through InLoopReductionChains. 1793 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains; 1794 1795 /// Returns the expected difference in cost from scalarizing the expression 1796 /// feeding a predicated instruction \p PredInst. The instructions to 1797 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1798 /// non-negative return value implies the expression will be scalarized. 1799 /// Currently, only single-use chains are considered for scalarization. 1800 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1801 ElementCount VF); 1802 1803 /// Collect the instructions that are uniform after vectorization. An 1804 /// instruction is uniform if we represent it with a single scalar value in 1805 /// the vectorized loop corresponding to each vector iteration. Examples of 1806 /// uniform instructions include pointer operands of consecutive or 1807 /// interleaved memory accesses. Note that although uniformity implies an 1808 /// instruction will be scalar, the reverse is not true. In general, a 1809 /// scalarized instruction will be represented by VF scalar values in the 1810 /// vectorized loop, each corresponding to an iteration of the original 1811 /// scalar loop. 1812 void collectLoopUniforms(ElementCount VF); 1813 1814 /// Collect the instructions that are scalar after vectorization. An 1815 /// instruction is scalar if it is known to be uniform or will be scalarized 1816 /// during vectorization. Non-uniform scalarized instructions will be 1817 /// represented by VF values in the vectorized loop, each corresponding to an 1818 /// iteration of the original scalar loop. 1819 void collectLoopScalars(ElementCount VF); 1820 1821 /// Keeps cost model vectorization decision and cost for instructions. 1822 /// Right now it is used for memory instructions only. 1823 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1824 std::pair<InstWidening, InstructionCost>>; 1825 1826 DecisionList WideningDecisions; 1827 1828 /// Returns true if \p V is expected to be vectorized and it needs to be 1829 /// extracted. 1830 bool needsExtract(Value *V, ElementCount VF) const { 1831 Instruction *I = dyn_cast<Instruction>(V); 1832 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1833 TheLoop->isLoopInvariant(I)) 1834 return false; 1835 1836 // Assume we can vectorize V (and hence we need extraction) if the 1837 // scalars are not computed yet. This can happen, because it is called 1838 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1839 // the scalars are collected. That should be a safe assumption in most 1840 // cases, because we check if the operands have vectorizable types 1841 // beforehand in LoopVectorizationLegality. 1842 return Scalars.find(VF) == Scalars.end() || 1843 !isScalarAfterVectorization(I, VF); 1844 }; 1845 1846 /// Returns a range containing only operands needing to be extracted. 
1847 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1848 ElementCount VF) const { 1849 return SmallVector<Value *, 4>(make_filter_range( 1850 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1851 } 1852 1853 /// Determines if we have the infrastructure to vectorize loop \p L and its 1854 /// epilogue, assuming the main loop is vectorized by \p VF. 1855 bool isCandidateForEpilogueVectorization(const Loop &L, 1856 const ElementCount VF) const; 1857 1858 /// Returns true if epilogue vectorization is considered profitable, and 1859 /// false otherwise. 1860 /// \p VF is the vectorization factor chosen for the original loop. 1861 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1862 1863 public: 1864 /// The loop that we evaluate. 1865 Loop *TheLoop; 1866 1867 /// Predicated scalar evolution analysis. 1868 PredicatedScalarEvolution &PSE; 1869 1870 /// Loop Info analysis. 1871 LoopInfo *LI; 1872 1873 /// Vectorization legality. 1874 LoopVectorizationLegality *Legal; 1875 1876 /// Vector target information. 1877 const TargetTransformInfo &TTI; 1878 1879 /// Target Library Info. 1880 const TargetLibraryInfo *TLI; 1881 1882 /// Demanded bits analysis. 1883 DemandedBits *DB; 1884 1885 /// Assumption cache. 1886 AssumptionCache *AC; 1887 1888 /// Interface to emit optimization remarks. 1889 OptimizationRemarkEmitter *ORE; 1890 1891 const Function *TheFunction; 1892 1893 /// Loop Vectorize Hint. 1894 const LoopVectorizeHints *Hints; 1895 1896 /// The interleave access information contains groups of interleaved accesses 1897 /// with the same stride and close to each other. 1898 InterleavedAccessInfo &InterleaveInfo; 1899 1900 /// Values to ignore in the cost model. 1901 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1902 1903 /// Values to ignore in the cost model when VF > 1. 1904 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1905 1906 /// All element types found in the loop. 1907 SmallPtrSet<Type *, 16> ElementTypesInLoop; 1908 1909 /// Profitable vector factors. 1910 SmallVector<VectorizationFactor, 8> ProfitableVFs; 1911 }; 1912 } // end namespace llvm 1913 1914 /// Helper struct to manage generating runtime checks for vectorization. 1915 /// 1916 /// The runtime checks are created up-front in temporary blocks to allow better 1917 /// estimating the cost and un-linked from the existing IR. After deciding to 1918 /// vectorize, the checks are moved back. If deciding not to vectorize, the 1919 /// temporary blocks are completely removed. 1920 class GeneratedRTChecks { 1921 /// Basic block which contains the generated SCEV checks, if any. 1922 BasicBlock *SCEVCheckBlock = nullptr; 1923 1924 /// The value representing the result of the generated SCEV checks. If it is 1925 /// nullptr, either no SCEV checks have been generated or they have been used. 1926 Value *SCEVCheckCond = nullptr; 1927 1928 /// Basic block which contains the generated memory runtime checks, if any. 1929 BasicBlock *MemCheckBlock = nullptr; 1930 1931 /// The value representing the result of the generated memory runtime checks. 1932 /// If it is nullptr, either no memory runtime checks have been generated or 1933 /// they have been used. 
1934 Value *MemRuntimeCheckCond = nullptr;
1935
1936 DominatorTree *DT;
1937 LoopInfo *LI;
1938
1939 SCEVExpander SCEVExp;
1940 SCEVExpander MemCheckExp;
1941
1942 public:
1943 GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1944 const DataLayout &DL)
1945 : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1946 MemCheckExp(SE, DL, "scev.check") {}
1947
1948 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1949 /// accurately estimate the cost of the runtime checks. The blocks are
1950 /// un-linked from the IR and are added back during vector code generation. If
1951 /// there is no vector code generation, the check blocks are removed
1952 /// completely.
1953 void Create(Loop *L, const LoopAccessInfo &LAI,
1954 const SCEVUnionPredicate &UnionPred) {
1955
1956 BasicBlock *LoopHeader = L->getHeader();
1957 BasicBlock *Preheader = L->getLoopPreheader();
1958
1959 // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1960 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1961 // may be used by SCEVExpander. The blocks will be un-linked from their
1962 // predecessors and removed from LI & DT at the end of the function.
1963 if (!UnionPred.isAlwaysTrue()) {
1964 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1965 nullptr, "vector.scevcheck");
1966
1967 SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1968 &UnionPred, SCEVCheckBlock->getTerminator());
1969 }
1970
1971 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1972 if (RtPtrChecking.Need) {
1973 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1974 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1975 "vector.memcheck");
1976
1977 MemRuntimeCheckCond =
1978 addRuntimeChecks(MemCheckBlock->getTerminator(), L,
1979 RtPtrChecking.getChecks(), MemCheckExp);
1980 assert(MemRuntimeCheckCond &&
1981 "no RT checks generated although RtPtrChecking "
1982 "claimed checks are required");
1983 }
1984
1985 if (!MemCheckBlock && !SCEVCheckBlock)
1986 return;
1987
1988 // Unhook the temporary blocks holding the checks and update various places
1989 // accordingly.
1990 if (SCEVCheckBlock)
1991 SCEVCheckBlock->replaceAllUsesWith(Preheader);
1992 if (MemCheckBlock)
1993 MemCheckBlock->replaceAllUsesWith(Preheader);
1994
1995 if (SCEVCheckBlock) {
1996 SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
1997 new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1998 Preheader->getTerminator()->eraseFromParent();
1999 }
2000 if (MemCheckBlock) {
2001 MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2002 new UnreachableInst(Preheader->getContext(), MemCheckBlock);
2003 Preheader->getTerminator()->eraseFromParent();
2004 }
2005
2006 DT->changeImmediateDominator(LoopHeader, Preheader);
2007 if (MemCheckBlock) {
2008 DT->eraseNode(MemCheckBlock);
2009 LI->removeBlock(MemCheckBlock);
2010 }
2011 if (SCEVCheckBlock) {
2012 DT->eraseNode(SCEVCheckBlock);
2013 LI->removeBlock(SCEVCheckBlock);
2014 }
2015 }
2016
2017 /// Remove the created SCEV & memory runtime check blocks & instructions, if
2018 /// unused.
2019 ~GeneratedRTChecks() { 2020 SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT); 2021 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT); 2022 if (!SCEVCheckCond) 2023 SCEVCleaner.markResultUsed(); 2024 2025 if (!MemRuntimeCheckCond) 2026 MemCheckCleaner.markResultUsed(); 2027 2028 if (MemRuntimeCheckCond) { 2029 auto &SE = *MemCheckExp.getSE(); 2030 // Memory runtime check generation creates compares that use expanded 2031 // values. Remove them before running the SCEVExpanderCleaners. 2032 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { 2033 if (MemCheckExp.isInsertedInstruction(&I)) 2034 continue; 2035 SE.forgetValue(&I); 2036 I.eraseFromParent(); 2037 } 2038 } 2039 MemCheckCleaner.cleanup(); 2040 SCEVCleaner.cleanup(); 2041 2042 if (SCEVCheckCond) 2043 SCEVCheckBlock->eraseFromParent(); 2044 if (MemRuntimeCheckCond) 2045 MemCheckBlock->eraseFromParent(); 2046 } 2047 2048 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and 2049 /// adjusts the branches to branch to the vector preheader or \p Bypass, 2050 /// depending on the generated condition. 2051 BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass, 2052 BasicBlock *LoopVectorPreHeader, 2053 BasicBlock *LoopExitBlock) { 2054 if (!SCEVCheckCond) 2055 return nullptr; 2056 if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond)) 2057 if (C->isZero()) 2058 return nullptr; 2059 2060 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2061 2062 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); 2063 // Create new preheader for vector loop. 2064 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2065 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI); 2066 2067 SCEVCheckBlock->getTerminator()->eraseFromParent(); 2068 SCEVCheckBlock->moveBefore(LoopVectorPreHeader); 2069 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2070 SCEVCheckBlock); 2071 2072 DT->addNewBlock(SCEVCheckBlock, Pred); 2073 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock); 2074 2075 ReplaceInstWithInst( 2076 SCEVCheckBlock->getTerminator(), 2077 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond)); 2078 // Mark the check as used, to prevent it from being removed during cleanup. 2079 SCEVCheckCond = nullptr; 2080 return SCEVCheckBlock; 2081 } 2082 2083 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts 2084 /// the branches to branch to the vector preheader or \p Bypass, depending on 2085 /// the generated condition. 2086 BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass, 2087 BasicBlock *LoopVectorPreHeader) { 2088 // Check if we generated code that checks in runtime if arrays overlap. 2089 if (!MemRuntimeCheckCond) 2090 return nullptr; 2091 2092 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2093 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2094 MemCheckBlock); 2095 2096 DT->addNewBlock(MemCheckBlock, Pred); 2097 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock); 2098 MemCheckBlock->moveBefore(LoopVectorPreHeader); 2099 2100 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2101 PL->addBasicBlockToLoop(MemCheckBlock, *LI); 2102 2103 ReplaceInstWithInst( 2104 MemCheckBlock->getTerminator(), 2105 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond)); 2106 MemCheckBlock->getTerminator()->setDebugLoc( 2107 Pred->getTerminator()->getDebugLoc()); 2108 2109 // Mark the check as used, to prevent it from being removed during cleanup. 
2110 MemRuntimeCheckCond = nullptr; 2111 return MemCheckBlock; 2112 } 2113 }; 2114 2115 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 2116 // vectorization. The loop needs to be annotated with #pragma omp simd 2117 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 2118 // vector length information is not provided, vectorization is not considered 2119 // explicit. Interleave hints are not allowed either. These limitations will be 2120 // relaxed in the future. 2121 // Please, note that we are currently forced to abuse the pragma 'clang 2122 // vectorize' semantics. This pragma provides *auto-vectorization hints* 2123 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 2124 // provides *explicit vectorization hints* (LV can bypass legal checks and 2125 // assume that vectorization is legal). However, both hints are implemented 2126 // using the same metadata (llvm.loop.vectorize, processed by 2127 // LoopVectorizeHints). This will be fixed in the future when the native IR 2128 // representation for pragma 'omp simd' is introduced. 2129 static bool isExplicitVecOuterLoop(Loop *OuterLp, 2130 OptimizationRemarkEmitter *ORE) { 2131 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 2132 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 2133 2134 // Only outer loops with an explicit vectorization hint are supported. 2135 // Unannotated outer loops are ignored. 2136 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 2137 return false; 2138 2139 Function *Fn = OuterLp->getHeader()->getParent(); 2140 if (!Hints.allowVectorization(Fn, OuterLp, 2141 true /*VectorizeOnlyWhenForced*/)) { 2142 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 2143 return false; 2144 } 2145 2146 if (Hints.getInterleave() > 1) { 2147 // TODO: Interleave support is future work. 2148 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 2149 "outer loops.\n"); 2150 Hints.emitRemarkWithHints(); 2151 return false; 2152 } 2153 2154 return true; 2155 } 2156 2157 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 2158 OptimizationRemarkEmitter *ORE, 2159 SmallVectorImpl<Loop *> &V) { 2160 // Collect inner loops and outer loops without irreducible control flow. For 2161 // now, only collect outer loops that have explicit vectorization hints. If we 2162 // are stress testing the VPlan H-CFG construction, we collect the outermost 2163 // loop of every loop nest. 2164 if (L.isInnermost() || VPlanBuildStressTest || 2165 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 2166 LoopBlocksRPO RPOT(&L); 2167 RPOT.perform(LI); 2168 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 2169 V.push_back(&L); 2170 // TODO: Collect inner loops inside marked outer loops in case 2171 // vectorization fails for the outer loop. Do not invoke 2172 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 2173 // already known to be reducible. We can use an inherited attribute for 2174 // that. 2175 return; 2176 } 2177 } 2178 for (Loop *InnerL : L) 2179 collectSupportedLoops(*InnerL, LI, ORE, V); 2180 } 2181 2182 namespace { 2183 2184 /// The LoopVectorize Pass. 
2185 struct LoopVectorize : public FunctionPass { 2186 /// Pass identification, replacement for typeid 2187 static char ID; 2188 2189 LoopVectorizePass Impl; 2190 2191 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 2192 bool VectorizeOnlyWhenForced = false) 2193 : FunctionPass(ID), 2194 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 2195 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2196 } 2197 2198 bool runOnFunction(Function &F) override { 2199 if (skipFunction(F)) 2200 return false; 2201 2202 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2203 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2204 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2205 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2206 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2207 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2208 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 2209 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2210 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2211 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2212 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2213 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2214 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 2215 2216 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2217 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2218 2219 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2220 GetLAA, *ORE, PSI).MadeAnyChange; 2221 } 2222 2223 void getAnalysisUsage(AnalysisUsage &AU) const override { 2224 AU.addRequired<AssumptionCacheTracker>(); 2225 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2226 AU.addRequired<DominatorTreeWrapperPass>(); 2227 AU.addRequired<LoopInfoWrapperPass>(); 2228 AU.addRequired<ScalarEvolutionWrapperPass>(); 2229 AU.addRequired<TargetTransformInfoWrapperPass>(); 2230 AU.addRequired<AAResultsWrapperPass>(); 2231 AU.addRequired<LoopAccessLegacyAnalysis>(); 2232 AU.addRequired<DemandedBitsWrapperPass>(); 2233 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2234 AU.addRequired<InjectTLIMappingsLegacy>(); 2235 2236 // We currently do not preserve loopinfo/dominator analyses with outer loop 2237 // vectorization. Until this is addressed, mark these analyses as preserved 2238 // only for non-VPlan-native path. 2239 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 2240 if (!EnableVPlanNativePath) { 2241 AU.addPreserved<LoopInfoWrapperPass>(); 2242 AU.addPreserved<DominatorTreeWrapperPass>(); 2243 } 2244 2245 AU.addPreserved<BasicAAWrapperPass>(); 2246 AU.addPreserved<GlobalsAAWrapperPass>(); 2247 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 2248 } 2249 }; 2250 2251 } // end anonymous namespace 2252 2253 //===----------------------------------------------------------------------===// 2254 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2255 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2256 //===----------------------------------------------------------------------===// 2257 2258 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2259 // We need to place the broadcast of invariant variables outside the loop, 2260 // but only if it's proven safe to do so. Else, broadcast will be inside 2261 // vector loop body. 
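// For example (illustrative IR): splatting a loop-invariant i32 %x at a fixed
// VF of 4 produces, at the chosen insert point, roughly
//   %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
//   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
//                                    <4 x i32> poison, <4 x i32> zeroinitializer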
2262 Instruction *Instr = dyn_cast<Instruction>(V); 2263 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2264 (!Instr || 2265 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2266 // Place the code for broadcasting invariant variables in the new preheader. 2267 IRBuilder<>::InsertPointGuard Guard(Builder); 2268 if (SafeToHoist) 2269 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2270 2271 // Broadcast the scalar into all locations in the vector. 2272 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2273 2274 return Shuf; 2275 } 2276 2277 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 2278 const InductionDescriptor &II, Value *Step, Value *Start, 2279 Instruction *EntryVal, VPValue *Def, VPValue *CastDef, 2280 VPTransformState &State) { 2281 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2282 "Expected either an induction phi-node or a truncate of it!"); 2283 2284 // Construct the initial value of the vector IV in the vector loop preheader 2285 auto CurrIP = Builder.saveIP(); 2286 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2287 if (isa<TruncInst>(EntryVal)) { 2288 assert(Start->getType()->isIntegerTy() && 2289 "Truncation requires an integer type"); 2290 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2291 Step = Builder.CreateTrunc(Step, TruncType); 2292 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2293 } 2294 2295 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0); 2296 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2297 Value *SteppedStart = 2298 getStepVector(SplatStart, Zero, Step, II.getInductionOpcode()); 2299 2300 // We create vector phi nodes for both integer and floating-point induction 2301 // variables. Here, we determine the kind of arithmetic we will perform. 2302 Instruction::BinaryOps AddOp; 2303 Instruction::BinaryOps MulOp; 2304 if (Step->getType()->isIntegerTy()) { 2305 AddOp = Instruction::Add; 2306 MulOp = Instruction::Mul; 2307 } else { 2308 AddOp = II.getInductionOpcode(); 2309 MulOp = Instruction::FMul; 2310 } 2311 2312 // Multiply the vectorization factor by the step using integer or 2313 // floating-point arithmetic as appropriate. 2314 Type *StepType = Step->getType(); 2315 Value *RuntimeVF; 2316 if (Step->getType()->isFloatingPointTy()) 2317 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, VF); 2318 else 2319 RuntimeVF = getRuntimeVF(Builder, StepType, VF); 2320 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 2321 2322 // Create a vector splat to use in the induction update. 2323 // 2324 // FIXME: If the step is non-constant, we create the vector splat with 2325 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2326 // handle a constant vector splat. 2327 Value *SplatVF = isa<Constant>(Mul) 2328 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 2329 : Builder.CreateVectorSplat(VF, Mul); 2330 Builder.restoreIP(CurrIP); 2331 2332 // We may need to add the step a number of times, depending on the unroll 2333 // factor. The last of those goes into the PHI. 
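// For example (illustrative, integer induction with UF = 2): the code below
// produces roughly
//   %vec.ind      = phi [ %stepped.start, %preheader ], [ %vec.ind.next, %latch ]
//   %step.add     = add %vec.ind, %vf.step.splat    ; value used for part 1
//   %vec.ind.next = add %step.add, %vf.step.splat   ; moved to the latch below
// where %vf.step.splat stands for the SplatVF value computed above.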
2334 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2335 &*LoopVectorBody->getFirstInsertionPt()); 2336 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 2337 Instruction *LastInduction = VecInd; 2338 for (unsigned Part = 0; Part < UF; ++Part) { 2339 State.set(Def, LastInduction, Part); 2340 2341 if (isa<TruncInst>(EntryVal)) 2342 addMetadata(LastInduction, EntryVal); 2343 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef, 2344 State, Part); 2345 2346 LastInduction = cast<Instruction>( 2347 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 2348 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 2349 } 2350 2351 // Move the last step to the end of the latch block. This ensures consistent 2352 // placement of all induction updates. 2353 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2354 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2355 auto *ICmp = cast<Instruction>(Br->getCondition()); 2356 LastInduction->moveBefore(ICmp); 2357 LastInduction->setName("vec.ind.next"); 2358 2359 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2360 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2361 } 2362 2363 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 2364 return Cost->isScalarAfterVectorization(I, VF) || 2365 Cost->isProfitableToScalarize(I, VF); 2366 } 2367 2368 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2369 if (shouldScalarizeInstruction(IV)) 2370 return true; 2371 auto isScalarInst = [&](User *U) -> bool { 2372 auto *I = cast<Instruction>(U); 2373 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 2374 }; 2375 return llvm::any_of(IV->users(), isScalarInst); 2376 } 2377 2378 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 2379 const InductionDescriptor &ID, const Instruction *EntryVal, 2380 Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State, 2381 unsigned Part, unsigned Lane) { 2382 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2383 "Expected either an induction phi-node or a truncate of it!"); 2384 2385 // This induction variable is not the phi from the original loop but the 2386 // newly-created IV based on the proof that casted Phi is equal to the 2387 // uncasted Phi in the vectorized loop (under a runtime guard possibly). It 2388 // re-uses the same InductionDescriptor that original IV uses but we don't 2389 // have to do any recording in this case - that is done when original IV is 2390 // processed. 2391 if (isa<TruncInst>(EntryVal)) 2392 return; 2393 2394 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 2395 if (Casts.empty()) 2396 return; 2397 // Only the first Cast instruction in the Casts vector is of interest. 2398 // The rest of the Casts (if exist) have no uses outside the 2399 // induction update chain itself. 
2400 if (Lane < UINT_MAX) 2401 State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane)); 2402 else 2403 State.set(CastDef, VectorLoopVal, Part); 2404 } 2405 2406 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start, 2407 TruncInst *Trunc, VPValue *Def, 2408 VPValue *CastDef, 2409 VPTransformState &State) { 2410 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 2411 "Primary induction variable must have an integer type"); 2412 2413 auto II = Legal->getInductionVars().find(IV); 2414 assert(II != Legal->getInductionVars().end() && "IV is not an induction"); 2415 2416 auto ID = II->second; 2417 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2418 2419 // The value from the original loop to which we are mapping the new induction 2420 // variable. 2421 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2422 2423 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2424 2425 // Generate code for the induction step. Note that induction steps are 2426 // required to be loop-invariant 2427 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 2428 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 2429 "Induction step should be loop invariant"); 2430 if (PSE.getSE()->isSCEVable(IV->getType())) { 2431 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2432 return Exp.expandCodeFor(Step, Step->getType(), 2433 LoopVectorPreHeader->getTerminator()); 2434 } 2435 return cast<SCEVUnknown>(Step)->getValue(); 2436 }; 2437 2438 // The scalar value to broadcast. This is derived from the canonical 2439 // induction variable. If a truncation type is given, truncate the canonical 2440 // induction variable and step. Otherwise, derive these values from the 2441 // induction descriptor. 2442 auto CreateScalarIV = [&](Value *&Step) -> Value * { 2443 Value *ScalarIV = Induction; 2444 if (IV != OldInduction) { 2445 ScalarIV = IV->getType()->isIntegerTy() 2446 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 2447 : Builder.CreateCast(Instruction::SIToFP, Induction, 2448 IV->getType()); 2449 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 2450 ScalarIV->setName("offset.idx"); 2451 } 2452 if (Trunc) { 2453 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2454 assert(Step->getType()->isIntegerTy() && 2455 "Truncation requires an integer step"); 2456 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 2457 Step = Builder.CreateTrunc(Step, TruncType); 2458 } 2459 return ScalarIV; 2460 }; 2461 2462 // Create the vector values from the scalar IV, in the absence of creating a 2463 // vector IV. 2464 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { 2465 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2466 for (unsigned Part = 0; Part < UF; ++Part) { 2467 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2468 Value *StartIdx; 2469 if (Step->getType()->isFloatingPointTy()) 2470 StartIdx = getRuntimeVFAsFloat(Builder, Step->getType(), VF * Part); 2471 else 2472 StartIdx = getRuntimeVF(Builder, Step->getType(), VF * Part); 2473 2474 Value *EntryPart = 2475 getStepVector(Broadcasted, StartIdx, Step, ID.getInductionOpcode()); 2476 State.set(Def, EntryPart, Part); 2477 if (Trunc) 2478 addMetadata(EntryPart, Trunc); 2479 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef, 2480 State, Part); 2481 } 2482 }; 2483 2484 // Fast-math-flags propagate from the original induction instruction. 
2485 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 2486 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) 2487 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); 2488 2489 // Now do the actual transformations, and start with creating the step value. 2490 Value *Step = CreateStepValue(ID.getStep()); 2491 if (VF.isZero() || VF.isScalar()) { 2492 Value *ScalarIV = CreateScalarIV(Step); 2493 CreateSplatIV(ScalarIV, Step); 2494 return; 2495 } 2496 2497 // Determine if we want a scalar version of the induction variable. This is 2498 // true if the induction variable itself is not widened, or if it has at 2499 // least one user in the loop that is not widened. 2500 auto NeedsScalarIV = needsScalarInduction(EntryVal); 2501 if (!NeedsScalarIV) { 2502 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2503 State); 2504 return; 2505 } 2506 2507 // Try to create a new independent vector induction variable. If we can't 2508 // create the phi node, we will splat the scalar induction variable in each 2509 // loop iteration. 2510 if (!shouldScalarizeInstruction(EntryVal)) { 2511 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2512 State); 2513 Value *ScalarIV = CreateScalarIV(Step); 2514 // Create scalar steps that can be used by instructions we will later 2515 // scalarize. Note that the addition of the scalar steps will not increase 2516 // the number of instructions in the loop in the common case prior to 2517 // InstCombine. We will be trading one vector extract for each scalar step. 2518 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2519 return; 2520 } 2521 2522 // All IV users are scalar instructions, so only emit a scalar IV, not a 2523 // vectorised IV. Except when we tail-fold, then the splat IV feeds the 2524 // predicate used by the masked loads/stores. 2525 Value *ScalarIV = CreateScalarIV(Step); 2526 if (!Cost->isScalarEpilogueAllowed()) 2527 CreateSplatIV(ScalarIV, Step); 2528 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2529 } 2530 2531 Value *InnerLoopVectorizer::getStepVector(Value *Val, Value *StartIdx, 2532 Value *Step, 2533 Instruction::BinaryOps BinOp) { 2534 // Create and check the types. 2535 auto *ValVTy = cast<VectorType>(Val->getType()); 2536 ElementCount VLen = ValVTy->getElementCount(); 2537 2538 Type *STy = Val->getType()->getScalarType(); 2539 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2540 "Induction Step must be an integer or FP"); 2541 assert(Step->getType() == STy && "Step has wrong type"); 2542 2543 SmallVector<Constant *, 8> Indices; 2544 2545 // Create a vector of consecutive numbers from zero to VF. 2546 VectorType *InitVecValVTy = ValVTy; 2547 Type *InitVecValSTy = STy; 2548 if (STy->isFloatingPointTy()) { 2549 InitVecValSTy = 2550 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits()); 2551 InitVecValVTy = VectorType::get(InitVecValSTy, VLen); 2552 } 2553 Value *InitVec = Builder.CreateStepVector(InitVecValVTy); 2554 2555 // Splat the StartIdx 2556 Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx); 2557 2558 if (STy->isIntegerTy()) { 2559 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat); 2560 Step = Builder.CreateVectorSplat(VLen, Step); 2561 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2562 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2563 // which can be found from the original scalar operations. 
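// For reference (illustrative, fixed VF of 4, integer case): the multiply and
// add below compute
//   Val + (<0, 1, 2, 3> + splat(StartIdx)) * splat(Step)
// i.e. lane L of the result is Val[L] + (StartIdx + L) * Step.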
2564 Step = Builder.CreateMul(InitVec, Step); 2565 return Builder.CreateAdd(Val, Step, "induction"); 2566 } 2567 2568 // Floating point induction. 2569 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2570 "Binary Opcode should be specified for FP induction"); 2571 InitVec = Builder.CreateUIToFP(InitVec, ValVTy); 2572 InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat); 2573 2574 Step = Builder.CreateVectorSplat(VLen, Step); 2575 Value *MulOp = Builder.CreateFMul(InitVec, Step); 2576 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2577 } 2578 2579 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2580 Instruction *EntryVal, 2581 const InductionDescriptor &ID, 2582 VPValue *Def, VPValue *CastDef, 2583 VPTransformState &State) { 2584 // We shouldn't have to build scalar steps if we aren't vectorizing. 2585 assert(VF.isVector() && "VF should be greater than one"); 2586 // Get the value type and ensure it and the step have the same integer type. 2587 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2588 assert(ScalarIVTy == Step->getType() && 2589 "Val and Step should have the same type"); 2590 2591 // We build scalar steps for both integer and floating-point induction 2592 // variables. Here, we determine the kind of arithmetic we will perform. 2593 Instruction::BinaryOps AddOp; 2594 Instruction::BinaryOps MulOp; 2595 if (ScalarIVTy->isIntegerTy()) { 2596 AddOp = Instruction::Add; 2597 MulOp = Instruction::Mul; 2598 } else { 2599 AddOp = ID.getInductionOpcode(); 2600 MulOp = Instruction::FMul; 2601 } 2602 2603 // Determine the number of scalars we need to generate for each unroll 2604 // iteration. If EntryVal is uniform, we only need to generate the first 2605 // lane. Otherwise, we generate all VF values. 2606 bool IsUniform = 2607 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF); 2608 unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue(); 2609 // Compute the scalar steps and save the results in State. 2610 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2611 ScalarIVTy->getScalarSizeInBits()); 2612 Type *VecIVTy = nullptr; 2613 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; 2614 if (!IsUniform && VF.isScalable()) { 2615 VecIVTy = VectorType::get(ScalarIVTy, VF); 2616 UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF)); 2617 SplatStep = Builder.CreateVectorSplat(VF, Step); 2618 SplatIV = Builder.CreateVectorSplat(VF, ScalarIV); 2619 } 2620 2621 for (unsigned Part = 0; Part < UF; ++Part) { 2622 Value *StartIdx0 = createStepForVF(Builder, IntStepTy, VF, Part); 2623 2624 if (!IsUniform && VF.isScalable()) { 2625 auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0); 2626 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); 2627 if (ScalarIVTy->isFloatingPointTy()) 2628 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); 2629 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); 2630 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); 2631 State.set(Def, Add, Part); 2632 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2633 Part); 2634 // It's useful to record the lane values too for the known minimum number 2635 // of elements so we do those below. This improves the code quality when 2636 // trying to extract the first element, for example. 
2637 } 2638 2639 if (ScalarIVTy->isFloatingPointTy()) 2640 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); 2641 2642 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2643 Value *StartIdx = Builder.CreateBinOp( 2644 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); 2645 // The step returned by `createStepForVF` is a runtime-evaluated value 2646 // when VF is scalable. Otherwise, it should be folded into a Constant. 2647 assert((VF.isScalable() || isa<Constant>(StartIdx)) && 2648 "Expected StartIdx to be folded to a constant when VF is not " 2649 "scalable"); 2650 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2651 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2652 State.set(Def, Add, VPIteration(Part, Lane)); 2653 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2654 Part, Lane); 2655 } 2656 } 2657 } 2658 2659 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2660 const VPIteration &Instance, 2661 VPTransformState &State) { 2662 Value *ScalarInst = State.get(Def, Instance); 2663 Value *VectorValue = State.get(Def, Instance.Part); 2664 VectorValue = Builder.CreateInsertElement( 2665 VectorValue, ScalarInst, 2666 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2667 State.set(Def, VectorValue, Instance.Part); 2668 } 2669 2670 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2671 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2672 return Builder.CreateVectorReverse(Vec, "reverse"); 2673 } 2674 2675 // Return whether we allow using masked interleave-groups (for dealing with 2676 // strided loads/stores that reside in predicated blocks, or for dealing 2677 // with gaps). 2678 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2679 // If an override option has been passed in for interleaved accesses, use it. 2680 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2681 return EnableMaskedInterleavedMemAccesses; 2682 2683 return TTI.enableMaskedInterleavedAccessVectorization(); 2684 } 2685 2686 // Try to vectorize the interleave group that \p Instr belongs to. 2687 // 2688 // E.g. Translate following interleaved load group (factor = 3): 2689 // for (i = 0; i < N; i+=3) { 2690 // R = Pic[i]; // Member of index 0 2691 // G = Pic[i+1]; // Member of index 1 2692 // B = Pic[i+2]; // Member of index 2 2693 // ... // do something to R, G, B 2694 // } 2695 // To: 2696 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2697 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2698 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2699 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2700 // 2701 // Or translate following interleaved store group (factor = 3): 2702 // for (i = 0; i < N; i+=3) { 2703 // ... 
do something to R, G, B 2704 // Pic[i] = R; // Member of index 0 2705 // Pic[i+1] = G; // Member of index 1 2706 // Pic[i+2] = B; // Member of index 2 2707 // } 2708 // To: 2709 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2710 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2711 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2712 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2713 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2714 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2715 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2716 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2717 VPValue *BlockInMask) { 2718 Instruction *Instr = Group->getInsertPos(); 2719 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2720 2721 // Prepare for the vector type of the interleaved load/store. 2722 Type *ScalarTy = getLoadStoreType(Instr); 2723 unsigned InterleaveFactor = Group->getFactor(); 2724 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2725 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2726 2727 // Prepare for the new pointers. 2728 SmallVector<Value *, 2> AddrParts; 2729 unsigned Index = Group->getIndex(Instr); 2730 2731 // TODO: extend the masked interleaved-group support to reversed access. 2732 assert((!BlockInMask || !Group->isReverse()) && 2733 "Reversed masked interleave-group not supported."); 2734 2735 // If the group is reverse, adjust the index to refer to the last vector lane 2736 // instead of the first. We adjust the index from the first vector lane, 2737 // rather than directly getting the pointer for lane VF - 1, because the 2738 // pointer operand of the interleaved access is supposed to be uniform. For 2739 // uniform instructions, we're only required to generate a value for the 2740 // first vector lane in each unroll iteration. 2741 if (Group->isReverse()) 2742 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2743 2744 for (unsigned Part = 0; Part < UF; Part++) { 2745 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2746 setDebugLocFromInst(AddrPart); 2747 2748 // Notice current instruction could be any index. Need to adjust the address 2749 // to the member of index 0. 2750 // 2751 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2752 // b = A[i]; // Member of index 0 2753 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2754 // 2755 // E.g. A[i+1] = a; // Member of index 1 2756 // A[i] = b; // Member of index 0 2757 // A[i+2] = c; // Member of index 2 (Current instruction) 2758 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2759 2760 bool InBounds = false; 2761 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2762 InBounds = gep->isInBounds(); 2763 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2764 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2765 2766 // Cast to the vector pointer type. 
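// E.g. (illustration only) continuing the factor-3 example above with VF = 4
// and an i32 element type, the adjusted i32* pointer is cast to <12 x i32>*
// so that the single wide load/store created below covers all 12 interleaved
// elements of the group at once.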
2767 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2768 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2769 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2770 } 2771 2772 setDebugLocFromInst(Instr); 2773 Value *PoisonVec = PoisonValue::get(VecTy); 2774 2775 Value *MaskForGaps = nullptr; 2776 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2777 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2778 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2779 } 2780 2781 // Vectorize the interleaved load group. 2782 if (isa<LoadInst>(Instr)) { 2783 // For each unroll part, create a wide load for the group. 2784 SmallVector<Value *, 2> NewLoads; 2785 for (unsigned Part = 0; Part < UF; Part++) { 2786 Instruction *NewLoad; 2787 if (BlockInMask || MaskForGaps) { 2788 assert(useMaskedInterleavedAccesses(*TTI) && 2789 "masked interleaved groups are not allowed."); 2790 Value *GroupMask = MaskForGaps; 2791 if (BlockInMask) { 2792 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2793 Value *ShuffledMask = Builder.CreateShuffleVector( 2794 BlockInMaskPart, 2795 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2796 "interleaved.mask"); 2797 GroupMask = MaskForGaps 2798 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2799 MaskForGaps) 2800 : ShuffledMask; 2801 } 2802 NewLoad = 2803 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), 2804 GroupMask, PoisonVec, "wide.masked.vec"); 2805 } 2806 else 2807 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2808 Group->getAlign(), "wide.vec"); 2809 Group->addMetadata(NewLoad); 2810 NewLoads.push_back(NewLoad); 2811 } 2812 2813 // For each member in the group, shuffle out the appropriate data from the 2814 // wide loads. 2815 unsigned J = 0; 2816 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2817 Instruction *Member = Group->getMember(I); 2818 2819 // Skip the gaps in the group. 2820 if (!Member) 2821 continue; 2822 2823 auto StrideMask = 2824 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2825 for (unsigned Part = 0; Part < UF; Part++) { 2826 Value *StridedVec = Builder.CreateShuffleVector( 2827 NewLoads[Part], StrideMask, "strided.vec"); 2828 2829 // If this member has different type, cast the result type. 2830 if (Member->getType() != ScalarTy) { 2831 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2832 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2833 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2834 } 2835 2836 if (Group->isReverse()) 2837 StridedVec = reverseVector(StridedVec); 2838 2839 State.set(VPDefs[J], StridedVec, Part); 2840 } 2841 ++J; 2842 } 2843 return; 2844 } 2845 2846 // The sub vector type for current instruction. 2847 auto *SubVT = VectorType::get(ScalarTy, VF); 2848 2849 // Vectorize the interleaved store group. 2850 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2851 assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) && 2852 "masked interleaved groups are not allowed."); 2853 assert((!MaskForGaps || !VF.isScalable()) && 2854 "masking gaps for scalable vectors is not yet supported."); 2855 for (unsigned Part = 0; Part < UF; Part++) { 2856 // Collect the stored vector from each member. 
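// E.g. (illustration only) for a factor-3 store group whose member at index 1
// is a gap, the loop below collects %R.vec, a poison sub-vector, and %B.vec;
// after concatenation and interleaving, the lanes belonging to the missing
// member are poison, and MaskForGaps prevents the masked store from writing
// them.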
2857 SmallVector<Value *, 4> StoredVecs; 2858 for (unsigned i = 0; i < InterleaveFactor; i++) { 2859 assert((Group->getMember(i) || MaskForGaps) && 2860 "Fail to get a member from an interleaved store group"); 2861 Instruction *Member = Group->getMember(i); 2862 2863 // Skip the gaps in the group. 2864 if (!Member) { 2865 Value *Undef = PoisonValue::get(SubVT); 2866 StoredVecs.push_back(Undef); 2867 continue; 2868 } 2869 2870 Value *StoredVec = State.get(StoredValues[i], Part); 2871 2872 if (Group->isReverse()) 2873 StoredVec = reverseVector(StoredVec); 2874 2875 // If this member has different type, cast it to a unified type. 2876 2877 if (StoredVec->getType() != SubVT) 2878 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2879 2880 StoredVecs.push_back(StoredVec); 2881 } 2882 2883 // Concatenate all vectors into a wide vector. 2884 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2885 2886 // Interleave the elements in the wide vector. 2887 Value *IVec = Builder.CreateShuffleVector( 2888 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2889 "interleaved.vec"); 2890 2891 Instruction *NewStoreInstr; 2892 if (BlockInMask || MaskForGaps) { 2893 Value *GroupMask = MaskForGaps; 2894 if (BlockInMask) { 2895 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2896 Value *ShuffledMask = Builder.CreateShuffleVector( 2897 BlockInMaskPart, 2898 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2899 "interleaved.mask"); 2900 GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And, 2901 ShuffledMask, MaskForGaps) 2902 : ShuffledMask; 2903 } 2904 NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part], 2905 Group->getAlign(), GroupMask); 2906 } else 2907 NewStoreInstr = 2908 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2909 2910 Group->addMetadata(NewStoreInstr); 2911 } 2912 } 2913 2914 void InnerLoopVectorizer::vectorizeMemoryInstruction( 2915 Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr, 2916 VPValue *StoredValue, VPValue *BlockInMask, bool ConsecutiveStride, 2917 bool Reverse) { 2918 // Attempt to issue a wide load. 2919 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2920 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2921 2922 assert((LI || SI) && "Invalid Load/Store instruction"); 2923 assert((!SI || StoredValue) && "No stored value provided for widened store"); 2924 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 2925 2926 Type *ScalarDataTy = getLoadStoreType(Instr); 2927 2928 auto *DataTy = VectorType::get(ScalarDataTy, VF); 2929 const Align Alignment = getLoadStoreAlignment(Instr); 2930 bool CreateGatherScatter = !ConsecutiveStride; 2931 2932 VectorParts BlockInMaskParts(UF); 2933 bool isMaskRequired = BlockInMask; 2934 if (isMaskRequired) 2935 for (unsigned Part = 0; Part < UF; ++Part) 2936 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 2937 2938 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2939 // Calculate the pointer for the specific unroll-part. 2940 GetElementPtrInst *PartPtr = nullptr; 2941 2942 bool InBounds = false; 2943 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2944 InBounds = gep->isInBounds(); 2945 if (Reverse) { 2946 // If the address is consecutive but reversed, then the 2947 // wide store needs to start at the last vector element. 
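// E.g. (fixed-width illustration) with VF = 4, Part = 1 and a base pointer
// %p, the two GEPs below offset %p by -1 * 4 = -4 and then by 1 - 4 = -3,
// so the wide access covers %p[-7] .. %p[-4]; reversing the loaded or stored
// vector then yields the lane order %p[-4], %p[-5], %p[-6], %p[-7] that the
// reversed scalar accesses expect.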
2948 // RunTimeVF = VScale * VF.getKnownMinValue() 2949 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 2950 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF); 2951 // NumElt = -Part * RunTimeVF 2952 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 2953 // LastLane = 1 - RunTimeVF 2954 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 2955 PartPtr = 2956 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 2957 PartPtr->setIsInBounds(InBounds); 2958 PartPtr = cast<GetElementPtrInst>( 2959 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 2960 PartPtr->setIsInBounds(InBounds); 2961 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2962 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 2963 } else { 2964 Value *Increment = 2965 createStepForVF(Builder, Builder.getInt32Ty(), VF, Part); 2966 PartPtr = cast<GetElementPtrInst>( 2967 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 2968 PartPtr->setIsInBounds(InBounds); 2969 } 2970 2971 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2972 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2973 }; 2974 2975 // Handle Stores: 2976 if (SI) { 2977 setDebugLocFromInst(SI); 2978 2979 for (unsigned Part = 0; Part < UF; ++Part) { 2980 Instruction *NewSI = nullptr; 2981 Value *StoredVal = State.get(StoredValue, Part); 2982 if (CreateGatherScatter) { 2983 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2984 Value *VectorGep = State.get(Addr, Part); 2985 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2986 MaskPart); 2987 } else { 2988 if (Reverse) { 2989 // If we store to reverse consecutive memory locations, then we need 2990 // to reverse the order of elements in the stored value. 2991 StoredVal = reverseVector(StoredVal); 2992 // We don't want to update the value in the map as it might be used in 2993 // another expression. So don't call resetVectorValue(StoredVal). 2994 } 2995 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 2996 if (isMaskRequired) 2997 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2998 BlockInMaskParts[Part]); 2999 else 3000 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 3001 } 3002 addMetadata(NewSI, SI); 3003 } 3004 return; 3005 } 3006 3007 // Handle loads. 3008 assert(LI && "Must have a load instruction"); 3009 setDebugLocFromInst(LI); 3010 for (unsigned Part = 0; Part < UF; ++Part) { 3011 Value *NewLI; 3012 if (CreateGatherScatter) { 3013 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 3014 Value *VectorGep = State.get(Addr, Part); 3015 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, 3016 nullptr, "wide.masked.gather"); 3017 addMetadata(NewLI, LI); 3018 } else { 3019 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 3020 if (isMaskRequired) 3021 NewLI = Builder.CreateMaskedLoad( 3022 DataTy, VecPtr, Alignment, BlockInMaskParts[Part], 3023 PoisonValue::get(DataTy), "wide.masked.load"); 3024 else 3025 NewLI = 3026 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 3027 3028 // Add metadata to the load, but setVectorValue to the reverse shuffle. 
3029 addMetadata(NewLI, LI); 3030 if (Reverse) 3031 NewLI = reverseVector(NewLI); 3032 } 3033 3034 State.set(Def, NewLI, Part); 3035 } 3036 } 3037 3038 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def, 3039 VPUser &User, 3040 const VPIteration &Instance, 3041 bool IfPredicateInstr, 3042 VPTransformState &State) { 3043 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 3044 3045 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 3046 // the first lane and part. 3047 if (isa<NoAliasScopeDeclInst>(Instr)) 3048 if (!Instance.isFirstIteration()) 3049 return; 3050 3051 setDebugLocFromInst(Instr); 3052 3053 // Does this instruction return a value ? 3054 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 3055 3056 Instruction *Cloned = Instr->clone(); 3057 if (!IsVoidRetTy) 3058 Cloned->setName(Instr->getName() + ".cloned"); 3059 3060 State.Builder.SetInsertPoint(Builder.GetInsertBlock(), 3061 Builder.GetInsertPoint()); 3062 // Replace the operands of the cloned instructions with their scalar 3063 // equivalents in the new loop. 3064 for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) { 3065 auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op)); 3066 auto InputInstance = Instance; 3067 if (!Operand || !OrigLoop->contains(Operand) || 3068 (Cost->isUniformAfterVectorization(Operand, State.VF))) 3069 InputInstance.Lane = VPLane::getFirstLane(); 3070 auto *NewOp = State.get(User.getOperand(op), InputInstance); 3071 Cloned->setOperand(op, NewOp); 3072 } 3073 addNewMetadata(Cloned, Instr); 3074 3075 // Place the cloned scalar in the new loop. 3076 Builder.Insert(Cloned); 3077 3078 State.set(Def, Cloned, Instance); 3079 3080 // If we just cloned a new assumption, add it the assumption cache. 3081 if (auto *II = dyn_cast<AssumeInst>(Cloned)) 3082 AC->registerAssumption(II); 3083 3084 // End if-block. 3085 if (IfPredicateInstr) 3086 PredicatedInstructions.push_back(Cloned); 3087 } 3088 3089 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 3090 Value *End, Value *Step, 3091 Instruction *DL) { 3092 BasicBlock *Header = L->getHeader(); 3093 BasicBlock *Latch = L->getLoopLatch(); 3094 // As we're just creating this loop, it's possible no latch exists 3095 // yet. If so, use the header as this will be a single block loop. 3096 if (!Latch) 3097 Latch = Header; 3098 3099 IRBuilder<> B(&*Header->getFirstInsertionPt()); 3100 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 3101 setDebugLocFromInst(OldInst, &B); 3102 auto *Induction = B.CreatePHI(Start->getType(), 2, "index"); 3103 3104 B.SetInsertPoint(Latch->getTerminator()); 3105 setDebugLocFromInst(OldInst, &B); 3106 3107 // Create i+1 and fill the PHINode. 3108 // 3109 // If the tail is not folded, we know that End - Start >= Step (either 3110 // statically or through the minimum iteration checks). We also know that both 3111 // Start % Step == 0 and End % Step == 0. We exit the vector loop if %IV + 3112 // %Step == %End. Hence we must exit the loop before %IV + %Step unsigned 3113 // overflows and we can mark the induction increment as NUW. 3114 Value *Next = B.CreateAdd(Induction, Step, "index.next", 3115 /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false); 3116 Induction->addIncoming(Start, L->getLoopPreheader()); 3117 Induction->addIncoming(Next, Latch); 3118 // Create the compare. 
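// Together with the branch created below this gives, roughly (names and
// types illustrative):
//   %index      = phi i64 [ %start, %preheader ], [ %index.next, %latch ]
//   %index.next = add nuw i64 %index, %step   ; nuw only if the tail is not
//                                             ; folded by masking
//   %cmp        = icmp eq i64 %index.next, %end
//   br i1 %cmp, label %exit, label %header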
3119 Value *ICmp = B.CreateICmpEQ(Next, End); 3120 B.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header); 3121 3122 // Now we have two terminators. Remove the old one from the block. 3123 Latch->getTerminator()->eraseFromParent(); 3124 3125 return Induction; 3126 } 3127 3128 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 3129 if (TripCount) 3130 return TripCount; 3131 3132 assert(L && "Create Trip Count for null loop."); 3133 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3134 // Find the loop boundaries. 3135 ScalarEvolution *SE = PSE.getSE(); 3136 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 3137 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 3138 "Invalid loop count"); 3139 3140 Type *IdxTy = Legal->getWidestInductionType(); 3141 assert(IdxTy && "No type for induction"); 3142 3143 // The exit count might have the type of i64 while the phi is i32. This can 3144 // happen if we have an induction variable that is sign extended before the 3145 // compare. The only way that we get a backedge taken count is that the 3146 // induction variable was signed and as such will not overflow. In such a case 3147 // truncation is legal. 3148 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 3149 IdxTy->getPrimitiveSizeInBits()) 3150 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3151 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3152 3153 // Get the total trip count from the count by adding 1. 3154 const SCEV *ExitCount = SE->getAddExpr( 3155 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3156 3157 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3158 3159 // Expand the trip count and place the new instructions in the preheader. 3160 // Notice that the pre-header does not change, only the loop body. 3161 SCEVExpander Exp(*SE, DL, "induction"); 3162 3163 // Count holds the overall loop count (N). 3164 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3165 L->getLoopPreheader()->getTerminator()); 3166 3167 if (TripCount->getType()->isPointerTy()) 3168 TripCount = 3169 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3170 L->getLoopPreheader()->getTerminator()); 3171 3172 return TripCount; 3173 } 3174 3175 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3176 if (VectorTripCount) 3177 return VectorTripCount; 3178 3179 Value *TC = getOrCreateTripCount(L); 3180 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3181 3182 Type *Ty = TC->getType(); 3183 // This is where we can make the step a runtime constant. 3184 Value *Step = createStepForVF(Builder, Ty, VF, UF); 3185 3186 // If the tail is to be folded by masking, round the number of iterations N 3187 // up to a multiple of Step instead of rounding down. This is done by first 3188 // adding Step-1 and then rounding down. Note that it's ok if this addition 3189 // overflows: the vector induction variable will eventually wrap to zero given 3190 // that it starts at zero and its Step is a power of two; the loop will then 3191 // exit, with the last early-exit vector comparison also producing all-true. 
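// For example (numbers purely illustrative): with VF * UF = 8 and a trip
// count N = 10, folding the tail rounds N up to 10 + 7 = 17, so the URem
// below yields 1 and the vector trip count becomes 17 - 1 = 16, with the
// final partial iteration executed under a mask. Without tail folding the
// vector trip count would be 10 - (10 % 8) = 8 and the remaining two
// iterations would run in the scalar epilogue loop.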
3192 if (Cost->foldTailByMasking()) { 3193 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 3194 "VF*UF must be a power of 2 when folding tail by masking"); 3195 assert(!VF.isScalable() && 3196 "Tail folding not yet supported for scalable vectors"); 3197 TC = Builder.CreateAdd( 3198 TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up"); 3199 } 3200 3201 // Now we need to generate the expression for the part of the loop that the 3202 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3203 // iterations are not required for correctness, or N - Step, otherwise. Step 3204 // is equal to the vectorization factor (number of SIMD elements) times the 3205 // unroll factor (number of SIMD instructions). 3206 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3207 3208 // There are cases where we *must* run at least one iteration in the remainder 3209 // loop. See the cost model for when this can happen. If the step evenly 3210 // divides the trip count, we set the remainder to be equal to the step. If 3211 // the step does not evenly divide the trip count, no adjustment is necessary 3212 // since there will already be scalar iterations. Note that the minimum 3213 // iterations check ensures that N >= Step. 3214 if (Cost->requiresScalarEpilogue(VF)) { 3215 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3216 R = Builder.CreateSelect(IsZero, Step, R); 3217 } 3218 3219 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3220 3221 return VectorTripCount; 3222 } 3223 3224 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 3225 const DataLayout &DL) { 3226 // Verify that V is a vector type with same number of elements as DstVTy. 3227 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 3228 unsigned VF = DstFVTy->getNumElements(); 3229 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 3230 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 3231 Type *SrcElemTy = SrcVecTy->getElementType(); 3232 Type *DstElemTy = DstFVTy->getElementType(); 3233 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 3234 "Vector elements must have same size"); 3235 3236 // Do a direct cast if element types are castable. 3237 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 3238 return Builder.CreateBitOrPointerCast(V, DstFVTy); 3239 } 3240 // V cannot be directly casted to desired vector type. 3241 // May happen when V is a floating point vector but DstVTy is a vector of 3242 // pointers or vice-versa. Handle this using a two-step bitcast using an 3243 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 3244 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 3245 "Only one type should be a pointer type"); 3246 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 3247 "Only one type should be a floating point type"); 3248 Type *IntTy = 3249 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 3250 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 3251 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 3252 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 3253 } 3254 3255 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3256 BasicBlock *Bypass) { 3257 Value *Count = getOrCreateTripCount(L); 3258 // Reuse existing vector loop preheader for TC checks. 3259 // Note that new preheader block is generated for vector loop. 
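// A rough sketch of the control flow this sets up (block names and the exact
// comparison are illustrative):
//
//   vector.ph (reused as the TC check block):
//     %min.iters.check = icmp ult i64 %N, VF * UF   ; ule if a scalar
//                                                   ; epilogue is required
//     br i1 %min.iters.check, label %scalar.ph, label %vector.ph.split
//
// i.e. if the trip count cannot fill even one vector iteration, the vector
// loop is bypassed entirely.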
3260 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 3261 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 3262 3263 // Generate code to check if the loop's trip count is less than VF * UF, or 3264 // equal to it in case a scalar epilogue is required; this implies that the 3265 // vector trip count is zero. This check also covers the case where adding one 3266 // to the backedge-taken count overflowed leading to an incorrect trip count 3267 // of zero. In this case we will also jump to the scalar loop. 3268 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE 3269 : ICmpInst::ICMP_ULT; 3270 3271 // If tail is to be folded, vector loop takes care of all iterations. 3272 Value *CheckMinIters = Builder.getFalse(); 3273 if (!Cost->foldTailByMasking()) { 3274 Value *Step = createStepForVF(Builder, Count->getType(), VF, UF); 3275 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 3276 } 3277 // Create new preheader for vector loop. 3278 LoopVectorPreHeader = 3279 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 3280 "vector.ph"); 3281 3282 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 3283 DT->getNode(Bypass)->getIDom()) && 3284 "TC check is expected to dominate Bypass"); 3285 3286 // Update dominator for Bypass & LoopExit (if needed). 3287 DT->changeImmediateDominator(Bypass, TCCheckBlock); 3288 if (!Cost->requiresScalarEpilogue(VF)) 3289 // If there is an epilogue which must run, there's no edge from the 3290 // middle block to exit blocks and thus no need to update the immediate 3291 // dominator of the exit blocks. 3292 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 3293 3294 ReplaceInstWithInst( 3295 TCCheckBlock->getTerminator(), 3296 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3297 LoopBypassBlocks.push_back(TCCheckBlock); 3298 } 3299 3300 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 3301 3302 BasicBlock *const SCEVCheckBlock = 3303 RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock); 3304 if (!SCEVCheckBlock) 3305 return nullptr; 3306 3307 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3308 (OptForSizeBasedOnProfile && 3309 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3310 "Cannot SCEV check stride or overflow when optimizing for size"); 3311 3312 3313 // Update dominator only if this is first RT check. 3314 if (LoopBypassBlocks.empty()) { 3315 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3316 if (!Cost->requiresScalarEpilogue(VF)) 3317 // If there is an epilogue which must run, there's no edge from the 3318 // middle block to exit blocks and thus no need to update the immediate 3319 // dominator of the exit blocks. 3320 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3321 } 3322 3323 LoopBypassBlocks.push_back(SCEVCheckBlock); 3324 AddedSafetyChecks = true; 3325 return SCEVCheckBlock; 3326 } 3327 3328 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, 3329 BasicBlock *Bypass) { 3330 // VPlan-native path does not do any analysis for runtime checks currently. 3331 if (EnableVPlanNativePath) 3332 return nullptr; 3333 3334 BasicBlock *const MemCheckBlock = 3335 RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader); 3336 3337 // Check if we generated code that checks in runtime if arrays overlap. We put 3338 // the checks into a separate block to make the more common case of few 3339 // elements faster. 
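// E.g. (illustration) for a loop such as
//   void f(int *a, int *b, int n) { for (int i = 0; i < n; i++) a[i] = b[i] + 1; }
// the compiler cannot prove statically that a and b do not alias, so the
// emitted block compares the accessed ranges [a, a+n) and [b, b+n) at run
// time and branches to the scalar loop if they overlap. The actual checks
// come from LoopAccessAnalysis' runtime pointer checking.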
3340 if (!MemCheckBlock) 3341 return nullptr; 3342 3343 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3344 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3345 "Cannot emit memory checks when optimizing for size, unless forced " 3346 "to vectorize."); 3347 ORE->emit([&]() { 3348 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3349 L->getStartLoc(), L->getHeader()) 3350 << "Code-size may be reduced by not forcing " 3351 "vectorization, or by source-code modifications " 3352 "eliminating the need for runtime checks " 3353 "(e.g., adding 'restrict')."; 3354 }); 3355 } 3356 3357 LoopBypassBlocks.push_back(MemCheckBlock); 3358 3359 AddedSafetyChecks = true; 3360 3361 // We currently don't use LoopVersioning for the actual loop cloning but we 3362 // still use it to add the noalias metadata. 3363 LVer = std::make_unique<LoopVersioning>( 3364 *Legal->getLAI(), 3365 Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, 3366 DT, PSE.getSE()); 3367 LVer->prepareNoAliasMetadata(); 3368 return MemCheckBlock; 3369 } 3370 3371 Value *InnerLoopVectorizer::emitTransformedIndex( 3372 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 3373 const InductionDescriptor &ID) const { 3374 3375 SCEVExpander Exp(*SE, DL, "induction"); 3376 auto Step = ID.getStep(); 3377 auto StartValue = ID.getStartValue(); 3378 assert(Index->getType()->getScalarType() == Step->getType() && 3379 "Index scalar type does not match StepValue type"); 3380 3381 // Note: the IR at this point is broken. We cannot use SE to create any new 3382 // SCEV and then expand it, hoping that SCEV's simplification will give us 3383 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 3384 // lead to various SCEV crashes. So all we can do is to use builder and rely 3385 // on InstCombine for future simplifications. Here we handle some trivial 3386 // cases only. 3387 auto CreateAdd = [&B](Value *X, Value *Y) { 3388 assert(X->getType() == Y->getType() && "Types don't match!"); 3389 if (auto *CX = dyn_cast<ConstantInt>(X)) 3390 if (CX->isZero()) 3391 return Y; 3392 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3393 if (CY->isZero()) 3394 return X; 3395 return B.CreateAdd(X, Y); 3396 }; 3397 3398 // We allow X to be a vector type, in which case Y will potentially be 3399 // splatted into a vector with the same element count. 3400 auto CreateMul = [&B](Value *X, Value *Y) { 3401 assert(X->getType()->getScalarType() == Y->getType() && 3402 "Types don't match!"); 3403 if (auto *CX = dyn_cast<ConstantInt>(X)) 3404 if (CX->isOne()) 3405 return Y; 3406 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3407 if (CY->isOne()) 3408 return X; 3409 VectorType *XVTy = dyn_cast<VectorType>(X->getType()); 3410 if (XVTy && !isa<VectorType>(Y->getType())) 3411 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); 3412 return B.CreateMul(X, Y); 3413 }; 3414 3415 // Get a suitable insert point for SCEV expansion. For blocks in the vector 3416 // loop, choose the end of the vector loop header (=LoopVectorBody), because 3417 // the DomTree is not kept up-to-date for additional blocks generated in the 3418 // vector loop. By using the header as insertion point, we guarantee that the 3419 // expanded instructions dominate all their uses. 
3420 auto GetInsertPoint = [this, &B]() { 3421 BasicBlock *InsertBB = B.GetInsertPoint()->getParent(); 3422 if (InsertBB != LoopVectorBody && 3423 LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB)) 3424 return LoopVectorBody->getTerminator(); 3425 return &*B.GetInsertPoint(); 3426 }; 3427 3428 switch (ID.getKind()) { 3429 case InductionDescriptor::IK_IntInduction: { 3430 assert(!isa<VectorType>(Index->getType()) && 3431 "Vector indices not supported for integer inductions yet"); 3432 assert(Index->getType() == StartValue->getType() && 3433 "Index type does not match StartValue type"); 3434 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 3435 return B.CreateSub(StartValue, Index); 3436 auto *Offset = CreateMul( 3437 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())); 3438 return CreateAdd(StartValue, Offset); 3439 } 3440 case InductionDescriptor::IK_PtrInduction: { 3441 assert(isa<SCEVConstant>(Step) && 3442 "Expected constant step for pointer induction"); 3443 return B.CreateGEP( 3444 ID.getElementType(), StartValue, 3445 CreateMul(Index, 3446 Exp.expandCodeFor(Step, Index->getType()->getScalarType(), 3447 GetInsertPoint()))); 3448 } 3449 case InductionDescriptor::IK_FpInduction: { 3450 assert(!isa<VectorType>(Index->getType()) && 3451 "Vector indices not supported for FP inductions yet"); 3452 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 3453 auto InductionBinOp = ID.getInductionBinOp(); 3454 assert(InductionBinOp && 3455 (InductionBinOp->getOpcode() == Instruction::FAdd || 3456 InductionBinOp->getOpcode() == Instruction::FSub) && 3457 "Original bin op should be defined for FP induction"); 3458 3459 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 3460 Value *MulExp = B.CreateFMul(StepValue, Index); 3461 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 3462 "induction"); 3463 } 3464 case InductionDescriptor::IK_NoInduction: 3465 return nullptr; 3466 } 3467 llvm_unreachable("invalid enum"); 3468 } 3469 3470 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3471 LoopScalarBody = OrigLoop->getHeader(); 3472 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3473 assert(LoopVectorPreHeader && "Invalid loop structure"); 3474 LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr 3475 assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) && 3476 "multiple exit loop without required epilogue?"); 3477 3478 LoopMiddleBlock = 3479 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3480 LI, nullptr, Twine(Prefix) + "middle.block"); 3481 LoopScalarPreHeader = 3482 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3483 nullptr, Twine(Prefix) + "scalar.ph"); 3484 3485 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3486 3487 // Set up the middle block terminator. Two cases: 3488 // 1) If we know that we must execute the scalar epilogue, emit an 3489 // unconditional branch. 3490 // 2) Otherwise, we must have a single unique exit block (due to how we 3491 // implement the multiple exit case). In this case, set up a conditonal 3492 // branch from the middle block to the loop scalar preheader, and the 3493 // exit block. completeLoopSkeleton will update the condition to use an 3494 // iteration check, if required to decide whether to execute the remainder. 3495 BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ? 
3496 BranchInst::Create(LoopScalarPreHeader) : 3497 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, 3498 Builder.getTrue()); 3499 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3500 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3501 3502 // We intentionally don't let SplitBlock to update LoopInfo since 3503 // LoopVectorBody should belong to another loop than LoopVectorPreHeader. 3504 // LoopVectorBody is explicitly added to the correct place few lines later. 3505 LoopVectorBody = 3506 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3507 nullptr, nullptr, Twine(Prefix) + "vector.body"); 3508 3509 // Update dominator for loop exit. 3510 if (!Cost->requiresScalarEpilogue(VF)) 3511 // If there is an epilogue which must run, there's no edge from the 3512 // middle block to exit blocks and thus no need to update the immediate 3513 // dominator of the exit blocks. 3514 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3515 3516 // Create and register the new vector loop. 3517 Loop *Lp = LI->AllocateLoop(); 3518 Loop *ParentLoop = OrigLoop->getParentLoop(); 3519 3520 // Insert the new loop into the loop nest and register the new basic blocks 3521 // before calling any utilities such as SCEV that require valid LoopInfo. 3522 if (ParentLoop) { 3523 ParentLoop->addChildLoop(Lp); 3524 } else { 3525 LI->addTopLevelLoop(Lp); 3526 } 3527 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3528 return Lp; 3529 } 3530 3531 void InnerLoopVectorizer::createInductionResumeValues( 3532 Loop *L, Value *VectorTripCount, 3533 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3534 assert(VectorTripCount && L && "Expected valid arguments"); 3535 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3536 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3537 "Inconsistent information about additional bypass."); 3538 // We are going to resume the execution of the scalar loop. 3539 // Go over all of the induction variables that we found and fix the 3540 // PHIs that are left in the scalar version of the loop. 3541 // The starting values of PHI nodes depend on the counter of the last 3542 // iteration in the vectorized loop. 3543 // If we come from a bypass edge then we need to start from the original 3544 // start value. 3545 for (auto &InductionEntry : Legal->getInductionVars()) { 3546 PHINode *OrigPhi = InductionEntry.first; 3547 InductionDescriptor II = InductionEntry.second; 3548 3549 // Create phi nodes to merge from the backedge-taken check block. 3550 PHINode *BCResumeVal = 3551 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3552 LoopScalarPreHeader->getTerminator()); 3553 // Copy original phi DL over to the new one. 3554 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3555 Value *&EndValue = IVEndValues[OrigPhi]; 3556 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3557 if (OrigPhi == OldInduction) { 3558 // We know what the end value is. 3559 EndValue = VectorTripCount; 3560 } else { 3561 IRBuilder<> B(L->getLoopPreheader()->getTerminator()); 3562 3563 // Fast-math-flags propagate from the original induction instruction. 
3564 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3565 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3566 3567 Type *StepType = II.getStep()->getType(); 3568 Instruction::CastOps CastOp = 3569 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3570 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3571 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3572 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3573 EndValue->setName("ind.end"); 3574 3575 // Compute the end value for the additional bypass (if applicable). 3576 if (AdditionalBypass.first) { 3577 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3578 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3579 StepType, true); 3580 CRD = 3581 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3582 EndValueFromAdditionalBypass = 3583 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3584 EndValueFromAdditionalBypass->setName("ind.end"); 3585 } 3586 } 3587 // The new PHI merges the original incoming value, in case of a bypass, 3588 // or the value at the end of the vectorized loop. 3589 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3590 3591 // Fix the scalar body counter (PHI node). 3592 // The old induction's phi node in the scalar body needs the truncated 3593 // value. 3594 for (BasicBlock *BB : LoopBypassBlocks) 3595 BCResumeVal->addIncoming(II.getStartValue(), BB); 3596 3597 if (AdditionalBypass.first) 3598 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3599 EndValueFromAdditionalBypass); 3600 3601 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3602 } 3603 } 3604 3605 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3606 MDNode *OrigLoopID) { 3607 assert(L && "Expected valid loop."); 3608 3609 // The trip counts should be cached by now. 3610 Value *Count = getOrCreateTripCount(L); 3611 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3612 3613 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3614 3615 // Add a check in the middle block to see if we have completed 3616 // all of the iterations in the first vector loop. Three cases: 3617 // 1) If we require a scalar epilogue, there is no conditional branch as 3618 // we unconditionally branch to the scalar preheader. Do nothing. 3619 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. 3620 // Thus if tail is to be folded, we know we don't need to run the 3621 // remainder and we can use the previous value for the condition (true). 3622 // 3) Otherwise, construct a runtime check. 3623 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { 3624 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3625 Count, VectorTripCount, "cmp.n", 3626 LoopMiddleBlock->getTerminator()); 3627 3628 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3629 // of the corresponding compare because they may have ended up with 3630 // different line numbers and we want to avoid awkward line stepping while 3631 // debugging. Eg. if the compare has got a line number inside the loop. 3632 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3633 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3634 } 3635 3636 // Get ready to start creating new instructions into the vectorized body. 
3637 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3638 "Inconsistent vector loop preheader"); 3639 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3640 3641 Optional<MDNode *> VectorizedLoopID = 3642 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3643 LLVMLoopVectorizeFollowupVectorized}); 3644 if (VectorizedLoopID.hasValue()) { 3645 L->setLoopID(VectorizedLoopID.getValue()); 3646 3647 // Do not setAlreadyVectorized if loop attributes have been defined 3648 // explicitly. 3649 return LoopVectorPreHeader; 3650 } 3651 3652 // Keep all loop hints from the original loop on the vector loop (we'll 3653 // replace the vectorizer-specific hints below). 3654 if (MDNode *LID = OrigLoop->getLoopID()) 3655 L->setLoopID(LID); 3656 3657 LoopVectorizeHints Hints(L, true, *ORE); 3658 Hints.setAlreadyVectorized(); 3659 3660 #ifdef EXPENSIVE_CHECKS 3661 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3662 LI->verify(*DT); 3663 #endif 3664 3665 return LoopVectorPreHeader; 3666 } 3667 3668 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3669 /* 3670 In this function we generate a new loop. The new loop will contain 3671 the vectorized instructions while the old loop will continue to run the 3672 scalar remainder. 3673 3674 [ ] <-- loop iteration number check. 3675 / | 3676 / v 3677 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3678 | / | 3679 | / v 3680 || [ ] <-- vector pre header. 3681 |/ | 3682 | v 3683 | [ ] \ 3684 | [ ]_| <-- vector loop. 3685 | | 3686 | v 3687 \ -[ ] <--- middle-block. 3688 \/ | 3689 /\ v 3690 | ->[ ] <--- new preheader. 3691 | | 3692 (opt) v <-- edge from middle to exit iff epilogue is not required. 3693 | [ ] \ 3694 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). 3695 \ | 3696 \ v 3697 >[ ] <-- exit block(s). 3698 ... 3699 */ 3700 3701 // Get the metadata of the original loop before it gets modified. 3702 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3703 3704 // Workaround! Compute the trip count of the original loop and cache it 3705 // before we start modifying the CFG. This code has a systemic problem 3706 // wherein it tries to run analysis over partially constructed IR; this is 3707 // wrong, and not simply for SCEV. The trip count of the original loop 3708 // simply happens to be prone to hitting this in practice. In theory, we 3709 // can hit the same issue for any SCEV, or ValueTracking query done during 3710 // mutation. See PR49900. 3711 getOrCreateTripCount(OrigLoop); 3712 3713 // Create an empty vector loop, and prepare basic blocks for the runtime 3714 // checks. 3715 Loop *Lp = createVectorLoopSkeleton(""); 3716 3717 // Now, compare the new count to zero. If it is zero skip the vector loop and 3718 // jump to the scalar loop. This check also covers the case where the 3719 // backedge-taken count is uint##_max: adding one to it will overflow leading 3720 // to an incorrect trip count of zero. In this (rare) case we will also jump 3721 // to the scalar loop. 3722 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3723 3724 // Generate the code to check any assumptions that we've made for SCEV 3725 // expressions. 3726 emitSCEVChecks(Lp, LoopScalarPreHeader); 3727 3728 // Generate the code that checks in runtime if arrays overlap. We put the 3729 // checks into a separate block to make the more common case of few elements 3730 // faster. 
3731 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3732 3733 // Some loops have a single integer induction variable, while other loops 3734 // don't. One example is c++ iterators that often have multiple pointer 3735 // induction variables. In the code below we also support a case where we 3736 // don't have a single induction variable. 3737 // 3738 // We try to obtain an induction variable from the original loop as hard 3739 // as possible. However if we don't find one that: 3740 // - is an integer 3741 // - counts from zero, stepping by one 3742 // - is the size of the widest induction variable type 3743 // then we create a new one. 3744 OldInduction = Legal->getPrimaryInduction(); 3745 Type *IdxTy = Legal->getWidestInductionType(); 3746 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3747 // The loop step is equal to the vectorization factor (num of SIMD elements) 3748 // times the unroll factor (num of SIMD instructions). 3749 Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt()); 3750 Value *Step = createStepForVF(Builder, IdxTy, VF, UF); 3751 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3752 Induction = 3753 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3754 getDebugLocFromInstOrOperands(OldInduction)); 3755 3756 // Emit phis for the new starting index of the scalar loop. 3757 createInductionResumeValues(Lp, CountRoundDown); 3758 3759 return completeLoopSkeleton(Lp, OrigLoopID); 3760 } 3761 3762 // Fix up external users of the induction variable. At this point, we are 3763 // in LCSSA form, with all external PHIs that use the IV having one input value, 3764 // coming from the remainder loop. We need those PHIs to also have a correct 3765 // value for the IV when arriving directly from the middle block. 3766 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3767 const InductionDescriptor &II, 3768 Value *CountRoundDown, Value *EndValue, 3769 BasicBlock *MiddleBlock) { 3770 // There are two kinds of external IV usages - those that use the value 3771 // computed in the last iteration (the PHI) and those that use the penultimate 3772 // value (the value that feeds into the phi from the loop latch). 3773 // We allow both, but they, obviously, have different values. 3774 3775 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3776 3777 DenseMap<Value *, Value *> MissingVals; 3778 3779 // An external user of the last iteration's value should see the value that 3780 // the remainder loop uses to initialize its own IV. 3781 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3782 for (User *U : PostInc->users()) { 3783 Instruction *UI = cast<Instruction>(U); 3784 if (!OrigLoop->contains(UI)) { 3785 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3786 MissingVals[UI] = EndValue; 3787 } 3788 } 3789 3790 // An external user of the penultimate value need to see EndValue - Step. 3791 // The simplest way to get this is to recompute it from the constituent SCEVs, 3792 // that is Start + (Step * (CRD - 1)). 3793 for (User *U : OrigPhi->users()) { 3794 auto *UI = cast<Instruction>(U); 3795 if (!OrigLoop->contains(UI)) { 3796 const DataLayout &DL = 3797 OrigLoop->getHeader()->getModule()->getDataLayout(); 3798 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3799 3800 IRBuilder<> B(MiddleBlock->getTerminator()); 3801 3802 // Fast-math-flags propagate from the original induction instruction. 
3803 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3804 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3805 3806 Value *CountMinusOne = B.CreateSub( 3807 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3808 Value *CMO = 3809 !II.getStep()->getType()->isIntegerTy() 3810 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3811 II.getStep()->getType()) 3812 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3813 CMO->setName("cast.cmo"); 3814 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); 3815 Escape->setName("ind.escape"); 3816 MissingVals[UI] = Escape; 3817 } 3818 } 3819 3820 for (auto &I : MissingVals) { 3821 PHINode *PHI = cast<PHINode>(I.first); 3822 // One corner case we have to handle is two IVs "chasing" each-other, 3823 // that is %IV2 = phi [...], [ %IV1, %latch ] 3824 // In this case, if IV1 has an external use, we need to avoid adding both 3825 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3826 // don't already have an incoming value for the middle block. 3827 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3828 PHI->addIncoming(I.second, MiddleBlock); 3829 } 3830 } 3831 3832 namespace { 3833 3834 struct CSEDenseMapInfo { 3835 static bool canHandle(const Instruction *I) { 3836 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3837 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3838 } 3839 3840 static inline Instruction *getEmptyKey() { 3841 return DenseMapInfo<Instruction *>::getEmptyKey(); 3842 } 3843 3844 static inline Instruction *getTombstoneKey() { 3845 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3846 } 3847 3848 static unsigned getHashValue(const Instruction *I) { 3849 assert(canHandle(I) && "Unknown instruction!"); 3850 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3851 I->value_op_end())); 3852 } 3853 3854 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3855 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3856 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3857 return LHS == RHS; 3858 return LHS->isIdenticalTo(RHS); 3859 } 3860 }; 3861 3862 } // end anonymous namespace 3863 3864 ///Perform cse of induction variable instructions. 3865 static void cse(BasicBlock *BB) { 3866 // Perform simple cse. 3867 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3868 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 3869 if (!CSEDenseMapInfo::canHandle(&In)) 3870 continue; 3871 3872 // Check if we can replace this instruction with any of the 3873 // visited instructions. 3874 if (Instruction *V = CSEMap.lookup(&In)) { 3875 In.replaceAllUsesWith(V); 3876 In.eraseFromParent(); 3877 continue; 3878 } 3879 3880 CSEMap[&In] = &In; 3881 } 3882 } 3883 3884 InstructionCost 3885 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3886 bool &NeedToScalarize) const { 3887 Function *F = CI->getCalledFunction(); 3888 Type *ScalarRetTy = CI->getType(); 3889 SmallVector<Type *, 4> Tys, ScalarTys; 3890 for (auto &ArgOp : CI->args()) 3891 ScalarTys.push_back(ArgOp->getType()); 3892 3893 // Estimate cost of scalarized vector call. The source operands are assumed 3894 // to be vectors, so we need to extract individual elements from there, 3895 // execute VF scalar calls, and then gather the result into the vector return 3896 // value. 
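// As a purely illustrative example (costs are target dependent): with VF = 4,
// a scalar call cost of 10 and a scalarization overhead of 12, the scalarized
// cost computed below is 4 * 10 + 12 = 52; if the VFDatabase provides a
// vector variant whose call cost is, say, 20, that variant is cheaper, so the
// returned cost is 20 and NeedToScalarize is set to false.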
3897 InstructionCost ScalarCallCost = 3898 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3899 if (VF.isScalar()) 3900 return ScalarCallCost; 3901 3902 // Compute corresponding vector type for return value and arguments. 3903 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3904 for (Type *ScalarTy : ScalarTys) 3905 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3906 3907 // Compute costs of unpacking argument values for the scalar calls and 3908 // packing the return values to a vector. 3909 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3910 3911 InstructionCost Cost = 3912 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3913 3914 // If we can't emit a vector call for this function, then the currently found 3915 // cost is the cost we need to return. 3916 NeedToScalarize = true; 3917 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3918 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3919 3920 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3921 return Cost; 3922 3923 // If the corresponding vector cost is cheaper, return its cost. 3924 InstructionCost VectorCallCost = 3925 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3926 if (VectorCallCost < Cost) { 3927 NeedToScalarize = false; 3928 Cost = VectorCallCost; 3929 } 3930 return Cost; 3931 } 3932 3933 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3934 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3935 return Elt; 3936 return VectorType::get(Elt, VF); 3937 } 3938 3939 InstructionCost 3940 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3941 ElementCount VF) const { 3942 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3943 assert(ID && "Expected intrinsic call!"); 3944 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3945 FastMathFlags FMF; 3946 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3947 FMF = FPMO->getFastMathFlags(); 3948 3949 SmallVector<const Value *> Arguments(CI->args()); 3950 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3951 SmallVector<Type *> ParamTys; 3952 std::transform(FTy->param_begin(), FTy->param_end(), 3953 std::back_inserter(ParamTys), 3954 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3955 3956 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3957 dyn_cast<IntrinsicInst>(CI)); 3958 return TTI.getIntrinsicInstrCost(CostAttrs, 3959 TargetTransformInfo::TCK_RecipThroughput); 3960 } 3961 3962 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3963 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3964 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3965 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3966 } 3967 3968 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3969 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3970 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3971 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3972 } 3973 3974 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3975 // For every instruction `I` in MinBWs, truncate the operands, create a 3976 // truncated version of `I` and reextend its result. InstCombine runs 3977 // later and will remove any ext/trunc pairs. 
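// E.g. (illustration) if MinBWs records that an i32 add only needs 8 bits, a
// widened
//   %a = add <4 x i32> %x, %y
// is rewritten below roughly as
//   %x.tr = trunc <4 x i32> %x to <4 x i8>
//   %y.tr = trunc <4 x i32> %y to <4 x i8>
//   %a.tr = add <4 x i8> %x.tr, %y.tr
//   %a.ext = zext <4 x i8> %a.tr to <4 x i32>
// with InstCombine expected to clean up any remaining ext/trunc pairs.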
3978 SmallPtrSet<Value *, 4> Erased; 3979 for (const auto &KV : Cost->getMinimalBitwidths()) { 3980 // If the value wasn't vectorized, we must maintain the original scalar 3981 // type. The absence of the value from State indicates that it 3982 // wasn't vectorized. 3983 // FIXME: Should not rely on getVPValue at this point. 3984 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3985 if (!State.hasAnyVectorValue(Def)) 3986 continue; 3987 for (unsigned Part = 0; Part < UF; ++Part) { 3988 Value *I = State.get(Def, Part); 3989 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3990 continue; 3991 Type *OriginalTy = I->getType(); 3992 Type *ScalarTruncatedTy = 3993 IntegerType::get(OriginalTy->getContext(), KV.second); 3994 auto *TruncatedTy = VectorType::get( 3995 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); 3996 if (TruncatedTy == OriginalTy) 3997 continue; 3998 3999 IRBuilder<> B(cast<Instruction>(I)); 4000 auto ShrinkOperand = [&](Value *V) -> Value * { 4001 if (auto *ZI = dyn_cast<ZExtInst>(V)) 4002 if (ZI->getSrcTy() == TruncatedTy) 4003 return ZI->getOperand(0); 4004 return B.CreateZExtOrTrunc(V, TruncatedTy); 4005 }; 4006 4007 // The actual instruction modification depends on the instruction type, 4008 // unfortunately. 4009 Value *NewI = nullptr; 4010 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 4011 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 4012 ShrinkOperand(BO->getOperand(1))); 4013 4014 // Any wrapping introduced by shrinking this operation shouldn't be 4015 // considered undefined behavior. So, we can't unconditionally copy 4016 // arithmetic wrapping flags to NewI. 4017 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 4018 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 4019 NewI = 4020 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 4021 ShrinkOperand(CI->getOperand(1))); 4022 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 4023 NewI = B.CreateSelect(SI->getCondition(), 4024 ShrinkOperand(SI->getTrueValue()), 4025 ShrinkOperand(SI->getFalseValue())); 4026 } else if (auto *CI = dyn_cast<CastInst>(I)) { 4027 switch (CI->getOpcode()) { 4028 default: 4029 llvm_unreachable("Unhandled cast!"); 4030 case Instruction::Trunc: 4031 NewI = ShrinkOperand(CI->getOperand(0)); 4032 break; 4033 case Instruction::SExt: 4034 NewI = B.CreateSExtOrTrunc( 4035 CI->getOperand(0), 4036 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4037 break; 4038 case Instruction::ZExt: 4039 NewI = B.CreateZExtOrTrunc( 4040 CI->getOperand(0), 4041 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4042 break; 4043 } 4044 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 4045 auto Elements0 = 4046 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); 4047 auto *O0 = B.CreateZExtOrTrunc( 4048 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 4049 auto Elements1 = 4050 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); 4051 auto *O1 = B.CreateZExtOrTrunc( 4052 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 4053 4054 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 4055 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 4056 // Don't do anything with the operands, just extend the result. 
4057 continue; 4058 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 4059 auto Elements = 4060 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); 4061 auto *O0 = B.CreateZExtOrTrunc( 4062 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4063 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 4064 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 4065 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 4066 auto Elements = 4067 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); 4068 auto *O0 = B.CreateZExtOrTrunc( 4069 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4070 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 4071 } else { 4072 // If we don't know what to do, be conservative and don't do anything. 4073 continue; 4074 } 4075 4076 // Lastly, extend the result. 4077 NewI->takeName(cast<Instruction>(I)); 4078 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 4079 I->replaceAllUsesWith(Res); 4080 cast<Instruction>(I)->eraseFromParent(); 4081 Erased.insert(I); 4082 State.reset(Def, Res, Part); 4083 } 4084 } 4085 4086 // We'll have created a bunch of ZExts that are now parentless. Clean up. 4087 for (const auto &KV : Cost->getMinimalBitwidths()) { 4088 // If the value wasn't vectorized, we must maintain the original scalar 4089 // type. The absence of the value from State indicates that it 4090 // wasn't vectorized. 4091 // FIXME: Should not rely on getVPValue at this point. 4092 VPValue *Def = State.Plan->getVPValue(KV.first, true); 4093 if (!State.hasAnyVectorValue(Def)) 4094 continue; 4095 for (unsigned Part = 0; Part < UF; ++Part) { 4096 Value *I = State.get(Def, Part); 4097 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 4098 if (Inst && Inst->use_empty()) { 4099 Value *NewI = Inst->getOperand(0); 4100 Inst->eraseFromParent(); 4101 State.reset(Def, NewI, Part); 4102 } 4103 } 4104 } 4105 } 4106 4107 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { 4108 // Insert truncates and extends for any truncated instructions as hints to 4109 // InstCombine. 4110 if (VF.isVector()) 4111 truncateToMinimalBitwidths(State); 4112 4113 // Fix widened non-induction PHIs by setting up the PHI operands. 4114 if (OrigPHIsToFix.size()) { 4115 assert(EnableVPlanNativePath && 4116 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 4117 fixNonInductionPHIs(State); 4118 } 4119 4120 // At this point every instruction in the original loop is widened to a 4121 // vector form. Now we need to fix the recurrences in the loop. These PHI 4122 // nodes are currently empty because we did not want to introduce cycles. 4123 // This is the second stage of vectorizing recurrences. 4124 fixCrossIterationPHIs(State); 4125 4126 // Forget the original basic block. 4127 PSE.getSE()->forgetLoop(OrigLoop); 4128 4129 // If we inserted an edge from the middle block to the unique exit block, 4130 // update uses outside the loop (phis) to account for the newly inserted 4131 // edge. 4132 if (!Cost->requiresScalarEpilogue(VF)) { 4133 // Fix-up external users of the induction variables. 4134 for (auto &Entry : Legal->getInductionVars()) 4135 fixupIVUsers(Entry.first, Entry.second, 4136 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 4137 IVEndValues[Entry.first], LoopMiddleBlock); 4138 4139 fixLCSSAPHIs(State); 4140 } 4141 4142 for (Instruction *PI : PredicatedInstructions) 4143 sinkScalarOperands(&*PI); 4144 4145 // Remove redundant induction instructions. 
4146 cse(LoopVectorBody); 4147 4148 // Set/update profile weights for the vector and remainder loops as original 4149 // loop iterations are now distributed among them. Note that original loop 4150 // represented by LoopScalarBody becomes remainder loop after vectorization. 4151 // 4152 // For cases like foldTailByMasking() and requiresScalarEpilogue() we may 4153 // end up with a slightly inaccurate result, but that should be OK since the 4154 // profile is not inherently precise anyway. Note also that a possible bypass of 4155 // the vector code caused by legality checks is ignored, optimistically assigning 4156 // all the weight to the vector loop. 4157 // 4158 // For scalable vectorization we can't know at compile time how many iterations 4159 // of the loop are handled in one vector iteration, so instead assume a pessimistic 4160 // vscale of '1'. 4161 setProfileInfoAfterUnrolling( 4162 LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody), 4163 LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF); 4164 } 4165 4166 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) { 4167 // In order to support recurrences we need to be able to vectorize Phi nodes. 4168 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4169 // stage #2: We now need to fix the recurrences by adding incoming edges to 4170 // the currently empty PHI nodes. At this point every instruction in the 4171 // original loop is widened to a vector form so we can use them to construct 4172 // the incoming edges. 4173 VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock(); 4174 for (VPRecipeBase &R : Header->phis()) { 4175 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) 4176 fixReduction(ReductionPhi, State); 4177 else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) 4178 fixFirstOrderRecurrence(FOR, State); 4179 } 4180 } 4181 4182 void InnerLoopVectorizer::fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, 4183 VPTransformState &State) { 4184 // This is the second phase of vectorizing first-order recurrences. An 4185 // overview of the transformation is described below. Suppose we have the 4186 // following loop. 4187 // 4188 // for (int i = 0; i < n; ++i) 4189 // b[i] = a[i] - a[i - 1]; 4190 // 4191 // There is a first-order recurrence on "a". For this loop, the shorthand 4192 // scalar IR looks like: 4193 // 4194 // scalar.ph: 4195 // s_init = a[-1] 4196 // br scalar.body 4197 // 4198 // scalar.body: 4199 // i = phi [0, scalar.ph], [i+1, scalar.body] 4200 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 4201 // s2 = a[i] 4202 // b[i] = s2 - s1 4203 // br cond, scalar.body, ... 4204 // 4205 // In this example, s1 is a recurrence because its value depends on the 4206 // previous iteration. In the first phase of vectorization, we created a 4207 // vector phi v1 for s1. We now complete the vectorization and produce the 4208 // shorthand vector IR shown below (for VF = 4, UF = 1).
4209 // 4210 // vector.ph: 4211 // v_init = vector(..., ..., ..., a[-1]) 4212 // br vector.body 4213 // 4214 // vector.body 4215 // i = phi [0, vector.ph], [i+4, vector.body] 4216 // v1 = phi [v_init, vector.ph], [v2, vector.body] 4217 // v2 = a[i, i+1, i+2, i+3]; 4218 // v3 = vector(v1(3), v2(0, 1, 2)) 4219 // b[i, i+1, i+2, i+3] = v2 - v3 4220 // br cond, vector.body, middle.block 4221 // 4222 // middle.block: 4223 // x = v2(3) 4224 // br scalar.ph 4225 // 4226 // scalar.ph: 4227 // s_init = phi [x, middle.block], [a[-1], otherwise] 4228 // br scalar.body 4229 // 4230 // After execution completes the vector loop, we extract the next value of 4231 // the recurrence (x) to use as the initial value in the scalar loop. 4232 4233 // Extract the last vector element in the middle block. This will be the 4234 // initial value for the recurrence when jumping to the scalar loop. 4235 VPValue *PreviousDef = PhiR->getBackedgeValue(); 4236 Value *Incoming = State.get(PreviousDef, UF - 1); 4237 auto *ExtractForScalar = Incoming; 4238 auto *IdxTy = Builder.getInt32Ty(); 4239 if (VF.isVector()) { 4240 auto *One = ConstantInt::get(IdxTy, 1); 4241 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4242 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4243 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 4244 ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, 4245 "vector.recur.extract"); 4246 } 4247 // Extract the second last element in the middle block if the 4248 // Phi is used outside the loop. We need to extract the phi itself 4249 // and not the last element (the phi update in the current iteration). This 4250 // will be the value when jumping to the exit block from the LoopMiddleBlock, 4251 // when the scalar loop is not run at all. 4252 Value *ExtractForPhiUsedOutsideLoop = nullptr; 4253 if (VF.isVector()) { 4254 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4255 auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2)); 4256 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 4257 Incoming, Idx, "vector.recur.extract.for.phi"); 4258 } else if (UF > 1) 4259 // When loop is unrolled without vectorizing, initialize 4260 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value 4261 // of `Incoming`. This is analogous to the vectorized case above: extracting 4262 // the second last element when VF > 1. 4263 ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2); 4264 4265 // Fix the initial value of the original recurrence in the scalar loop. 4266 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 4267 PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue()); 4268 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 4269 auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue(); 4270 for (auto *BB : predecessors(LoopScalarPreHeader)) { 4271 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 4272 Start->addIncoming(Incoming, BB); 4273 } 4274 4275 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start); 4276 Phi->setName("scalar.recur"); 4277 4278 // Finally, fix users of the recurrence outside the loop. The users will need 4279 // either the last value of the scalar recurrence or the last value of the 4280 // vector recurrence we extracted in the middle block. Since the loop is in 4281 // LCSSA form, we just need to find all the phi nodes for the original scalar 4282 // recurrence in the exit block, and then add an edge for the middle block. 
4283 // Note that LCSSA does not imply single entry when the original scalar loop 4284 // had multiple exiting edges (as we always run the last iteration in the 4285 // scalar epilogue); in that case, there is no edge from the middle block to the 4286 // exit block, and thus no phis that need to be updated. 4287 if (!Cost->requiresScalarEpilogue(VF)) 4288 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4289 if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi)) 4290 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock); 4291 } 4292 4293 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR, 4294 VPTransformState &State) { 4295 PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue()); 4296 // Get its reduction variable descriptor. 4297 assert(Legal->isReductionVariable(OrigPhi) && 4298 "Unable to find the reduction variable"); 4299 const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor(); 4300 4301 RecurKind RK = RdxDesc.getRecurrenceKind(); 4302 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue(); 4303 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr(); 4304 setDebugLocFromInst(ReductionStartValue); 4305 4306 VPValue *LoopExitInstDef = PhiR->getBackedgeValue(); 4307 // This is the vector-clone of the value that leaves the loop. 4308 Type *VecTy = State.get(LoopExitInstDef, 0)->getType(); 4309 4310 // Wrap flags are in general invalid after vectorization, clear them. 4311 clearReductionWrapFlags(RdxDesc, State); 4312 4313 // Before each round, move the insertion point right between 4314 // the PHIs and the values we are going to write. 4315 // This allows us to write both PHINodes and the extractelement 4316 // instructions. 4317 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4318 4319 setDebugLocFromInst(LoopExitInst); 4320 4321 Type *PhiTy = OrigPhi->getType(); 4322 // If tail is folded by masking, the vector value to leave the loop should be 4323 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 4324 // instead of the former. For an inloop reduction the reduction will already 4325 // be predicated, and does not need to be handled here. 4326 if (Cost->foldTailByMasking() && !PhiR->isInLoop()) { 4327 for (unsigned Part = 0; Part < UF; ++Part) { 4328 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part); 4329 Value *Sel = nullptr; 4330 for (User *U : VecLoopExitInst->users()) { 4331 if (isa<SelectInst>(U)) { 4332 assert(!Sel && "Reduction exit feeding two selects"); 4333 Sel = U; 4334 } else 4335 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 4336 } 4337 assert(Sel && "Reduction exit feeds no select"); 4338 State.reset(LoopExitInstDef, Sel, Part); 4339 4340 // If the target can create a predicated operator for the reduction at no 4341 // extra cost in the loop (for example a predicated vadd), it can be 4342 // cheaper for the select to remain in the loop than be sunk out of it, 4343 // and so use the select value for the phi instead of the old 4344 // LoopExitValue.
4345 if (PreferPredicatedReductionSelect || 4346 TTI->preferPredicatedReductionSelect( 4347 RdxDesc.getOpcode(), PhiTy, 4348 TargetTransformInfo::ReductionFlags())) { 4349 auto *VecRdxPhi = 4350 cast<PHINode>(State.get(PhiR, Part)); 4351 VecRdxPhi->setIncomingValueForBlock( 4352 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 4353 } 4354 } 4355 } 4356 4357 // If the vector reduction can be performed in a smaller type, we truncate 4358 // then extend the loop exit value to enable InstCombine to evaluate the 4359 // entire expression in the smaller type. 4360 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 4361 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); 4362 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4363 Builder.SetInsertPoint( 4364 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4365 VectorParts RdxParts(UF); 4366 for (unsigned Part = 0; Part < UF; ++Part) { 4367 RdxParts[Part] = State.get(LoopExitInstDef, Part); 4368 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4369 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 4370 : Builder.CreateZExt(Trunc, VecTy); 4371 for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users())) 4372 if (U != Trunc) { 4373 U->replaceUsesOfWith(RdxParts[Part], Extnd); 4374 RdxParts[Part] = Extnd; 4375 } 4376 } 4377 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4378 for (unsigned Part = 0; Part < UF; ++Part) { 4379 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4380 State.reset(LoopExitInstDef, RdxParts[Part], Part); 4381 } 4382 } 4383 4384 // Reduce all of the unrolled parts into a single vector. 4385 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 4386 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 4387 4388 // The middle block terminator has already been assigned a DebugLoc here (the 4389 // OrigLoop's single latch terminator). We want the whole middle block to 4390 // appear to execute on this line because: (a) it is all compiler generated, 4391 // (b) these instructions are always executed after evaluating the latch 4392 // conditional branch, and (c) other passes may add new predecessors which 4393 // terminate on this line. This is the easiest way to ensure we don't 4394 // accidentally cause an extra step back into the loop while debugging. 4395 setDebugLocFromInst(LoopMiddleBlock->getTerminator()); 4396 if (PhiR->isOrdered()) 4397 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); 4398 else { 4399 // Floating-point operations should have some FMF to enable the reduction. 4400 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 4401 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 4402 for (unsigned Part = 1; Part < UF; ++Part) { 4403 Value *RdxPart = State.get(LoopExitInstDef, Part); 4404 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 4405 ReducedPartRdx = Builder.CreateBinOp( 4406 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 4407 } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK)) 4408 ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK, 4409 ReducedPartRdx, RdxPart); 4410 else 4411 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4412 } 4413 } 4414 4415 // Create the reduction after the loop. Note that inloop reductions create the 4416 // target reduction in the loop using a Reduction recipe. 
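  // For illustration only (VF = 4, UF = 1, integer add reduction; names are
  // made up): the call below typically materializes something like
  //   %rdx = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %bin.rdx)
  // in the middle block, optionally followed by a sext/zext back to the phi
  // type when the reduction was evaluated in a narrower type.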
4417 if (VF.isVector() && !PhiR->isInLoop()) { 4418 ReducedPartRdx = 4419 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi); 4420 // If the reduction can be performed in a smaller type, we need to extend 4421 // the reduction to the wider type before we branch to the original loop. 4422 if (PhiTy != RdxDesc.getRecurrenceType()) 4423 ReducedPartRdx = RdxDesc.isSigned() 4424 ? Builder.CreateSExt(ReducedPartRdx, PhiTy) 4425 : Builder.CreateZExt(ReducedPartRdx, PhiTy); 4426 } 4427 4428 // Create a phi node that merges control-flow from the backedge-taken check 4429 // block and the middle block. 4430 PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx", 4431 LoopScalarPreHeader->getTerminator()); 4432 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 4433 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 4434 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4435 4436 // Now, we need to fix the users of the reduction variable 4437 // inside and outside of the scalar remainder loop. 4438 4439 // We know that the loop is in LCSSA form. We need to update the PHI nodes 4440 // in the exit blocks. See comment on analogous loop in 4441 // fixFirstOrderRecurrence for a more complete explanation of the logic. 4442 if (!Cost->requiresScalarEpilogue(VF)) 4443 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4444 if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst)) 4445 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4446 4447 // Fix the scalar loop reduction variable with the incoming reduction sum 4448 // from the vector body and from the backedge value. 4449 int IncomingEdgeBlockIdx = 4450 OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4451 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4452 // Pick the other block. 4453 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 4454 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4455 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4456 } 4457 4458 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc, 4459 VPTransformState &State) { 4460 RecurKind RK = RdxDesc.getRecurrenceKind(); 4461 if (RK != RecurKind::Add && RK != RecurKind::Mul) 4462 return; 4463 4464 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 4465 assert(LoopExitInstr && "null loop exit instruction"); 4466 SmallVector<Instruction *, 8> Worklist; 4467 SmallPtrSet<Instruction *, 8> Visited; 4468 Worklist.push_back(LoopExitInstr); 4469 Visited.insert(LoopExitInstr); 4470 4471 while (!Worklist.empty()) { 4472 Instruction *Cur = Worklist.pop_back_val(); 4473 if (isa<OverflowingBinaryOperator>(Cur)) 4474 for (unsigned Part = 0; Part < UF; ++Part) { 4475 // FIXME: Should not rely on getVPValue at this point. 4476 Value *V = State.get(State.Plan->getVPValue(Cur, true), Part); 4477 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4478 } 4479 4480 for (User *U : Cur->users()) { 4481 Instruction *UI = cast<Instruction>(U); 4482 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4483 Visited.insert(UI).second) 4484 Worklist.push_back(UI); 4485 } 4486 } 4487 } 4488 4489 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) { 4490 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4491 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) 4492 // Some phis were already hand updated by the reduction and recurrence 4493 // code above, leave them alone.
4494 continue; 4495 4496 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4497 // Non-instruction incoming values will have only one value. 4498 4499 VPLane Lane = VPLane::getFirstLane(); 4500 if (isa<Instruction>(IncomingValue) && 4501 !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue), 4502 VF)) 4503 Lane = VPLane::getLastLaneForVF(VF); 4504 4505 // Can be a loop invariant incoming value or the last scalar value to be 4506 // extracted from the vectorized loop. 4507 // FIXME: Should not rely on getVPValue at this point. 4508 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4509 Value *lastIncomingValue = 4510 OrigLoop->isLoopInvariant(IncomingValue) 4511 ? IncomingValue 4512 : State.get(State.Plan->getVPValue(IncomingValue, true), 4513 VPIteration(UF - 1, Lane)); 4514 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4515 } 4516 } 4517 4518 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4519 // The basic block and loop containing the predicated instruction. 4520 auto *PredBB = PredInst->getParent(); 4521 auto *VectorLoop = LI->getLoopFor(PredBB); 4522 4523 // Initialize a worklist with the operands of the predicated instruction. 4524 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4525 4526 // Holds instructions that we need to analyze again. An instruction may be 4527 // reanalyzed if we don't yet know if we can sink it or not. 4528 SmallVector<Instruction *, 8> InstsToReanalyze; 4529 4530 // Returns true if a given use occurs in the predicated block. Phi nodes use 4531 // their operands in their corresponding predecessor blocks. 4532 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4533 auto *I = cast<Instruction>(U.getUser()); 4534 BasicBlock *BB = I->getParent(); 4535 if (auto *Phi = dyn_cast<PHINode>(I)) 4536 BB = Phi->getIncomingBlock( 4537 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4538 return BB == PredBB; 4539 }; 4540 4541 // Iteratively sink the scalarized operands of the predicated instruction 4542 // into the block we created for it. When an instruction is sunk, its 4543 // operands are then added to the worklist. The algorithm ends when a pass 4544 // through the worklist doesn't sink a single instruction. 4545 bool Changed; 4546 do { 4547 // Add the instructions that need to be reanalyzed to the worklist, and 4548 // reset the changed indicator. 4549 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4550 InstsToReanalyze.clear(); 4551 Changed = false; 4552 4553 while (!Worklist.empty()) { 4554 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4555 4556 // We can't sink an instruction if it is a phi node, is not in the loop, 4557 // or may have side effects. 4558 if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) || 4559 I->mayHaveSideEffects()) 4560 continue; 4561 4562 // If the instruction is already in PredBB, check if we can sink its 4563 // operands. In that case, VPlan's sinkScalarOperands() succeeded in 4564 // sinking the scalar instruction I, hence it appears in PredBB; but it 4565 // may have failed to sink I's operands (recursively), which we try 4566 // (again) here. 4567 if (I->getParent() == PredBB) { 4568 Worklist.insert(I->op_begin(), I->op_end()); 4569 continue; 4570 } 4571 4572 // It's legal to sink the instruction if all its uses occur in the 4573 // predicated block. Otherwise, there's nothing to do yet, and we may 4574 // need to reanalyze the instruction.
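      // For illustration only (names are made up): if a scalarized load %x
      // was sunk into the predicated block and its address computation %gep
      // is used only by %x, then %gep can be sunk as well; if %gep also has
      // a user outside the predicated block, it is left in place and queued
      // for reanalysis.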
4575 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4576 InstsToReanalyze.push_back(I); 4577 continue; 4578 } 4579 4580 // Move the instruction to the beginning of the predicated block, and add 4581 // its operands to the worklist. 4582 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4583 Worklist.insert(I->op_begin(), I->op_end()); 4584 4585 // The sinking may have enabled other instructions to be sunk, so we will 4586 // need to iterate. 4587 Changed = true; 4588 } 4589 } while (Changed); 4590 } 4591 4592 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { 4593 for (PHINode *OrigPhi : OrigPHIsToFix) { 4594 VPWidenPHIRecipe *VPPhi = 4595 cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi)); 4596 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0)); 4597 // Make sure the builder has a valid insert point. 4598 Builder.SetInsertPoint(NewPhi); 4599 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4600 VPValue *Inc = VPPhi->getIncomingValue(i); 4601 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4602 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4603 } 4604 } 4605 } 4606 4607 bool InnerLoopVectorizer::useOrderedReductions(RecurrenceDescriptor &RdxDesc) { 4608 return Cost->useOrderedReductions(RdxDesc); 4609 } 4610 4611 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, 4612 VPUser &Operands, unsigned UF, 4613 ElementCount VF, bool IsPtrLoopInvariant, 4614 SmallBitVector &IsIndexLoopInvariant, 4615 VPTransformState &State) { 4616 // Construct a vector GEP by widening the operands of the scalar GEP as 4617 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 4618 // results in a vector of pointers when at least one operand of the GEP 4619 // is vector-typed. Thus, to keep the representation compact, we only use 4620 // vector-typed operands for loop-varying values. 4621 4622 if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 4623 // If we are vectorizing, but the GEP has only loop-invariant operands, 4624 // the GEP we build (by only using vector-typed operands for 4625 // loop-varying values) would be a scalar pointer. Thus, to ensure we 4626 // produce a vector of pointers, we need to either arbitrarily pick an 4627 // operand to broadcast, or broadcast a clone of the original GEP. 4628 // Here, we broadcast a clone of the original. 4629 // 4630 // TODO: If at some point we decide to scalarize instructions having 4631 // loop-invariant operands, this special case will no longer be 4632 // required. We would add the scalarization decision to 4633 // collectLoopScalars() and teach getVectorValue() to broadcast 4634 // the lane-zero scalar value. 4635 auto *Clone = Builder.Insert(GEP->clone()); 4636 for (unsigned Part = 0; Part < UF; ++Part) { 4637 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 4638 State.set(VPDef, EntryPart, Part); 4639 addMetadata(EntryPart, GEP); 4640 } 4641 } else { 4642 // If the GEP has at least one loop-varying operand, we are sure to 4643 // produce a vector of pointers. But if we are only unrolling, we want 4644 // to produce a scalar GEP for each unroll part. Thus, the GEP we 4645 // produce with the code below will be scalar (if VF == 1) or vector 4646 // (otherwise). Note that for the unroll-only case, we still maintain 4647 // values in the vector mapping with initVector, as we do for other 4648 // instructions. 4649 for (unsigned Part = 0; Part < UF; ++Part) { 4650 // The pointer operand of the new GEP.
If it's loop-invariant, we 4651 // won't broadcast it. 4652 auto *Ptr = IsPtrLoopInvariant 4653 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 4654 : State.get(Operands.getOperand(0), Part); 4655 4656 // Collect all the indices for the new GEP. If any index is 4657 // loop-invariant, we won't broadcast it. 4658 SmallVector<Value *, 4> Indices; 4659 for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) { 4660 VPValue *Operand = Operands.getOperand(I); 4661 if (IsIndexLoopInvariant[I - 1]) 4662 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 4663 else 4664 Indices.push_back(State.get(Operand, Part)); 4665 } 4666 4667 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 4668 // but it should be a vector, otherwise. 4669 auto *NewGEP = 4670 GEP->isInBounds() 4671 ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, 4672 Indices) 4673 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); 4674 assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) && 4675 "NewGEP is not a pointer vector"); 4676 State.set(VPDef, NewGEP, Part); 4677 addMetadata(NewGEP, GEP); 4678 } 4679 } 4680 } 4681 4682 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4683 VPWidenPHIRecipe *PhiR, 4684 VPTransformState &State) { 4685 PHINode *P = cast<PHINode>(PN); 4686 if (EnableVPlanNativePath) { 4687 // Currently we enter here in the VPlan-native path for non-induction 4688 // PHIs where all control flow is uniform. We simply widen these PHIs. 4689 // Create a vector phi with no operands - the vector phi operands will be 4690 // set at the end of vector code generation. 4691 Type *VecTy = (State.VF.isScalar()) 4692 ? PN->getType() 4693 : VectorType::get(PN->getType(), State.VF); 4694 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4695 State.set(PhiR, VecPhi, 0); 4696 OrigPHIsToFix.push_back(P); 4697 4698 return; 4699 } 4700 4701 assert(PN->getParent() == OrigLoop->getHeader() && 4702 "Non-header phis should have been handled elsewhere"); 4703 4704 // In order to support recurrences we need to be able to vectorize Phi nodes. 4705 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4706 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4707 // this value when we vectorize all of the instructions that use the PHI. 4708 4709 assert(!Legal->isReductionVariable(P) && 4710 "reductions should be handled elsewhere"); 4711 4712 setDebugLocFromInst(P); 4713 4714 // This PHINode must be an induction variable. 4715 // Make sure that we know about it. 4716 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4717 4718 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4719 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4720 4721 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4722 // which can be found from the original scalar operations. 4723 switch (II.getKind()) { 4724 case InductionDescriptor::IK_NoInduction: 4725 llvm_unreachable("Unknown induction"); 4726 case InductionDescriptor::IK_IntInduction: 4727 case InductionDescriptor::IK_FpInduction: 4728 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4729 case InductionDescriptor::IK_PtrInduction: { 4730 // Handle the pointer induction variable case. 
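    // For illustration only (shorthand IR; names are made up): for a pointer
    // induction stepping through an i32 array, the scalar path below emits a
    //   %next.gep = getelementptr i32, i32* %base, i64 <lane index>
    // per demanded lane, whereas the vector path builds a pointer phi plus
    // per-part GEPs whose offsets are vectors of lane indices scaled by the
    // step, producing a vector of pointers for each part.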
4731 assert(P->getType()->isPointerTy() && "Unexpected type."); 4732 4733 if (Cost->isScalarAfterVectorization(P, State.VF)) { 4734 // This is the normalized GEP that starts counting at zero. 4735 Value *PtrInd = 4736 Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); 4737 // Determine the number of scalars we need to generate for each unroll 4738 // iteration. If the instruction is uniform, we only need to generate the 4739 // first lane. Otherwise, we generate all VF values. 4740 bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF); 4741 unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue(); 4742 4743 bool NeedsVectorIndex = !IsUniform && VF.isScalable(); 4744 Value *UnitStepVec = nullptr, *PtrIndSplat = nullptr; 4745 if (NeedsVectorIndex) { 4746 Type *VecIVTy = VectorType::get(PtrInd->getType(), VF); 4747 UnitStepVec = Builder.CreateStepVector(VecIVTy); 4748 PtrIndSplat = Builder.CreateVectorSplat(VF, PtrInd); 4749 } 4750 4751 for (unsigned Part = 0; Part < UF; ++Part) { 4752 Value *PartStart = 4753 createStepForVF(Builder, PtrInd->getType(), VF, Part); 4754 4755 if (NeedsVectorIndex) { 4756 // Here we cache the whole vector, which means we can support the 4757 // extraction of any lane. However, in some cases the extractelement 4758 // instruction that is generated for scalar uses of this vector (e.g. 4759 // a load instruction) is not folded away. Therefore we still 4760 // calculate values for the first n lanes to avoid redundant moves 4761 // (when extracting the 0th element) and to produce scalar code (i.e. 4762 // additional add/gep instructions instead of expensive extractelement 4763 // instructions) when extracting higher-order elements. 4764 Value *PartStartSplat = Builder.CreateVectorSplat(VF, PartStart); 4765 Value *Indices = Builder.CreateAdd(PartStartSplat, UnitStepVec); 4766 Value *GlobalIndices = Builder.CreateAdd(PtrIndSplat, Indices); 4767 Value *SclrGep = 4768 emitTransformedIndex(Builder, GlobalIndices, PSE.getSE(), DL, II); 4769 SclrGep->setName("next.gep"); 4770 State.set(PhiR, SclrGep, Part); 4771 } 4772 4773 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4774 Value *Idx = Builder.CreateAdd( 4775 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 4776 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4777 Value *SclrGep = 4778 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 4779 SclrGep->setName("next.gep"); 4780 State.set(PhiR, SclrGep, VPIteration(Part, Lane)); 4781 } 4782 } 4783 return; 4784 } 4785 assert(isa<SCEVConstant>(II.getStep()) && 4786 "Induction step not a SCEV constant!"); 4787 Type *PhiType = II.getStep()->getType(); 4788 4789 // Build a pointer phi 4790 Value *ScalarStartValue = II.getStartValue(); 4791 Type *ScStValueType = ScalarStartValue->getType(); 4792 PHINode *NewPointerPhi = 4793 PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); 4794 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4795 4796 // A pointer induction, performed by using a gep 4797 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4798 Instruction *InductionLoc = LoopLatch->getTerminator(); 4799 const SCEV *ScalarStep = II.getStep(); 4800 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4801 Value *ScalarStepValue = 4802 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4803 Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF); 4804 Value *NumUnrolledElems = 4805 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 4806 Value *InductionGEP = 
GetElementPtrInst::Create( 4807 II.getElementType(), NewPointerPhi, 4808 Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 4809 InductionLoc); 4810 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4811 4812 // Create UF many actual address geps that use the pointer 4813 // phi as base and a vectorized version of the step value 4814 // (<step*0, ..., step*N>) as offset. 4815 for (unsigned Part = 0; Part < State.UF; ++Part) { 4816 Type *VecPhiType = VectorType::get(PhiType, State.VF); 4817 Value *StartOffsetScalar = 4818 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 4819 Value *StartOffset = 4820 Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 4821 // Create a vector of consecutive numbers from zero to VF. 4822 StartOffset = 4823 Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType)); 4824 4825 Value *GEP = Builder.CreateGEP( 4826 II.getElementType(), NewPointerPhi, 4827 Builder.CreateMul( 4828 StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue), 4829 "vector.gep")); 4830 State.set(PhiR, GEP, Part); 4831 } 4832 } 4833 } 4834 } 4835 4836 /// A helper function for checking whether an integer division-related 4837 /// instruction may divide by zero (in which case it must be predicated if 4838 /// executed conditionally in the scalar code). 4839 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4840 /// Non-zero divisors that are non compile-time constants will not be 4841 /// converted into multiplication, so we will still end up scalarizing 4842 /// the division, but can do so w/o predication. 4843 static bool mayDivideByZero(Instruction &I) { 4844 assert((I.getOpcode() == Instruction::UDiv || 4845 I.getOpcode() == Instruction::SDiv || 4846 I.getOpcode() == Instruction::URem || 4847 I.getOpcode() == Instruction::SRem) && 4848 "Unexpected instruction"); 4849 Value *Divisor = I.getOperand(1); 4850 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4851 return !CInt || CInt->isZero(); 4852 } 4853 4854 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def, 4855 VPUser &User, 4856 VPTransformState &State) { 4857 switch (I.getOpcode()) { 4858 case Instruction::Call: 4859 case Instruction::Br: 4860 case Instruction::PHI: 4861 case Instruction::GetElementPtr: 4862 case Instruction::Select: 4863 llvm_unreachable("This instruction is handled by a different recipe."); 4864 case Instruction::UDiv: 4865 case Instruction::SDiv: 4866 case Instruction::SRem: 4867 case Instruction::URem: 4868 case Instruction::Add: 4869 case Instruction::FAdd: 4870 case Instruction::Sub: 4871 case Instruction::FSub: 4872 case Instruction::FNeg: 4873 case Instruction::Mul: 4874 case Instruction::FMul: 4875 case Instruction::FDiv: 4876 case Instruction::FRem: 4877 case Instruction::Shl: 4878 case Instruction::LShr: 4879 case Instruction::AShr: 4880 case Instruction::And: 4881 case Instruction::Or: 4882 case Instruction::Xor: { 4883 // Just widen unops and binops. 4884 setDebugLocFromInst(&I); 4885 4886 for (unsigned Part = 0; Part < UF; ++Part) { 4887 SmallVector<Value *, 2> Ops; 4888 for (VPValue *VPOp : User.operands()) 4889 Ops.push_back(State.get(VPOp, Part)); 4890 4891 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 4892 4893 if (auto *VecOp = dyn_cast<Instruction>(V)) 4894 VecOp->copyIRFlags(&I); 4895 4896 // Use this vector value for all users of the original instruction. 
4897 State.set(Def, V, Part); 4898 addMetadata(V, &I); 4899 } 4900 4901 break; 4902 } 4903 case Instruction::ICmp: 4904 case Instruction::FCmp: { 4905 // Widen compares. Generate vector compares. 4906 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4907 auto *Cmp = cast<CmpInst>(&I); 4908 setDebugLocFromInst(Cmp); 4909 for (unsigned Part = 0; Part < UF; ++Part) { 4910 Value *A = State.get(User.getOperand(0), Part); 4911 Value *B = State.get(User.getOperand(1), Part); 4912 Value *C = nullptr; 4913 if (FCmp) { 4914 // Propagate fast math flags. 4915 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4916 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4917 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4918 } else { 4919 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4920 } 4921 State.set(Def, C, Part); 4922 addMetadata(C, &I); 4923 } 4924 4925 break; 4926 } 4927 4928 case Instruction::ZExt: 4929 case Instruction::SExt: 4930 case Instruction::FPToUI: 4931 case Instruction::FPToSI: 4932 case Instruction::FPExt: 4933 case Instruction::PtrToInt: 4934 case Instruction::IntToPtr: 4935 case Instruction::SIToFP: 4936 case Instruction::UIToFP: 4937 case Instruction::Trunc: 4938 case Instruction::FPTrunc: 4939 case Instruction::BitCast: { 4940 auto *CI = cast<CastInst>(&I); 4941 setDebugLocFromInst(CI); 4942 4943 /// Vectorize casts. 4944 Type *DestTy = 4945 (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF); 4946 4947 for (unsigned Part = 0; Part < UF; ++Part) { 4948 Value *A = State.get(User.getOperand(0), Part); 4949 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4950 State.set(Def, Cast, Part); 4951 addMetadata(Cast, &I); 4952 } 4953 break; 4954 } 4955 default: 4956 // This instruction is not vectorized by simple widening. 4957 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4958 llvm_unreachable("Unhandled instruction!"); 4959 } // end of switch. 4960 } 4961 4962 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4963 VPUser &ArgOperands, 4964 VPTransformState &State) { 4965 assert(!isa<DbgInfoIntrinsic>(I) && 4966 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4967 setDebugLocFromInst(&I); 4968 4969 Module *M = I.getParent()->getParent()->getParent(); 4970 auto *CI = cast<CallInst>(&I); 4971 4972 SmallVector<Type *, 4> Tys; 4973 for (Value *ArgOperand : CI->args()) 4974 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4975 4976 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4977 4978 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4979 // version of the instruction. 4980 // Is it beneficial to perform intrinsic call compared to lib call? 4981 bool NeedToScalarize = false; 4982 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4983 InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; 4984 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4985 assert((UseVectorIntrinsic || !NeedToScalarize) && 4986 "Instruction should be scalarized elsewhere."); 4987 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4988 "Either the intrinsic cost or vector call cost must be valid"); 4989 4990 for (unsigned Part = 0; Part < UF; ++Part) { 4991 SmallVector<Type *, 2> TysForDecl = {CI->getType()}; 4992 SmallVector<Value *, 4> Args; 4993 for (auto &I : enumerate(ArgOperands.operands())) { 4994 // Some intrinsics have a scalar argument - don't replace it with a 4995 // vector. 
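          // For illustration only: an intrinsic such as llvm.powi keeps its
          // i32 exponent scalar even when its floating-point operand is
          // widened; which operands must stay scalar is decided by the query
          // below.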
4996 Value *Arg; 4997 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 4998 Arg = State.get(I.value(), Part); 4999 else { 5000 Arg = State.get(I.value(), VPIteration(0, 0)); 5001 if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) 5002 TysForDecl.push_back(Arg->getType()); 5003 } 5004 Args.push_back(Arg); 5005 } 5006 5007 Function *VectorF; 5008 if (UseVectorIntrinsic) { 5009 // Use vector version of the intrinsic. 5010 if (VF.isVector()) 5011 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 5012 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 5013 assert(VectorF && "Can't retrieve vector intrinsic."); 5014 } else { 5015 // Use vector version of the function call. 5016 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 5017 #ifndef NDEBUG 5018 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 5019 "Can't create vector function."); 5020 #endif 5021 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 5022 } 5023 SmallVector<OperandBundleDef, 1> OpBundles; 5024 CI->getOperandBundlesAsDefs(OpBundles); 5025 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 5026 5027 if (isa<FPMathOperator>(V)) 5028 V->copyFastMathFlags(CI); 5029 5030 State.set(Def, V, Part); 5031 addMetadata(V, &I); 5032 } 5033 } 5034 5035 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef, 5036 VPUser &Operands, 5037 bool InvariantCond, 5038 VPTransformState &State) { 5039 setDebugLocFromInst(&I); 5040 5041 // The condition can be loop invariant but still defined inside the 5042 // loop. This means that we can't just use the original 'cond' value. 5043 // We have to take the 'vectorized' value and pick the first lane. 5044 // Instcombine will make this a no-op. 5045 auto *InvarCond = InvariantCond 5046 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 5047 : nullptr; 5048 5049 for (unsigned Part = 0; Part < UF; ++Part) { 5050 Value *Cond = 5051 InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part); 5052 Value *Op0 = State.get(Operands.getOperand(1), Part); 5053 Value *Op1 = State.get(Operands.getOperand(2), Part); 5054 Value *Sel = Builder.CreateSelect(Cond, Op0, Op1); 5055 State.set(VPDef, Sel, Part); 5056 addMetadata(Sel, &I); 5057 } 5058 } 5059 5060 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 5061 // We should not collect Scalars more than once per VF. Right now, this 5062 // function is called from collectUniformsAndScalars(), which already does 5063 // this check. Collecting Scalars for VF=1 does not make any sense. 5064 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 5065 "This function should not be visited twice for the same VF"); 5066 5067 SmallSetVector<Instruction *, 8> Worklist; 5068 5069 // These sets are used to seed the analysis with pointers used by memory 5070 // accesses that will remain scalar. 5071 SmallSetVector<Instruction *, 8> ScalarPtrs; 5072 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 5073 auto *Latch = TheLoop->getLoopLatch(); 5074 5075 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 5076 // The pointer operands of loads and stores will be scalar as long as the 5077 // memory access is not a gather or scatter operation. The value operand of a 5078 // store will remain scalar if the store is scalarized. 
5079 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 5080 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 5081 assert(WideningDecision != CM_Unknown && 5082 "Widening decision should be ready at this moment"); 5083 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 5084 if (Ptr == Store->getValueOperand()) 5085 return WideningDecision == CM_Scalarize; 5086 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 5087 "Ptr is neither a value nor a pointer operand"); 5088 return WideningDecision != CM_GatherScatter; 5089 }; 5090 5091 // A helper that returns true if the given value is a bitcast or 5092 // getelementptr instruction contained in the loop. 5093 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 5094 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 5095 isa<GetElementPtrInst>(V)) && 5096 !TheLoop->isLoopInvariant(V); 5097 }; 5098 5099 auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) { 5100 if (!isa<PHINode>(Ptr) || 5101 !Legal->getInductionVars().count(cast<PHINode>(Ptr))) 5102 return false; 5103 auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)]; 5104 if (Induction.getKind() != InductionDescriptor::IK_PtrInduction) 5105 return false; 5106 return isScalarUse(MemAccess, Ptr); 5107 }; 5108 5109 // A helper that evaluates a memory access's use of a pointer. If the 5110 // pointer is actually the pointer induction of a loop, it is inserted 5111 // into Worklist. If the use will be a scalar use, and the 5112 // pointer is only used by memory accesses, we place the pointer in 5113 // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs. 5114 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 5115 if (isScalarPtrInduction(MemAccess, Ptr)) { 5116 Worklist.insert(cast<Instruction>(Ptr)); 5117 LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr 5118 << "\n"); 5119 5120 Instruction *Update = cast<Instruction>( 5121 cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch)); 5122 5123 // If there is more than one user of Update (Ptr), we shouldn't assume it 5124 // will be scalar after vectorization as other users of the instruction 5125 // may require widening. Otherwise, add it to ScalarPtrs. 5126 if (Update->hasOneUse() && cast<Value>(*Update->user_begin()) == Ptr) { 5127 ScalarPtrs.insert(Update); 5128 return; 5129 } 5130 } 5131 // We only care about bitcast and getelementptr instructions contained in 5132 // the loop. 5133 if (!isLoopVaryingBitCastOrGEP(Ptr)) 5134 return; 5135 5136 // If the pointer has already been identified as scalar (e.g., if it was 5137 // also identified as uniform), there's nothing to do. 5138 auto *I = cast<Instruction>(Ptr); 5139 if (Worklist.count(I)) 5140 return; 5141 5142 // If the use of the pointer will be a scalar use, and all users of the 5143 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 5144 // place the pointer in PossibleNonScalarPtrs. 5145 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 5146 return isa<LoadInst>(U) || isa<StoreInst>(U); 5147 })) 5148 ScalarPtrs.insert(I); 5149 else 5150 PossibleNonScalarPtrs.insert(I); 5151 }; 5152 5153 // We seed the scalars analysis with two classes of instructions: (1) 5154 // instructions marked uniform-after-vectorization and (2) bitcast, 5155 // getelementptr and (pointer) phi instructions used by memory accesses 5156 // requiring a scalar use.
5157 // 5158 // (1) Add to the worklist all instructions that have been identified as 5159 // uniform-after-vectorization. 5160 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 5161 5162 // (2) Add to the worklist all bitcast and getelementptr instructions used by 5163 // memory accesses requiring a scalar use. The pointer operands of loads and 5164 // stores will be scalar as long as the memory access is not a gather or 5165 // scatter operation. The value operand of a store will remain scalar if the 5166 // store is scalarized. 5167 for (auto *BB : TheLoop->blocks()) 5168 for (auto &I : *BB) { 5169 if (auto *Load = dyn_cast<LoadInst>(&I)) { 5170 evaluatePtrUse(Load, Load->getPointerOperand()); 5171 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 5172 evaluatePtrUse(Store, Store->getPointerOperand()); 5173 evaluatePtrUse(Store, Store->getValueOperand()); 5174 } 5175 } 5176 for (auto *I : ScalarPtrs) 5177 if (!PossibleNonScalarPtrs.count(I)) { 5178 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 5179 Worklist.insert(I); 5180 } 5181 5182 // Insert the forced scalars. 5183 // FIXME: Currently widenPHIInstruction() often creates a dead vector 5184 // induction variable when the PHI user is scalarized. 5185 auto ForcedScalar = ForcedScalars.find(VF); 5186 if (ForcedScalar != ForcedScalars.end()) 5187 for (auto *I : ForcedScalar->second) 5188 Worklist.insert(I); 5189 5190 // Expand the worklist by looking through any bitcasts and getelementptr 5191 // instructions we've already identified as scalar. This is similar to the 5192 // expansion step in collectLoopUniforms(); however, here we're only 5193 // expanding to include additional bitcasts and getelementptr instructions. 5194 unsigned Idx = 0; 5195 while (Idx != Worklist.size()) { 5196 Instruction *Dst = Worklist[Idx++]; 5197 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 5198 continue; 5199 auto *Src = cast<Instruction>(Dst->getOperand(0)); 5200 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 5201 auto *J = cast<Instruction>(U); 5202 return !TheLoop->contains(J) || Worklist.count(J) || 5203 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 5204 isScalarUse(J, Src)); 5205 })) { 5206 Worklist.insert(Src); 5207 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 5208 } 5209 } 5210 5211 // An induction variable will remain scalar if all users of the induction 5212 // variable and induction variable update remain scalar. 5213 for (auto &Induction : Legal->getInductionVars()) { 5214 auto *Ind = Induction.first; 5215 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5216 5217 // If tail-folding is applied, the primary induction variable will be used 5218 // to feed a vector compare. 5219 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 5220 continue; 5221 5222 // Determine if all users of the induction variable are scalar after 5223 // vectorization. 5224 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5225 auto *I = cast<Instruction>(U); 5226 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 5227 }); 5228 if (!ScalarInd) 5229 continue; 5230 5231 // Determine if all users of the induction variable update instruction are 5232 // scalar after vectorization.
5233 auto ScalarIndUpdate = 5234 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5235 auto *I = cast<Instruction>(U); 5236 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 5237 }); 5238 if (!ScalarIndUpdate) 5239 continue; 5240 5241 // The induction variable and its update instruction will remain scalar. 5242 Worklist.insert(Ind); 5243 Worklist.insert(IndUpdate); 5244 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 5245 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 5246 << "\n"); 5247 } 5248 5249 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 5250 } 5251 5252 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const { 5253 if (!blockNeedsPredication(I->getParent())) 5254 return false; 5255 switch(I->getOpcode()) { 5256 default: 5257 break; 5258 case Instruction::Load: 5259 case Instruction::Store: { 5260 if (!Legal->isMaskRequired(I)) 5261 return false; 5262 auto *Ptr = getLoadStorePointerOperand(I); 5263 auto *Ty = getLoadStoreType(I); 5264 const Align Alignment = getLoadStoreAlignment(I); 5265 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 5266 TTI.isLegalMaskedGather(Ty, Alignment)) 5267 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 5268 TTI.isLegalMaskedScatter(Ty, Alignment)); 5269 } 5270 case Instruction::UDiv: 5271 case Instruction::SDiv: 5272 case Instruction::SRem: 5273 case Instruction::URem: 5274 return mayDivideByZero(*I); 5275 } 5276 return false; 5277 } 5278 5279 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 5280 Instruction *I, ElementCount VF) { 5281 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 5282 assert(getWideningDecision(I, VF) == CM_Unknown && 5283 "Decision should not be set yet."); 5284 auto *Group = getInterleavedAccessGroup(I); 5285 assert(Group && "Must have a group."); 5286 5287 // If the instruction's allocated size doesn't equal its type size, it 5288 // requires padding and will be scalarized. 5289 auto &DL = I->getModule()->getDataLayout(); 5290 auto *ScalarTy = getLoadStoreType(I); 5291 if (hasIrregularType(ScalarTy, DL)) 5292 return false; 5293 5294 // Check if masking is required. 5295 // A Group may need masking for one of two reasons: it resides in a block that 5296 // needs predication, or it was decided to use masking to deal with gaps 5297 // (either a gap at the end of a load-access that may result in a speculative 5298 // load, or any gaps in a store-access). 5299 bool PredicatedAccessRequiresMasking = 5300 blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); 5301 bool LoadAccessWithGapsRequiresEpilogMasking = 5302 isa<LoadInst>(I) && Group->requiresScalarEpilogue() && 5303 !isScalarEpilogueAllowed(); 5304 bool StoreAccessWithGapsRequiresMasking = 5305 isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()); 5306 if (!PredicatedAccessRequiresMasking && 5307 !LoadAccessWithGapsRequiresEpilogMasking && 5308 !StoreAccessWithGapsRequiresMasking) 5309 return true; 5310 5311 // If masked interleaving is required, we expect that the user/target had 5312 // enabled it, because otherwise it either wouldn't have been created or 5313 // it should have been invalidated by the CostModel.
5314 assert(useMaskedInterleavedAccesses(TTI) && 5315 "Masked interleave-groups for predicated accesses are not enabled."); 5316 5317 if (Group->isReverse()) 5318 return false; 5319 5320 auto *Ty = getLoadStoreType(I); 5321 const Align Alignment = getLoadStoreAlignment(I); 5322 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 5323 : TTI.isLegalMaskedStore(Ty, Alignment); 5324 } 5325 5326 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 5327 Instruction *I, ElementCount VF) { 5328 // Get and ensure we have a valid memory instruction. 5329 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction"); 5330 5331 auto *Ptr = getLoadStorePointerOperand(I); 5332 auto *ScalarTy = getLoadStoreType(I); 5333 5334 // In order to be widened, the pointer should be consecutive, first of all. 5335 if (!Legal->isConsecutivePtr(ScalarTy, Ptr)) 5336 return false; 5337 5338 // If the instruction is a store located in a predicated block, it will be 5339 // scalarized. 5340 if (isScalarWithPredication(I)) 5341 return false; 5342 5343 // If the instruction's allocated size doesn't equal its type size, it 5344 // requires padding and will be scalarized. 5345 auto &DL = I->getModule()->getDataLayout(); 5346 if (hasIrregularType(ScalarTy, DL)) 5347 return false; 5348 5349 return true; 5350 } 5351 5352 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 5353 // We should not collect Uniforms more than once per VF. Right now, 5354 // this function is called from collectUniformsAndScalars(), which 5355 // already does this check. Collecting Uniforms for VF=1 does not make any 5356 // sense. 5357 5358 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 5359 "This function should not be visited twice for the same VF"); 5360 5361 // Visit the list of Uniforms. If we find no uniform value, we won't 5362 // analyze it again; Uniforms.count(VF) will still return 1. 5363 Uniforms[VF].clear(); 5364 5365 // We now know that the loop is vectorizable! 5366 // Collect instructions inside the loop that will remain uniform after 5367 // vectorization. 5368 5369 // Global values, params and instructions outside of the current loop are 5370 // out of scope. 5371 auto isOutOfScope = [&](Value *V) -> bool { 5372 Instruction *I = dyn_cast<Instruction>(V); 5373 return (!I || !TheLoop->contains(I)); 5374 }; 5375 5376 // Worklist containing uniform instructions demanding lane 0. 5377 SetVector<Instruction *> Worklist; 5378 BasicBlock *Latch = TheLoop->getLoopLatch(); 5379 5380 // Add uniform instructions demanding lane 0 to the worklist. Instructions 5381 // that are scalar with predication must not be considered uniform after 5382 // vectorization, because that would create an erroneous replicating region 5383 // where only a single instance out of VF should be formed. 5384 // TODO: optimize such seldom cases if found important, see PR40816. 5385 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 5386 if (isOutOfScope(I)) { 5387 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 5388 << *I << "\n"); 5389 return; 5390 } 5391 if (isScalarWithPredication(I)) { 5392 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 5393 << *I << "\n"); 5394 return; 5395 } 5396 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 5397 Worklist.insert(I); 5398 }; 5399 5400 // Start with the conditional branch. If the branch condition is an 5401 // instruction contained in the loop that is only used by the branch, it is 5402 // uniform.
5403 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 5404 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 5405 addToWorklistIfAllowed(Cmp); 5406 5407 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 5408 InstWidening WideningDecision = getWideningDecision(I, VF); 5409 assert(WideningDecision != CM_Unknown && 5410 "Widening decision should be ready at this moment"); 5411 5412 // A uniform memory op is itself uniform. We exclude uniform stores 5413 // here as they demand the last lane, not the first one. 5414 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 5415 assert(WideningDecision == CM_Scalarize); 5416 return true; 5417 } 5418 5419 return (WideningDecision == CM_Widen || 5420 WideningDecision == CM_Widen_Reverse || 5421 WideningDecision == CM_Interleave); 5422 }; 5423 5424 5425 // Returns true if Ptr is the pointer operand of a memory access instruction 5426 // I, and I is known to not require scalarization. 5427 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 5428 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 5429 }; 5430 5431 // Holds a list of values which are known to have at least one uniform use. 5432 // Note that there may be other uses which aren't uniform. A "uniform use" 5433 // here is something which only demands lane 0 of the unrolled iterations; 5434 // it does not imply that all lanes produce the same value (e.g. this is not 5435 // the usual meaning of uniform) 5436 SetVector<Value *> HasUniformUse; 5437 5438 // Scan the loop for instructions which are either a) known to have only 5439 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 5440 for (auto *BB : TheLoop->blocks()) 5441 for (auto &I : *BB) { 5442 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) { 5443 switch (II->getIntrinsicID()) { 5444 case Intrinsic::sideeffect: 5445 case Intrinsic::experimental_noalias_scope_decl: 5446 case Intrinsic::assume: 5447 case Intrinsic::lifetime_start: 5448 case Intrinsic::lifetime_end: 5449 if (TheLoop->hasLoopInvariantOperands(&I)) 5450 addToWorklistIfAllowed(&I); 5451 break; 5452 default: 5453 break; 5454 } 5455 } 5456 5457 // ExtractValue instructions must be uniform, because the operands are 5458 // known to be loop-invariant. 5459 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) { 5460 assert(isOutOfScope(EVI->getAggregateOperand()) && 5461 "Expected aggregate value to be loop invariant"); 5462 addToWorklistIfAllowed(EVI); 5463 continue; 5464 } 5465 5466 // If there's no pointer operand, there's nothing to do. 5467 auto *Ptr = getLoadStorePointerOperand(&I); 5468 if (!Ptr) 5469 continue; 5470 5471 // A uniform memory op is itself uniform. We exclude uniform stores 5472 // here as they demand the last lane, not the first one. 5473 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 5474 addToWorklistIfAllowed(&I); 5475 5476 if (isUniformDecision(&I, VF)) { 5477 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 5478 HasUniformUse.insert(Ptr); 5479 } 5480 } 5481 5482 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 5483 // demanding) users. Since loops are assumed to be in LCSSA form, this 5484 // disallows uses outside the loop as well. 
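  // For example (hypothetical values): if %addr is used solely as the pointer
  // operand of a load that will be widened into a consecutive vector load, only
  // lane 0 of %addr is ever materialized, so %addr is recorded here as having a
  // uniform use; any additional non-memory use of %addr would disqualify it in
  // the loop below.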
5485 for (auto *V : HasUniformUse) { 5486 if (isOutOfScope(V)) 5487 continue; 5488 auto *I = cast<Instruction>(V); 5489 auto UsersAreMemAccesses = 5490 llvm::all_of(I->users(), [&](User *U) -> bool { 5491 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 5492 }); 5493 if (UsersAreMemAccesses) 5494 addToWorklistIfAllowed(I); 5495 } 5496 5497 // Expand Worklist in topological order: whenever a new instruction 5498 // is added , its users should be already inside Worklist. It ensures 5499 // a uniform instruction will only be used by uniform instructions. 5500 unsigned idx = 0; 5501 while (idx != Worklist.size()) { 5502 Instruction *I = Worklist[idx++]; 5503 5504 for (auto OV : I->operand_values()) { 5505 // isOutOfScope operands cannot be uniform instructions. 5506 if (isOutOfScope(OV)) 5507 continue; 5508 // First order recurrence Phi's should typically be considered 5509 // non-uniform. 5510 auto *OP = dyn_cast<PHINode>(OV); 5511 if (OP && Legal->isFirstOrderRecurrence(OP)) 5512 continue; 5513 // If all the users of the operand are uniform, then add the 5514 // operand into the uniform worklist. 5515 auto *OI = cast<Instruction>(OV); 5516 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 5517 auto *J = cast<Instruction>(U); 5518 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 5519 })) 5520 addToWorklistIfAllowed(OI); 5521 } 5522 } 5523 5524 // For an instruction to be added into Worklist above, all its users inside 5525 // the loop should also be in Worklist. However, this condition cannot be 5526 // true for phi nodes that form a cyclic dependence. We must process phi 5527 // nodes separately. An induction variable will remain uniform if all users 5528 // of the induction variable and induction variable update remain uniform. 5529 // The code below handles both pointer and non-pointer induction variables. 5530 for (auto &Induction : Legal->getInductionVars()) { 5531 auto *Ind = Induction.first; 5532 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5533 5534 // Determine if all users of the induction variable are uniform after 5535 // vectorization. 5536 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5537 auto *I = cast<Instruction>(U); 5538 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 5539 isVectorizedMemAccessUse(I, Ind); 5540 }); 5541 if (!UniformInd) 5542 continue; 5543 5544 // Determine if all users of the induction variable update instruction are 5545 // uniform after vectorization. 5546 auto UniformIndUpdate = 5547 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5548 auto *I = cast<Instruction>(U); 5549 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 5550 isVectorizedMemAccessUse(I, IndUpdate); 5551 }); 5552 if (!UniformIndUpdate) 5553 continue; 5554 5555 // The induction variable and its update instruction will remain uniform. 5556 addToWorklistIfAllowed(Ind); 5557 addToWorklistIfAllowed(IndUpdate); 5558 } 5559 5560 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 5561 } 5562 5563 bool LoopVectorizationCostModel::runtimeChecksRequired() { 5564 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 5565 5566 if (Legal->getRuntimePointerChecking()->Need) { 5567 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 5568 "runtime pointer checks needed. 
Enable vectorization of this " 5569 "loop with '#pragma clang loop vectorize(enable)' when " 5570 "compiling with -Os/-Oz", 5571 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5572 return true; 5573 } 5574 5575 if (!PSE.getUnionPredicate().getPredicates().empty()) { 5576 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 5577 "runtime SCEV checks needed. Enable vectorization of this " 5578 "loop with '#pragma clang loop vectorize(enable)' when " 5579 "compiling with -Os/-Oz", 5580 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5581 return true; 5582 } 5583 5584 // FIXME: Avoid specializing for stride==1 instead of bailing out. 5585 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 5586 reportVectorizationFailure("Runtime stride check for small trip count", 5587 "runtime stride == 1 checks needed. Enable vectorization of " 5588 "this loop without such check by compiling with -Os/-Oz", 5589 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5590 return true; 5591 } 5592 5593 return false; 5594 } 5595 5596 ElementCount 5597 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 5598 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) 5599 return ElementCount::getScalable(0); 5600 5601 if (Hints->isScalableVectorizationDisabled()) { 5602 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 5603 "ScalableVectorizationDisabled", ORE, TheLoop); 5604 return ElementCount::getScalable(0); 5605 } 5606 5607 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n"); 5608 5609 auto MaxScalableVF = ElementCount::getScalable( 5610 std::numeric_limits<ElementCount::ScalarTy>::max()); 5611 5612 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 5613 // FIXME: While for scalable vectors this is currently sufficient, this should 5614 // be replaced by a more detailed mechanism that filters out specific VFs, 5615 // instead of invalidating vectorization for a whole set of VFs based on the 5616 // MaxVF. 5617 5618 // Disable scalable vectorization if the loop contains unsupported reductions. 5619 if (!canVectorizeReductions(MaxScalableVF)) { 5620 reportVectorizationInfo( 5621 "Scalable vectorization not supported for the reduction " 5622 "operations found in this loop.", 5623 "ScalableVFUnfeasible", ORE, TheLoop); 5624 return ElementCount::getScalable(0); 5625 } 5626 5627 // Disable scalable vectorization if the loop contains any instructions 5628 // with element types not supported for scalable vectors. 5629 if (any_of(ElementTypesInLoop, [&](Type *Ty) { 5630 return !Ty->isVoidTy() && 5631 !this->TTI.isElementTypeLegalForScalableVector(Ty); 5632 })) { 5633 reportVectorizationInfo("Scalable vectorization is not supported " 5634 "for all element types found in this loop.", 5635 "ScalableVFUnfeasible", ORE, TheLoop); 5636 return ElementCount::getScalable(0); 5637 } 5638 5639 if (Legal->isSafeForAnyVectorWidth()) 5640 return MaxScalableVF; 5641 5642 // Limit MaxScalableVF by the maximum safe dependence distance. 5643 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 5644 if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) { 5645 unsigned VScaleMax = TheFunction->getFnAttribute(Attribute::VScaleRange) 5646 .getVScaleRangeArgs() 5647 .second; 5648 if (VScaleMax > 0) 5649 MaxVScale = VScaleMax; 5650 } 5651 MaxScalableVF = ElementCount::getScalable( 5652 MaxVScale ? 
(MaxSafeElements / MaxVScale.getValue()) : 0); 5653 if (!MaxScalableVF) 5654 reportVectorizationInfo( 5655 "Max legal vector width too small, scalable vectorization " 5656 "unfeasible.", 5657 "ScalableVFUnfeasible", ORE, TheLoop); 5658 5659 return MaxScalableVF; 5660 } 5661 5662 FixedScalableVFPair 5663 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount, 5664 ElementCount UserVF) { 5665 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5666 unsigned SmallestType, WidestType; 5667 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5668 5669 // Get the maximum safe dependence distance in bits computed by LAA. 5670 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 5671 // the memory accesses that is most restrictive (involved in the smallest 5672 // dependence distance). 5673 unsigned MaxSafeElements = 5674 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 5675 5676 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 5677 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 5678 5679 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 5680 << ".\n"); 5681 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 5682 << ".\n"); 5683 5684 // First analyze the UserVF, fall back if the UserVF should be ignored. 5685 if (UserVF) { 5686 auto MaxSafeUserVF = 5687 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; 5688 5689 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { 5690 // If `VF=vscale x N` is safe, then so is `VF=N` 5691 if (UserVF.isScalable()) 5692 return FixedScalableVFPair( 5693 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); 5694 else 5695 return UserVF; 5696 } 5697 5698 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 5699 5700 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 5701 // is better to ignore the hint and let the compiler choose a suitable VF. 5702 if (!UserVF.isScalable()) { 5703 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5704 << " is unsafe, clamping to max safe VF=" 5705 << MaxSafeFixedVF << ".\n"); 5706 ORE->emit([&]() { 5707 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5708 TheLoop->getStartLoc(), 5709 TheLoop->getHeader()) 5710 << "User-specified vectorization factor " 5711 << ore::NV("UserVectorizationFactor", UserVF) 5712 << " is unsafe, clamping to maximum safe vectorization factor " 5713 << ore::NV("VectorizationFactor", MaxSafeFixedVF); 5714 }); 5715 return MaxSafeFixedVF; 5716 } 5717 5718 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) { 5719 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5720 << " is ignored because scalable vectors are not " 5721 "available.\n"); 5722 ORE->emit([&]() { 5723 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5724 TheLoop->getStartLoc(), 5725 TheLoop->getHeader()) 5726 << "User-specified vectorization factor " 5727 << ore::NV("UserVectorizationFactor", UserVF) 5728 << " is ignored because the target does not support scalable " 5729 "vectors. The compiler will pick a more suitable value."; 5730 }); 5731 } else { 5732 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5733 << " is unsafe. 
Ignoring scalable UserVF.\n"); 5734 ORE->emit([&]() { 5735 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5736 TheLoop->getStartLoc(), 5737 TheLoop->getHeader()) 5738 << "User-specified vectorization factor " 5739 << ore::NV("UserVectorizationFactor", UserVF) 5740 << " is unsafe. Ignoring the hint to let the compiler pick a " 5741 "more suitable value."; 5742 }); 5743 } 5744 } 5745 5746 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 5747 << " / " << WidestType << " bits.\n"); 5748 5749 FixedScalableVFPair Result(ElementCount::getFixed(1), 5750 ElementCount::getScalable(0)); 5751 if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, 5752 WidestType, MaxSafeFixedVF)) 5753 Result.FixedVF = MaxVF; 5754 5755 if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, 5756 WidestType, MaxSafeScalableVF)) 5757 if (MaxVF.isScalable()) { 5758 Result.ScalableVF = MaxVF; 5759 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF 5760 << "\n"); 5761 } 5762 5763 return Result; 5764 } 5765 5766 FixedScalableVFPair 5767 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { 5768 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 5769 // TODO: It may by useful to do since it's still likely to be dynamically 5770 // uniform if the target can skip. 5771 reportVectorizationFailure( 5772 "Not inserting runtime ptr check for divergent target", 5773 "runtime pointer checks needed. Not enabled for divergent target", 5774 "CantVersionLoopWithDivergentTarget", ORE, TheLoop); 5775 return FixedScalableVFPair::getNone(); 5776 } 5777 5778 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5779 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 5780 if (TC == 1) { 5781 reportVectorizationFailure("Single iteration (non) loop", 5782 "loop trip count is one, irrelevant for vectorization", 5783 "SingleIterationLoop", ORE, TheLoop); 5784 return FixedScalableVFPair::getNone(); 5785 } 5786 5787 switch (ScalarEpilogueStatus) { 5788 case CM_ScalarEpilogueAllowed: 5789 return computeFeasibleMaxVF(TC, UserVF); 5790 case CM_ScalarEpilogueNotAllowedUsePredicate: 5791 LLVM_FALLTHROUGH; 5792 case CM_ScalarEpilogueNotNeededUsePredicate: 5793 LLVM_DEBUG( 5794 dbgs() << "LV: vector predicate hint/switch found.\n" 5795 << "LV: Not allowing scalar epilogue, creating predicated " 5796 << "vector loop.\n"); 5797 break; 5798 case CM_ScalarEpilogueNotAllowedLowTripLoop: 5799 // fallthrough as a special case of OptForSize 5800 case CM_ScalarEpilogueNotAllowedOptSize: 5801 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize) 5802 LLVM_DEBUG( 5803 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 5804 else 5805 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip " 5806 << "count.\n"); 5807 5808 // Bail if runtime checks are required, which are not good when optimising 5809 // for size. 5810 if (runtimeChecksRequired()) 5811 return FixedScalableVFPair::getNone(); 5812 5813 break; 5814 } 5815 5816 // The only loops we can vectorize without a scalar epilogue, are loops with 5817 // a bottom-test and a single exiting block. We'd have to handle the fact 5818 // that not every instruction executes on the last iteration. This will 5819 // require a lane mask which varies through the vector loop body. 
(TODO) 5820 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 5821 // If there was a tail-folding hint/switch, but we can't fold the tail by 5822 // masking, fall back to a vectorization with a scalar epilogue. 5823 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5824 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5825 "scalar epilogue instead.\n"); 5826 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5827 return computeFeasibleMaxVF(TC, UserVF); 5828 } 5829 return FixedScalableVFPair::getNone(); 5830 } 5831 5832 // Now try tail folding. 5833 5834 // Invalidate interleave groups that require an epilogue if we can't mask 5835 // the interleave-group. 5836 if (!useMaskedInterleavedAccesses(TTI)) { 5837 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() && 5838 "No decisions should have been taken at this point"); 5839 // Note: There is no need to invalidate any cost modeling decisions here, as 5840 // none were taken so far. 5841 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 5842 } 5843 5844 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF); 5845 // Avoid tail folding if the trip count is known to be a multiple of any VF 5846 // we chose. 5847 // FIXME: The condition below pessimises the case for fixed-width vectors, 5848 // when scalable VFs are also candidates for vectorization. 5849 if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) { 5850 ElementCount MaxFixedVF = MaxFactors.FixedVF; 5851 assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) && 5852 "MaxFixedVF must be a power of 2"); 5853 unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC 5854 : MaxFixedVF.getFixedValue(); 5855 ScalarEvolution *SE = PSE.getSE(); 5856 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5857 const SCEV *ExitCount = SE->getAddExpr( 5858 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5859 const SCEV *Rem = SE->getURemExpr( 5860 SE->applyLoopGuards(ExitCount, TheLoop), 5861 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5862 if (Rem->isZero()) { 5863 // Accept MaxFixedVF if we do not have a tail. 5864 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5865 return MaxFactors; 5866 } 5867 } 5868 5869 // For scalable vectors, don't use tail folding as this is currently not yet 5870 // supported. The code is likely to have ended up here if the trip count is 5871 // low, in which case it makes sense not to use scalable vectors. 5872 if (MaxFactors.ScalableVF.isVector()) 5873 MaxFactors.ScalableVF = ElementCount::getScalable(0); 5874 5875 // If we don't know the precise trip count, or if the trip count that we 5876 // found modulo the vectorization factor is not zero, try to fold the tail 5877 // by masking. 5878 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5879 if (Legal->prepareToFoldTailByMasking()) { 5880 FoldTailByMasking = true; 5881 return MaxFactors; 5882 } 5883 5884 // If there was a tail-folding hint/switch, but we can't fold the tail by 5885 // masking, fall back to a vectorization with a scalar epilogue.
5886 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5887 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5888 "scalar epilogue instead.\n"); 5889 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5890 return MaxFactors; 5891 } 5892 5893 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5894 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5895 return FixedScalableVFPair::getNone(); 5896 } 5897 5898 if (TC == 0) { 5899 reportVectorizationFailure( 5900 "Unable to calculate the loop count due to complex control flow", 5901 "unable to calculate the loop count due to complex control flow", 5902 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5903 return FixedScalableVFPair::getNone(); 5904 } 5905 5906 reportVectorizationFailure( 5907 "Cannot optimize for size and vectorize at the same time.", 5908 "cannot optimize for size and vectorize at the same time. " 5909 "Enable vectorization of this loop with '#pragma clang loop " 5910 "vectorize(enable)' when compiling with -Os/-Oz", 5911 "NoTailLoopWithOptForSize", ORE, TheLoop); 5912 return FixedScalableVFPair::getNone(); 5913 } 5914 5915 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( 5916 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, 5917 const ElementCount &MaxSafeVF) { 5918 bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); 5919 TypeSize WidestRegister = TTI.getRegisterBitWidth( 5920 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector 5921 : TargetTransformInfo::RGK_FixedWidthVector); 5922 5923 // Convenience function to return the minimum of two ElementCounts. 5924 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) { 5925 assert((LHS.isScalable() == RHS.isScalable()) && 5926 "Scalable flags must match"); 5927 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS; 5928 }; 5929 5930 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 5931 // Note that both WidestRegister and WidestType may not be a powers of 2. 5932 auto MaxVectorElementCount = ElementCount::get( 5933 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), 5934 ComputeScalableMaxVF); 5935 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); 5936 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5937 << (MaxVectorElementCount * WidestType) << " bits.\n"); 5938 5939 if (!MaxVectorElementCount) { 5940 LLVM_DEBUG(dbgs() << "LV: The target has no " 5941 << (ComputeScalableMaxVF ? "scalable" : "fixed") 5942 << " vector registers.\n"); 5943 return ElementCount::getFixed(1); 5944 } 5945 5946 const auto TripCountEC = ElementCount::getFixed(ConstTripCount); 5947 if (ConstTripCount && 5948 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && 5949 isPowerOf2_32(ConstTripCount)) { 5950 // We need to clamp the VF to be the ConstTripCount. There is no point in 5951 // choosing a higher viable VF as done in the loop below. If 5952 // MaxVectorElementCount is scalable, we only fall back on a fixed VF when 5953 // the TC is less than or equal to the known number of lanes. 
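    // For illustration (hypothetical numbers): with a constant trip count of 4
    // and 256-bit registers holding i32 elements (MaxVectorElementCount = 8),
    // the trip count is a power of 2 and no larger than 8, so the VF is clamped
    // to 4 instead of picking 8 and leaving no full vector iteration.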
5954 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 5955 << ConstTripCount << "\n"); 5956 return TripCountEC; 5957 } 5958 5959 ElementCount MaxVF = MaxVectorElementCount; 5960 if (TTI.shouldMaximizeVectorBandwidth() || 5961 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 5962 auto MaxVectorElementCountMaxBW = ElementCount::get( 5963 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), 5964 ComputeScalableMaxVF); 5965 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); 5966 5967 // Collect all viable vectorization factors larger than the default MaxVF 5968 // (i.e. MaxVectorElementCount). 5969 SmallVector<ElementCount, 8> VFs; 5970 for (ElementCount VS = MaxVectorElementCount * 2; 5971 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) 5972 VFs.push_back(VS); 5973 5974 // For each VF calculate its register usage. 5975 auto RUs = calculateRegisterUsage(VFs); 5976 5977 // Select the largest VF which doesn't require more registers than existing 5978 // ones. 5979 for (int i = RUs.size() - 1; i >= 0; --i) { 5980 bool Selected = true; 5981 for (auto &pair : RUs[i].MaxLocalUsers) { 5982 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5983 if (pair.second > TargetNumRegisters) 5984 Selected = false; 5985 } 5986 if (Selected) { 5987 MaxVF = VFs[i]; 5988 break; 5989 } 5990 } 5991 if (ElementCount MinVF = 5992 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { 5993 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 5994 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5995 << ") with target's minimum: " << MinVF << '\n'); 5996 MaxVF = MinVF; 5997 } 5998 } 5999 } 6000 return MaxVF; 6001 } 6002 6003 bool LoopVectorizationCostModel::isMoreProfitable( 6004 const VectorizationFactor &A, const VectorizationFactor &B) const { 6005 InstructionCost CostA = A.Cost; 6006 InstructionCost CostB = B.Cost; 6007 6008 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 6009 6010 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 6011 MaxTripCount) { 6012 // If we are folding the tail and the trip count is a known (possibly small) 6013 // constant, the trip count will be rounded up to an integer number of 6014 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 6015 // which we compare directly. When not folding the tail, the total cost will 6016 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 6017 // approximated with the per-lane cost below instead of using the tripcount 6018 // as here. 6019 auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 6020 auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 6021 return RTCostA < RTCostB; 6022 } 6023 6024 // When set to preferred, for now assume vscale may be larger than 1, so 6025 // that scalable vectorization is slightly favorable over fixed-width 6026 // vectorization. 
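  // For illustration (hypothetical costs): a scalable factor A with
  // Width = vscale x 4 and Cost = 10 compared against a fixed factor B with
  // Width = 4 and Cost = 10 gives 10 * 4 <= 10 * 4, so the non-strict
  // comparison below breaks the tie in favor of the scalable factor.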
6027 if (Hints->isScalableVectorizationPreferred()) 6028 if (A.Width.isScalable() && !B.Width.isScalable()) 6029 return (CostA * B.Width.getKnownMinValue()) <= 6030 (CostB * A.Width.getKnownMinValue()); 6031 6032 // To avoid the need for FP division: 6033 // (CostA / A.Width) < (CostB / B.Width) 6034 // <=> (CostA * B.Width) < (CostB * A.Width) 6035 return (CostA * B.Width.getKnownMinValue()) < 6036 (CostB * A.Width.getKnownMinValue()); 6037 } 6038 6039 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 6040 const ElementCountSet &VFCandidates) { 6041 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 6042 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 6043 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 6044 assert(VFCandidates.count(ElementCount::getFixed(1)) && 6045 "Expected Scalar VF to be a candidate"); 6046 6047 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); 6048 VectorizationFactor ChosenFactor = ScalarCost; 6049 6050 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 6051 if (ForceVectorization && VFCandidates.size() > 1) { 6052 // Ignore scalar width, because the user explicitly wants vectorization. 6053 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 6054 // evaluation. 6055 ChosenFactor.Cost = InstructionCost::getMax(); 6056 } 6057 6058 SmallVector<InstructionVFPair> InvalidCosts; 6059 for (const auto &i : VFCandidates) { 6060 // The cost for scalar VF=1 is already calculated, so ignore it. 6061 if (i.isScalar()) 6062 continue; 6063 6064 VectorizationCostTy C = expectedCost(i, &InvalidCosts); 6065 VectorizationFactor Candidate(i, C.first); 6066 LLVM_DEBUG( 6067 dbgs() << "LV: Vector loop of width " << i << " costs: " 6068 << (Candidate.Cost / Candidate.Width.getKnownMinValue()) 6069 << (i.isScalable() ? " (assuming a minimum vscale of 1)" : "") 6070 << ".\n"); 6071 6072 if (!C.second && !ForceVectorization) { 6073 LLVM_DEBUG( 6074 dbgs() << "LV: Not considering vector loop of width " << i 6075 << " because it will not generate any vector instructions.\n"); 6076 continue; 6077 } 6078 6079 // If profitable add it to ProfitableVF list. 6080 if (isMoreProfitable(Candidate, ScalarCost)) 6081 ProfitableVFs.push_back(Candidate); 6082 6083 if (isMoreProfitable(Candidate, ChosenFactor)) 6084 ChosenFactor = Candidate; 6085 } 6086 6087 // Emit a report of VFs with invalid costs in the loop. 6088 if (!InvalidCosts.empty()) { 6089 // Group the remarks per instruction, keeping the instruction order from 6090 // InvalidCosts. 6091 std::map<Instruction *, unsigned> Numbering; 6092 unsigned I = 0; 6093 for (auto &Pair : InvalidCosts) 6094 if (!Numbering.count(Pair.first)) 6095 Numbering[Pair.first] = I++; 6096 6097 // Sort the list, first on instruction(number) then on VF. 
6098 llvm::sort(InvalidCosts, 6099 [&Numbering](InstructionVFPair &A, InstructionVFPair &B) { 6100 if (Numbering[A.first] != Numbering[B.first]) 6101 return Numbering[A.first] < Numbering[B.first]; 6102 ElementCountComparator ECC; 6103 return ECC(A.second, B.second); 6104 }); 6105 6106 // For a list of ordered instruction-vf pairs: 6107 // [(load, vf1), (load, vf2), (store, vf1)] 6108 // Group the instructions together to emit separate remarks for: 6109 // load (vf1, vf2) 6110 // store (vf1) 6111 auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts); 6112 auto Subset = ArrayRef<InstructionVFPair>(); 6113 do { 6114 if (Subset.empty()) 6115 Subset = Tail.take_front(1); 6116 6117 Instruction *I = Subset.front().first; 6118 6119 // If the next instruction is different, or if there are no other pairs, 6120 // emit a remark for the collated subset. e.g. 6121 // [(load, vf1), (load, vf2))] 6122 // to emit: 6123 // remark: invalid costs for 'load' at VF=(vf, vf2) 6124 if (Subset == Tail || Tail[Subset.size()].first != I) { 6125 std::string OutString; 6126 raw_string_ostream OS(OutString); 6127 assert(!Subset.empty() && "Unexpected empty range"); 6128 OS << "Instruction with invalid costs prevented vectorization at VF=("; 6129 for (auto &Pair : Subset) 6130 OS << (Pair.second == Subset.front().second ? "" : ", ") 6131 << Pair.second; 6132 OS << "):"; 6133 if (auto *CI = dyn_cast<CallInst>(I)) 6134 OS << " call to " << CI->getCalledFunction()->getName(); 6135 else 6136 OS << " " << I->getOpcodeName(); 6137 OS.flush(); 6138 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); 6139 Tail = Tail.drop_front(Subset.size()); 6140 Subset = {}; 6141 } else 6142 // Grow the subset by one element 6143 Subset = Tail.take_front(Subset.size() + 1); 6144 } while (!Tail.empty()); 6145 } 6146 6147 if (!EnableCondStoresVectorization && NumPredStores) { 6148 reportVectorizationFailure("There are conditional stores.", 6149 "store that is conditionally executed prevents vectorization", 6150 "ConditionalStore", ORE, TheLoop); 6151 ChosenFactor = ScalarCost; 6152 } 6153 6154 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 6155 ChosenFactor.Cost >= ScalarCost.Cost) dbgs() 6156 << "LV: Vectorization seems to be not beneficial, " 6157 << "but was forced by a user.\n"); 6158 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 6159 return ChosenFactor; 6160 } 6161 6162 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 6163 const Loop &L, ElementCount VF) const { 6164 // Cross iteration phis such as reductions need special handling and are 6165 // currently unsupported. 6166 if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) { 6167 return Legal->isFirstOrderRecurrence(&Phi) || 6168 Legal->isReductionVariable(&Phi); 6169 })) 6170 return false; 6171 6172 // Phis with uses outside of the loop require special handling and are 6173 // currently unsupported. 6174 for (auto &Entry : Legal->getInductionVars()) { 6175 // Look for uses of the value of the induction at the last iteration. 6176 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 6177 for (User *U : PostInc->users()) 6178 if (!L.contains(cast<Instruction>(U))) 6179 return false; 6180 // Look for uses of penultimate value of the induction. 6181 for (User *U : Entry.first->users()) 6182 if (!L.contains(cast<Instruction>(U))) 6183 return false; 6184 } 6185 6186 // Induction variables that are widened require special handling that is 6187 // currently not supported. 
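  // For example (hypothetical loop): an induction variable whose value feeds
  // widened arithmetic in the loop body becomes a vector phi rather than a
  // scalar, so such a loop is rejected here as an epilogue-vectorization
  // candidate.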
6188 if (any_of(Legal->getInductionVars(), [&](auto &Entry) { 6189 return !(this->isScalarAfterVectorization(Entry.first, VF) || 6190 this->isProfitableToScalarize(Entry.first, VF)); 6191 })) 6192 return false; 6193 6194 // Epilogue vectorization code has not been audited to ensure it handles 6195 // non-latch exits properly. It may be fine, but it needs to be audited and 6196 // tested. 6197 if (L.getExitingBlock() != L.getLoopLatch()) 6198 return false; 6199 6200 return true; 6201 } 6202 6203 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( 6204 const ElementCount VF) const { 6205 // FIXME: We need a much better cost-model to take different parameters such 6206 // as register pressure, code size increase and cost of extra branches into 6207 // account. For now we apply a very crude heuristic and only consider loops 6208 // with vectorization factors larger than a certain value. 6209 // We also consider epilogue vectorization unprofitable for targets that don't 6210 // consider interleaving beneficial (e.g. MVE). 6211 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1) 6212 return false; 6213 if (VF.getFixedValue() >= EpilogueVectorizationMinVF) 6214 return true; 6215 return false; 6216 } 6217 6218 VectorizationFactor 6219 LoopVectorizationCostModel::selectEpilogueVectorizationFactor( 6220 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) { 6221 VectorizationFactor Result = VectorizationFactor::Disabled(); 6222 if (!EnableEpilogueVectorization) { 6223 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";); 6224 return Result; 6225 } 6226 6227 if (!isScalarEpilogueAllowed()) { 6228 LLVM_DEBUG( 6229 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is " 6230 "allowed.\n";); 6231 return Result; 6232 } 6233 6234 // Not really a cost consideration, but check for unsupported cases here to 6235 // simplify the logic. 6236 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { 6237 LLVM_DEBUG( 6238 dbgs() << "LEV: Unable to vectorize epilogue because the loop is " 6239 "not a supported candidate.\n";); 6240 return Result; 6241 } 6242 6243 if (EpilogueVectorizationForceVF > 1) { 6244 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";); 6245 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF); 6246 if (LVP.hasPlanWithVF(ForcedEC)) 6247 return {ForcedEC, 0}; 6248 else { 6249 LLVM_DEBUG( 6250 dbgs() 6251 << "LEV: Epilogue vectorization forced factor is not viable.\n";); 6252 return Result; 6253 } 6254 } 6255 6256 if (TheLoop->getHeader()->getParent()->hasOptSize() || 6257 TheLoop->getHeader()->getParent()->hasMinSize()) { 6258 LLVM_DEBUG( 6259 dbgs() 6260 << "LEV: Epilogue vectorization skipped due to opt for size.\n";); 6261 return Result; 6262 } 6263 6264 auto FixedMainLoopVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue()); 6265 if (MainLoopVF.isScalable()) 6266 LLVM_DEBUG( 6267 dbgs() << "LEV: Epilogue vectorization using scalable vectors not " 6268 "yet supported.
Converting to fixed-width (VF=" 6269 << FixedMainLoopVF << ") instead\n"); 6270 6271 if (!isEpilogueVectorizationProfitable(FixedMainLoopVF)) { 6272 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for " 6273 "this loop\n"); 6274 return Result; 6275 } 6276 6277 for (auto &NextVF : ProfitableVFs) 6278 if (ElementCount::isKnownLT(NextVF.Width, FixedMainLoopVF) && 6279 (Result.Width.getFixedValue() == 1 || 6280 isMoreProfitable(NextVF, Result)) && 6281 LVP.hasPlanWithVF(NextVF.Width)) 6282 Result = NextVF; 6283 6284 if (Result != VectorizationFactor::Disabled()) 6285 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 6286 << Result.Width.getFixedValue() << "\n";); 6287 return Result; 6288 } 6289 6290 std::pair<unsigned, unsigned> 6291 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 6292 unsigned MinWidth = -1U; 6293 unsigned MaxWidth = 8; 6294 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 6295 for (Type *T : ElementTypesInLoop) { 6296 MinWidth = std::min<unsigned>( 6297 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 6298 MaxWidth = std::max<unsigned>( 6299 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 6300 } 6301 return {MinWidth, MaxWidth}; 6302 } 6303 6304 void LoopVectorizationCostModel::collectElementTypesForWidening() { 6305 ElementTypesInLoop.clear(); 6306 // For each block. 6307 for (BasicBlock *BB : TheLoop->blocks()) { 6308 // For each instruction in the loop. 6309 for (Instruction &I : BB->instructionsWithoutDebug()) { 6310 Type *T = I.getType(); 6311 6312 // Skip ignored values. 6313 if (ValuesToIgnore.count(&I)) 6314 continue; 6315 6316 // Only examine Loads, Stores and PHINodes. 6317 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 6318 continue; 6319 6320 // Examine PHI nodes that are reduction variables. Update the type to 6321 // account for the recurrence type. 6322 if (auto *PN = dyn_cast<PHINode>(&I)) { 6323 if (!Legal->isReductionVariable(PN)) 6324 continue; 6325 const RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[PN]; 6326 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 6327 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 6328 RdxDesc.getRecurrenceType(), 6329 TargetTransformInfo::ReductionFlags())) 6330 continue; 6331 T = RdxDesc.getRecurrenceType(); 6332 } 6333 6334 // Examine the stored values. 6335 if (auto *ST = dyn_cast<StoreInst>(&I)) 6336 T = ST->getValueOperand()->getType(); 6337 6338 // Ignore loaded pointer types and stored pointer types that are not 6339 // vectorizable. 6340 // 6341 // FIXME: The check here attempts to predict whether a load or store will 6342 // be vectorized. We only know this for certain after a VF has 6343 // been selected. Here, we assume that if an access can be 6344 // vectorized, it will be. We should also look at extending this 6345 // optimization to non-pointer types. 6346 // 6347 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 6348 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 6349 continue; 6350 6351 ElementTypesInLoop.insert(T); 6352 } 6353 } 6354 } 6355 6356 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 6357 unsigned LoopCost) { 6358 // -- The interleave heuristics -- 6359 // We interleave the loop in order to expose ILP and reduce the loop overhead. 6360 // There are many micro-architectural considerations that we can't predict 6361 // at this level. 
For example, frontend pressure (on decode or fetch) due to 6362 // code size, or the number and capabilities of the execution ports. 6363 // 6364 // We use the following heuristics to select the interleave count: 6365 // 1. If the code has reductions, then we interleave to break the cross 6366 // iteration dependency. 6367 // 2. If the loop is really small, then we interleave to reduce the loop 6368 // overhead. 6369 // 3. We don't interleave if we think that we will spill registers to memory 6370 // due to the increased register pressure. 6371 6372 if (!isScalarEpilogueAllowed()) 6373 return 1; 6374 6375 // We used the distance for the interleave count. 6376 if (Legal->getMaxSafeDepDistBytes() != -1U) 6377 return 1; 6378 6379 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 6380 const bool HasReductions = !Legal->getReductionVars().empty(); 6381 // Do not interleave loops with a relatively small known or estimated trip 6382 // count. But we will interleave when InterleaveSmallLoopScalarReduction is 6383 // enabled, and the code has scalar reductions(HasReductions && VF = 1), 6384 // because with the above conditions interleaving can expose ILP and break 6385 // cross iteration dependences for reductions. 6386 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 6387 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 6388 return 1; 6389 6390 RegisterUsage R = calculateRegisterUsage({VF})[0]; 6391 // We divide by these constants so assume that we have at least one 6392 // instruction that uses at least one register. 6393 for (auto& pair : R.MaxLocalUsers) { 6394 pair.second = std::max(pair.second, 1U); 6395 } 6396 6397 // We calculate the interleave count using the following formula. 6398 // Subtract the number of loop invariants from the number of available 6399 // registers. These registers are used by all of the interleaved instances. 6400 // Next, divide the remaining registers by the number of registers that is 6401 // required by the loop, in order to estimate how many parallel instances 6402 // fit without causing spills. All of this is rounded down if necessary to be 6403 // a power of two. We want power of two interleave count to simplify any 6404 // addressing operations or alignment considerations. 6405 // We also want power of two interleave counts to ensure that the induction 6406 // variable of the vector loop wraps to zero, when tail is folded by masking; 6407 // this currently happens when OptForSize, in which case IC is set to 1 above. 6408 unsigned IC = UINT_MAX; 6409 6410 for (auto& pair : R.MaxLocalUsers) { 6411 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 6412 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 6413 << " registers of " 6414 << TTI.getRegisterClassName(pair.first) << " register class\n"); 6415 if (VF.isScalar()) { 6416 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 6417 TargetNumRegisters = ForceTargetNumScalarRegs; 6418 } else { 6419 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 6420 TargetNumRegisters = ForceTargetNumVectorRegs; 6421 } 6422 unsigned MaxLocalUsers = pair.second; 6423 unsigned LoopInvariantRegs = 0; 6424 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 6425 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 6426 6427 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 6428 // Don't count the induction variable as interleaved. 
6429 if (EnableIndVarRegisterHeur) { 6430 TmpIC = 6431 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 6432 std::max(1U, (MaxLocalUsers - 1))); 6433 } 6434 6435 IC = std::min(IC, TmpIC); 6436 } 6437 6438 // Clamp the interleave ranges to reasonable counts. 6439 unsigned MaxInterleaveCount = 6440 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 6441 6442 // Check if the user has overridden the max. 6443 if (VF.isScalar()) { 6444 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 6445 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 6446 } else { 6447 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 6448 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 6449 } 6450 6451 // If trip count is known or estimated compile time constant, limit the 6452 // interleave count to be less than the trip count divided by VF, provided it 6453 // is at least 1. 6454 // 6455 // For scalable vectors we can't know if interleaving is beneficial. It may 6456 // not be beneficial for small loops if none of the lanes in the second vector 6457 // iterations is enabled. However, for larger loops, there is likely to be a 6458 // similar benefit as for fixed-width vectors. For now, we choose to leave 6459 // the InterleaveCount as if vscale is '1', although if some information about 6460 // the vector is known (e.g. min vector size), we can make a better decision. 6461 if (BestKnownTC) { 6462 MaxInterleaveCount = 6463 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 6464 // Make sure MaxInterleaveCount is greater than 0. 6465 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 6466 } 6467 6468 assert(MaxInterleaveCount > 0 && 6469 "Maximum interleave count must be greater than 0"); 6470 6471 // Clamp the calculated IC to be between the 1 and the max interleave count 6472 // that the target and trip count allows. 6473 if (IC > MaxInterleaveCount) 6474 IC = MaxInterleaveCount; 6475 else 6476 // Make sure IC is greater than 0. 6477 IC = std::max(1u, IC); 6478 6479 assert(IC > 0 && "Interleave count must be greater than 0."); 6480 6481 // If we did not calculate the cost for VF (because the user selected the VF) 6482 // then we calculate the cost of VF here. 6483 if (LoopCost == 0) { 6484 InstructionCost C = expectedCost(VF).first; 6485 assert(C.isValid() && "Expected to have chosen a VF with valid cost"); 6486 LoopCost = *C.getValue(); 6487 } 6488 6489 assert(LoopCost && "Non-zero loop cost expected"); 6490 6491 // Interleave if we vectorized this loop and there is a reduction that could 6492 // benefit from interleaving. 6493 if (VF.isVector() && HasReductions) { 6494 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6495 return IC; 6496 } 6497 6498 // Note that if we've already vectorized the loop we will have done the 6499 // runtime check and so interleaving won't require further checks. 6500 bool InterleavingRequiresRuntimePointerCheck = 6501 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 6502 6503 // We want to interleave small loops in order to reduce the loop overhead and 6504 // potentially expose ILP opportunities. 
6505 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 6506 << "LV: IC is " << IC << '\n' 6507 << "LV: VF is " << VF << '\n'); 6508 const bool AggressivelyInterleaveReductions = 6509 TTI.enableAggressiveInterleaving(HasReductions); 6510 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 6511 // We assume that the cost overhead is 1 and we use the cost model 6512 // to estimate the cost of the loop and interleave until the cost of the 6513 // loop overhead is about 5% of the cost of the loop. 6514 unsigned SmallIC = 6515 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 6516 6517 // Interleave until store/load ports (estimated by max interleave count) are 6518 // saturated. 6519 unsigned NumStores = Legal->getNumStores(); 6520 unsigned NumLoads = Legal->getNumLoads(); 6521 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 6522 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 6523 6524 // There is little point in interleaving for reductions containing selects 6525 // and compares when VF=1 since it may just create more overhead than it's 6526 // worth for loops with small trip counts. This is because we still have to 6527 // do the final reduction after the loop. 6528 bool HasSelectCmpReductions = 6529 HasReductions && 6530 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 6531 const RecurrenceDescriptor &RdxDesc = Reduction.second; 6532 return RecurrenceDescriptor::isSelectCmpRecurrenceKind( 6533 RdxDesc.getRecurrenceKind()); 6534 }); 6535 if (HasSelectCmpReductions) { 6536 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n"); 6537 return 1; 6538 } 6539 6540 // If we have a scalar reduction (vector reductions are already dealt with 6541 // by this point), we can increase the critical path length if the loop 6542 // we're interleaving is inside another loop. For tree-wise reductions 6543 // set the limit to 2, and for ordered reductions it's best to disable 6544 // interleaving entirely. 6545 if (HasReductions && TheLoop->getLoopDepth() > 1) { 6546 bool HasOrderedReductions = 6547 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 6548 const RecurrenceDescriptor &RdxDesc = Reduction.second; 6549 return RdxDesc.isOrdered(); 6550 }); 6551 if (HasOrderedReductions) { 6552 LLVM_DEBUG( 6553 dbgs() << "LV: Not interleaving scalar ordered reductions.\n"); 6554 return 1; 6555 } 6556 6557 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 6558 SmallIC = std::min(SmallIC, F); 6559 StoresIC = std::min(StoresIC, F); 6560 LoadsIC = std::min(LoadsIC, F); 6561 } 6562 6563 if (EnableLoadStoreRuntimeInterleave && 6564 std::max(StoresIC, LoadsIC) > SmallIC) { 6565 LLVM_DEBUG( 6566 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 6567 return std::max(StoresIC, LoadsIC); 6568 } 6569 6570 // If there are scalar reductions and TTI has enabled aggressive 6571 // interleaving for reductions, we will interleave to expose ILP. 6572 if (InterleaveSmallLoopScalarReduction && VF.isScalar() && 6573 AggressivelyInterleaveReductions) { 6574 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6575 // Interleave no less than SmallIC but not as aggressive as the normal IC 6576 // to satisfy the rare situation when resources are too limited. 
6577 return std::max(IC / 2, SmallIC); 6578 } else { 6579 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 6580 return SmallIC; 6581 } 6582 } 6583 6584 // Interleave if this is a large loop (small loops are already dealt with by 6585 // this point) that could benefit from interleaving. 6586 if (AggressivelyInterleaveReductions) { 6587 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6588 return IC; 6589 } 6590 6591 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 6592 return 1; 6593 } 6594 6595 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 6596 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) { 6597 // This function calculates the register usage by measuring the highest number 6598 // of values that are alive at a single location. Obviously, this is a very 6599 // rough estimation. We scan the loop in topological order and 6600 // assign a number to each instruction. We use RPO to ensure that defs are 6601 // met before their users. We assume that each instruction that has in-loop 6602 // users starts an interval. We record every time that an in-loop value is 6603 // used, so we have a list of the first and last occurrences of each 6604 // instruction. Next, we transpose this data structure into a multi map that 6605 // holds the list of intervals that *end* at a specific location. This multi 6606 // map allows us to perform a linear search. We scan the instructions linearly 6607 // and record each time that a new interval starts, by placing it in a set. 6608 // If we find this value in the multi-map then we remove it from the set. 6609 // The max register usage is the maximum size of the set. 6610 // We also search for instructions that are defined outside the loop, but are 6611 // used inside the loop. We need this number separately from the max-interval 6612 // usage number because when we unroll, loop-invariant values do not take 6613 // more registers. 6614 LoopBlocksDFS DFS(TheLoop); 6615 DFS.perform(LI); 6616 6617 RegisterUsage RU; 6618 6619 // Each 'key' in the map opens a new interval. The values 6620 // of the map are the index of the 'last seen' usage of the 6621 // instruction that is the key. 6622 using IntervalMap = DenseMap<Instruction *, unsigned>; 6623 6624 // Maps instruction to its index. 6625 SmallVector<Instruction *, 64> IdxToInstr; 6626 // Marks the end of each interval. 6627 IntervalMap EndPoint; 6628 // Saves the list of instructions that are used in the loop. 6629 SmallPtrSet<Instruction *, 8> Ends; 6630 // Saves the list of values that are used in the loop but are 6631 // defined outside the loop, such as arguments and constants. 6632 SmallPtrSet<Value *, 8> LoopInvariants; 6633 6634 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6635 for (Instruction &I : BB->instructionsWithoutDebug()) { 6636 IdxToInstr.push_back(&I); 6637 6638 // Save the end location of each USE. 6639 for (Value *U : I.operands()) { 6640 auto *Instr = dyn_cast<Instruction>(U); 6641 6642 // Ignore non-instruction values such as arguments, constants, etc. 6643 if (!Instr) 6644 continue; 6645 6646 // If this instruction is outside the loop then record it and continue. 6647 if (!TheLoop->contains(Instr)) { 6648 LoopInvariants.insert(Instr); 6649 continue; 6650 } 6651 6652 // Overwrite previous end points. 6653 EndPoint[Instr] = IdxToInstr.size(); 6654 Ends.insert(Instr); 6655 } 6656 } 6657 } 6658 6659 // Saves the list of intervals that end with the index in 'key'.
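  // For illustration (hypothetical ordering): if instruction #5 is the last
  // in-loop user of the values defined by instructions #1 and #2, both of those
  // intervals are closed (removed from the open set) when the linear scan below
  // moves past #5, so they stop contributing to the register count from then on.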
6660 using InstrList = SmallVector<Instruction *, 2>; 6661 DenseMap<unsigned, InstrList> TransposeEnds; 6662 6663 // Transpose the EndPoints to a list of values that end at each index. 6664 for (auto &Interval : EndPoint) 6665 TransposeEnds[Interval.second].push_back(Interval.first); 6666 6667 SmallPtrSet<Instruction *, 8> OpenIntervals; 6668 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6669 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 6670 6671 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6672 6673 // A lambda that gets the register usage for the given type and VF. 6674 const auto &TTICapture = TTI; 6675 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned { 6676 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 6677 return 0; 6678 InstructionCost::CostType RegUsage = 6679 *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue(); 6680 assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() && 6681 "Nonsensical values for register usage."); 6682 return RegUsage; 6683 }; 6684 6685 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 6686 Instruction *I = IdxToInstr[i]; 6687 6688 // Remove all of the instructions that end at this location. 6689 InstrList &List = TransposeEnds[i]; 6690 for (Instruction *ToRemove : List) 6691 OpenIntervals.erase(ToRemove); 6692 6693 // Ignore instructions that are never used within the loop. 6694 if (!Ends.count(I)) 6695 continue; 6696 6697 // Skip ignored values. 6698 if (ValuesToIgnore.count(I)) 6699 continue; 6700 6701 // For each VF find the maximum usage of registers. 6702 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6703 // Count the number of live intervals. 6704 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6705 6706 if (VFs[j].isScalar()) { 6707 for (auto Inst : OpenIntervals) { 6708 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6709 if (RegUsage.find(ClassID) == RegUsage.end()) 6710 RegUsage[ClassID] = 1; 6711 else 6712 RegUsage[ClassID] += 1; 6713 } 6714 } else { 6715 collectUniformsAndScalars(VFs[j]); 6716 for (auto Inst : OpenIntervals) { 6717 // Skip ignored values for VF > 1. 6718 if (VecValuesToIgnore.count(Inst)) 6719 continue; 6720 if (isScalarAfterVectorization(Inst, VFs[j])) { 6721 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6722 if (RegUsage.find(ClassID) == RegUsage.end()) 6723 RegUsage[ClassID] = 1; 6724 else 6725 RegUsage[ClassID] += 1; 6726 } else { 6727 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6728 if (RegUsage.find(ClassID) == RegUsage.end()) 6729 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6730 else 6731 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6732 } 6733 } 6734 } 6735 6736 for (auto& pair : RegUsage) { 6737 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6738 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6739 else 6740 MaxUsages[j][pair.first] = pair.second; 6741 } 6742 } 6743 6744 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6745 << OpenIntervals.size() << '\n'); 6746 6747 // Add the current instruction to the list of open intervals. 6748 OpenIntervals.insert(I); 6749 } 6750 6751 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6752 SmallMapVector<unsigned, unsigned, 4> Invariant; 6753 6754 for (auto Inst : LoopInvariants) { 6755 unsigned Usage = 6756 VFs[i].isScalar() ? 
1 : GetRegUsage(Inst->getType(), VFs[i]); 6757 unsigned ClassID = 6758 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6759 if (Invariant.find(ClassID) == Invariant.end()) 6760 Invariant[ClassID] = Usage; 6761 else 6762 Invariant[ClassID] += Usage; 6763 } 6764 6765 LLVM_DEBUG({ 6766 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6767 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6768 << " item\n"; 6769 for (const auto &pair : MaxUsages[i]) { 6770 dbgs() << "LV(REG): RegisterClass: " 6771 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6772 << " registers\n"; 6773 } 6774 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6775 << " item\n"; 6776 for (const auto &pair : Invariant) { 6777 dbgs() << "LV(REG): RegisterClass: " 6778 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6779 << " registers\n"; 6780 } 6781 }); 6782 6783 RU.LoopInvariantRegs = Invariant; 6784 RU.MaxLocalUsers = MaxUsages[i]; 6785 RUs[i] = RU; 6786 } 6787 6788 return RUs; 6789 } 6790 6791 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) { 6792 // TODO: Cost model for emulated masked load/store is completely 6793 // broken. This hack guides the cost model to use an artificially 6794 // high enough value to practically disable vectorization with such 6795 // operations, except where the previously deployed legality hack allowed 6796 // using very low cost values. This is to avoid regressions coming simply 6797 // from moving the "masked load/store" check from legality to the cost model. 6798 // Masked Load/Gather emulation was previously never allowed. 6799 // A limited amount of Masked Store/Scatter emulation was allowed. 6800 assert(isPredicatedInst(I) && 6801 "Expecting a scalar emulated instruction"); 6802 return isa<LoadInst>(I) || 6803 (isa<StoreInst>(I) && 6804 NumPredStores > NumberOfStoresToPredicate); 6805 } 6806 6807 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6808 // If we aren't vectorizing the loop, or if we've already collected the 6809 // instructions to scalarize, there's nothing to do. Collection may already 6810 // have occurred if we have a user-selected VF and are now computing the 6811 // expected cost for interleaving. 6812 if (VF.isScalar() || VF.isZero() || 6813 InstsToScalarize.find(VF) != InstsToScalarize.end()) 6814 return; 6815 6816 // Initialize a mapping for VF in InstsToScalarize. If we find that it's 6817 // not profitable to scalarize any instructions, the presence of VF in the 6818 // map will indicate that we've analyzed it already. 6819 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6820 6821 // Find all the instructions that are scalar with predication in the loop and 6822 // determine if it would be better to not if-convert the blocks they are in. 6823 // If so, we also record the instructions to scalarize. 6824 for (BasicBlock *BB : TheLoop->blocks()) { 6825 if (!blockNeedsPredication(BB)) 6826 continue; 6827 for (Instruction &I : *BB) 6828 if (isScalarWithPredication(&I)) { 6829 ScalarCostsTy ScalarCosts; 6830 // Do not apply discount if scalable, because that would lead to 6831 // invalid scalarization costs. 6832 // Do not apply discount logic if hacked cost is needed 6833 // for emulated masked memrefs. 6834 if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I) && 6835 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6836 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6837 // Remember that BB will remain after vectorization.
6838 PredicatedBBsAfterVectorization.insert(BB); 6839 } 6840 } 6841 } 6842 6843 int LoopVectorizationCostModel::computePredInstDiscount( 6844 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6845 assert(!isUniformAfterVectorization(PredInst, VF) && 6846 "Instruction marked uniform-after-vectorization will be predicated"); 6847 6848 // Initialize the discount to zero, meaning that the scalar version and the 6849 // vector version cost the same. 6850 InstructionCost Discount = 0; 6851 6852 // Holds instructions to analyze. The instructions we visit are mapped in 6853 // ScalarCosts. Those instructions are the ones that would be scalarized if 6854 // we find that the scalar version costs less. 6855 SmallVector<Instruction *, 8> Worklist; 6856 6857 // Returns true if the given instruction can be scalarized. 6858 auto canBeScalarized = [&](Instruction *I) -> bool { 6859 // We only attempt to scalarize instructions forming a single-use chain 6860 // from the original predicated block that would otherwise be vectorized. 6861 // Although not strictly necessary, we give up on instructions we know will 6862 // already be scalar to avoid traversing chains that are unlikely to be 6863 // beneficial. 6864 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6865 isScalarAfterVectorization(I, VF)) 6866 return false; 6867 6868 // If the instruction is scalar with predication, it will be analyzed 6869 // separately. We ignore it within the context of PredInst. 6870 if (isScalarWithPredication(I)) 6871 return false; 6872 6873 // If any of the instruction's operands are uniform after vectorization, 6874 // the instruction cannot be scalarized. This prevents, for example, a 6875 // masked load from being scalarized. 6876 // 6877 // We assume we will only emit a value for lane zero of an instruction 6878 // marked uniform after vectorization, rather than VF identical values. 6879 // Thus, if we scalarize an instruction that uses a uniform, we would 6880 // create uses of values corresponding to the lanes we aren't emitting code 6881 // for. This behavior can be changed by allowing getScalarValue to clone 6882 // the lane zero values for uniforms rather than asserting. 6883 for (Use &U : I->operands()) 6884 if (auto *J = dyn_cast<Instruction>(U.get())) 6885 if (isUniformAfterVectorization(J, VF)) 6886 return false; 6887 6888 // Otherwise, we can scalarize the instruction. 6889 return true; 6890 }; 6891 6892 // Compute the expected cost discount from scalarizing the entire expression 6893 // feeding the predicated instruction. We currently only consider expressions 6894 // that are single-use instruction chains. 6895 Worklist.push_back(PredInst); 6896 while (!Worklist.empty()) { 6897 Instruction *I = Worklist.pop_back_val(); 6898 6899 // If we've already analyzed the instruction, there's nothing to do. 6900 if (ScalarCosts.find(I) != ScalarCosts.end()) 6901 continue; 6902 6903 // Compute the cost of the vector instruction. Note that this cost already 6904 // includes the scalarization overhead of the predicated instruction. 6905 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6906 6907 // Compute the cost of the scalarized instruction. This cost is the cost of 6908 // the instruction as if it wasn't if-converted and instead remained in the 6909 // predicated block. We will scale this cost by block probability after 6910 // computing the scalarization overhead. 
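// As a sketch of the computation below (assuming getReciprocalPredBlockProb()
// returns 2, i.e. the predicated block runs on roughly half the iterations):
// with VF = 4, a per-lane scalar cost of 1 and an insert/extract overhead of
// 4, the scalarized estimate is (4 * 1 + 4) / 2 = 4, which is then compared
// against VectorCost when accumulating the discount.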
6911 InstructionCost ScalarCost = 6912 VF.getFixedValue() * 6913 getInstructionCost(I, ElementCount::getFixed(1)).first; 6914 6915 // Compute the scalarization overhead of needed insertelement instructions 6916 // and phi nodes. 6917 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 6918 ScalarCost += TTI.getScalarizationOverhead( 6919 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6920 APInt::getAllOnes(VF.getFixedValue()), true, false); 6921 ScalarCost += 6922 VF.getFixedValue() * 6923 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6924 } 6925 6926 // Compute the scalarization overhead of needed extractelement 6927 // instructions. For each of the instruction's operands, if the operand can 6928 // be scalarized, add it to the worklist; otherwise, account for the 6929 // overhead. 6930 for (Use &U : I->operands()) 6931 if (auto *J = dyn_cast<Instruction>(U.get())) { 6932 assert(VectorType::isValidElementType(J->getType()) && 6933 "Instruction has non-scalar type"); 6934 if (canBeScalarized(J)) 6935 Worklist.push_back(J); 6936 else if (needsExtract(J, VF)) { 6937 ScalarCost += TTI.getScalarizationOverhead( 6938 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6939 APInt::getAllOnes(VF.getFixedValue()), false, true); 6940 } 6941 } 6942 6943 // Scale the total scalar cost by block probability. 6944 ScalarCost /= getReciprocalPredBlockProb(); 6945 6946 // Compute the discount. A non-negative discount means the vector version 6947 // of the instruction costs more, and scalarizing would be beneficial. 6948 Discount += VectorCost - ScalarCost; 6949 ScalarCosts[I] = ScalarCost; 6950 } 6951 6952 return *Discount.getValue(); 6953 } 6954 6955 LoopVectorizationCostModel::VectorizationCostTy 6956 LoopVectorizationCostModel::expectedCost( 6957 ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { 6958 VectorizationCostTy Cost; 6959 6960 // For each block. 6961 for (BasicBlock *BB : TheLoop->blocks()) { 6962 VectorizationCostTy BlockCost; 6963 6964 // For each instruction in the old loop. 6965 for (Instruction &I : BB->instructionsWithoutDebug()) { 6966 // Skip ignored values. 6967 if (ValuesToIgnore.count(&I) || 6968 (VF.isVector() && VecValuesToIgnore.count(&I))) 6969 continue; 6970 6971 VectorizationCostTy C = getInstructionCost(&I, VF); 6972 6973 // Check if we should override the cost. 6974 if (C.first.isValid() && 6975 ForceTargetInstructionCost.getNumOccurrences() > 0) 6976 C.first = InstructionCost(ForceTargetInstructionCost); 6977 6978 // Keep a list of instructions with invalid costs. 6979 if (Invalid && !C.first.isValid()) 6980 Invalid->emplace_back(&I, VF); 6981 6982 BlockCost.first += C.first; 6983 BlockCost.second |= C.second; 6984 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6985 << " for VF " << VF << " For instruction: " << I 6986 << '\n'); 6987 } 6988 6989 // If we are vectorizing a predicated block, it will have been 6990 // if-converted. This means that the block's instructions (aside from 6991 // stores and instructions that may divide by zero) will now be 6992 // unconditionally executed. For the scalar case, we may not always execute 6993 // the predicated block, if it is an if-else block. Thus, scale the block's 6994 // cost by the probability of executing it. blockNeedsPredication from 6995 // Legal is used so as to not include all blocks in tail folded loops. 
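// For example (a sketch, assuming getReciprocalPredBlockProb() returns 2): a
// predicated block whose instructions sum to a scalar cost of 10 contributes
// only 5 to the loop cost, since it does not execute on every iteration.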
6996 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6997 BlockCost.first /= getReciprocalPredBlockProb(); 6998 6999 Cost.first += BlockCost.first; 7000 Cost.second |= BlockCost.second; 7001 } 7002 7003 return Cost; 7004 } 7005 7006 /// Gets Address Access SCEV after verifying that the access pattern 7007 /// is loop invariant except the induction variable dependence. 7008 /// 7009 /// This SCEV can be sent to the Target in order to estimate the address 7010 /// calculation cost. 7011 static const SCEV *getAddressAccessSCEV( 7012 Value *Ptr, 7013 LoopVectorizationLegality *Legal, 7014 PredicatedScalarEvolution &PSE, 7015 const Loop *TheLoop) { 7016 7017 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 7018 if (!Gep) 7019 return nullptr; 7020 7021 // We are looking for a gep with all loop invariant indices except for one 7022 // which should be an induction variable. 7023 auto SE = PSE.getSE(); 7024 unsigned NumOperands = Gep->getNumOperands(); 7025 for (unsigned i = 1; i < NumOperands; ++i) { 7026 Value *Opd = Gep->getOperand(i); 7027 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 7028 !Legal->isInductionVariable(Opd)) 7029 return nullptr; 7030 } 7031 7032 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 7033 return PSE.getSCEV(Ptr); 7034 } 7035 7036 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 7037 return Legal->hasStride(I->getOperand(0)) || 7038 Legal->hasStride(I->getOperand(1)); 7039 } 7040 7041 InstructionCost 7042 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 7043 ElementCount VF) { 7044 assert(VF.isVector() && 7045 "Scalarization cost of instruction implies vectorization."); 7046 if (VF.isScalable()) 7047 return InstructionCost::getInvalid(); 7048 7049 Type *ValTy = getLoadStoreType(I); 7050 auto SE = PSE.getSE(); 7051 7052 unsigned AS = getLoadStoreAddressSpace(I); 7053 Value *Ptr = getLoadStorePointerOperand(I); 7054 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 7055 7056 // Figure out whether the access is strided and get the stride value 7057 // if it's known in compile time 7058 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 7059 7060 // Get the cost of the scalar memory instruction and address computation. 7061 InstructionCost Cost = 7062 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 7063 7064 // Don't pass *I here, since it is scalar but will actually be part of a 7065 // vectorized loop where the user of it is a vectorized instruction. 7066 const Align Alignment = getLoadStoreAlignment(I); 7067 Cost += VF.getKnownMinValue() * 7068 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 7069 AS, TTI::TCK_RecipThroughput); 7070 7071 // Get the overhead of the extractelement and insertelement instructions 7072 // we might create due to scalarization. 7073 Cost += getScalarizationOverhead(I, VF); 7074 7075 // If we have a predicated load/store, it will need extra i1 extracts and 7076 // conditional branches, but may not be executed for each vector lane. Scale 7077 // the cost by the probability of executing the predicated block. 
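// Note that only the per-lane costs accumulated so far (address computation,
// the scalar memory ops and their insert/extract overhead) are scaled below;
// the i1 extracts and branches are added afterwards at full cost.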
7078 if (isPredicatedInst(I)) { 7079 Cost /= getReciprocalPredBlockProb(); 7080 7081 // Add the cost of an i1 extract and a branch 7082 auto *Vec_i1Ty = 7083 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 7084 Cost += TTI.getScalarizationOverhead( 7085 Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()), 7086 /*Insert=*/false, /*Extract=*/true); 7087 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 7088 7089 if (useEmulatedMaskMemRefHack(I)) 7090 // Artificially setting to a high enough value to practically disable 7091 // vectorization with such operations. 7092 Cost = 3000000; 7093 } 7094 7095 return Cost; 7096 } 7097 7098 InstructionCost 7099 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 7100 ElementCount VF) { 7101 Type *ValTy = getLoadStoreType(I); 7102 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7103 Value *Ptr = getLoadStorePointerOperand(I); 7104 unsigned AS = getLoadStoreAddressSpace(I); 7105 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr); 7106 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7107 7108 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7109 "Stride should be 1 or -1 for consecutive memory access"); 7110 const Align Alignment = getLoadStoreAlignment(I); 7111 InstructionCost Cost = 0; 7112 if (Legal->isMaskRequired(I)) 7113 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 7114 CostKind); 7115 else 7116 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 7117 CostKind, I); 7118 7119 bool Reverse = ConsecutiveStride < 0; 7120 if (Reverse) 7121 Cost += 7122 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 7123 return Cost; 7124 } 7125 7126 InstructionCost 7127 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 7128 ElementCount VF) { 7129 assert(Legal->isUniformMemOp(*I)); 7130 7131 Type *ValTy = getLoadStoreType(I); 7132 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7133 const Align Alignment = getLoadStoreAlignment(I); 7134 unsigned AS = getLoadStoreAddressSpace(I); 7135 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7136 if (isa<LoadInst>(I)) { 7137 return TTI.getAddressComputationCost(ValTy) + 7138 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 7139 CostKind) + 7140 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 7141 } 7142 StoreInst *SI = cast<StoreInst>(I); 7143 7144 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 7145 return TTI.getAddressComputationCost(ValTy) + 7146 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 7147 CostKind) + 7148 (isLoopInvariantStoreValue 7149 ? 
0 7150 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 7151 VF.getKnownMinValue() - 1)); 7152 } 7153 7154 InstructionCost 7155 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 7156 ElementCount VF) { 7157 Type *ValTy = getLoadStoreType(I); 7158 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7159 const Align Alignment = getLoadStoreAlignment(I); 7160 const Value *Ptr = getLoadStorePointerOperand(I); 7161 7162 return TTI.getAddressComputationCost(VectorTy) + 7163 TTI.getGatherScatterOpCost( 7164 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 7165 TargetTransformInfo::TCK_RecipThroughput, I); 7166 } 7167 7168 InstructionCost 7169 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 7170 ElementCount VF) { 7171 // TODO: Once we have support for interleaving with scalable vectors 7172 // we can calculate the cost properly here. 7173 if (VF.isScalable()) 7174 return InstructionCost::getInvalid(); 7175 7176 Type *ValTy = getLoadStoreType(I); 7177 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7178 unsigned AS = getLoadStoreAddressSpace(I); 7179 7180 auto Group = getInterleavedAccessGroup(I); 7181 assert(Group && "Fail to get an interleaved access group."); 7182 7183 unsigned InterleaveFactor = Group->getFactor(); 7184 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 7185 7186 // Holds the indices of existing members in the interleaved group. 7187 SmallVector<unsigned, 4> Indices; 7188 for (unsigned IF = 0; IF < InterleaveFactor; IF++) 7189 if (Group->getMember(IF)) 7190 Indices.push_back(IF); 7191 7192 // Calculate the cost of the whole interleaved group. 7193 bool UseMaskForGaps = 7194 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) || 7195 (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor())); 7196 InstructionCost Cost = TTI.getInterleavedMemoryOpCost( 7197 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 7198 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 7199 7200 if (Group->isReverse()) { 7201 // TODO: Add support for reversed masked interleaved access. 7202 assert(!Legal->isMaskRequired(I) && 7203 "Reverse masked interleaved access not supported."); 7204 Cost += 7205 Group->getNumMembers() * 7206 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 7207 } 7208 return Cost; 7209 } 7210 7211 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost( 7212 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { 7213 using namespace llvm::PatternMatch; 7214 // Early exit for no inloop reductions 7215 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) 7216 return None; 7217 auto *VectorTy = cast<VectorType>(Ty); 7218 7219 // We are looking for a pattern of, and finding the minimal acceptable cost: 7220 // reduce(mul(ext(A), ext(B))) or 7221 // reduce(mul(A, B)) or 7222 // reduce(ext(A)) or 7223 // reduce(A). 7224 // The basic idea is that we walk down the tree to do that, finding the root 7225 // reduction instruction in InLoopReductionImmediateChains. From there we find 7226 // the pattern of mul/ext and test the cost of the entire pattern vs the cost 7227 // of the components. If the reduction cost is lower, we return it for the 7228 // reduction instruction and 0 for the other instructions in the pattern. If 7229 // it is not, we return an invalid cost specifying the original cost method 7230 // should be used.
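// As an illustrative sketch (value names are hypothetical), the IR being
// matched looks like:
//   %a.ext = sext i8 %a to i32
//   %b.ext = sext i8 %b to i32
//   %mul   = mul i32 %a.ext, %b.ext
//   %sum   = add i32 %mul, %phi   ; the in-loop reduction add
// where the whole chain may be costed as a single extended add-reduction via
// TTI::getExtendedAddReductionCost rather than instruction by instruction.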
7231 Instruction *RetI = I; 7232 if (match(RetI, m_ZExtOrSExt(m_Value()))) { 7233 if (!RetI->hasOneUser()) 7234 return None; 7235 RetI = RetI->user_back(); 7236 } 7237 if (match(RetI, m_Mul(m_Value(), m_Value())) && 7238 RetI->user_back()->getOpcode() == Instruction::Add) { 7239 if (!RetI->hasOneUser()) 7240 return None; 7241 RetI = RetI->user_back(); 7242 } 7243 7244 // Test if the found instruction is a reduction, and if not return an invalid 7245 // cost specifying the parent to use the original cost modelling. 7246 if (!InLoopReductionImmediateChains.count(RetI)) 7247 return None; 7248 7249 // Find the reduction this chain is a part of and calculate the basic cost of 7250 // the reduction on its own. 7251 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 7252 Instruction *ReductionPhi = LastChain; 7253 while (!isa<PHINode>(ReductionPhi)) 7254 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 7255 7256 const RecurrenceDescriptor &RdxDesc = 7257 Legal->getReductionVars()[cast<PHINode>(ReductionPhi)]; 7258 7259 InstructionCost BaseCost = TTI.getArithmeticReductionCost( 7260 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind); 7261 7262 // If we're using ordered reductions then we can just return the base cost 7263 // here, since getArithmeticReductionCost calculates the full ordered 7264 // reduction cost when FP reassociation is not allowed. 7265 if (useOrderedReductions(RdxDesc)) 7266 return BaseCost; 7267 7268 // Get the operand that was not the reduction chain and match it to one of the 7269 // patterns, returning the better cost if it is found. 7270 Instruction *RedOp = RetI->getOperand(1) == LastChain 7271 ? dyn_cast<Instruction>(RetI->getOperand(0)) 7272 : dyn_cast<Instruction>(RetI->getOperand(1)); 7273 7274 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 7275 7276 Instruction *Op0, *Op1; 7277 if (RedOp && 7278 match(RedOp, 7279 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) && 7280 match(Op0, m_ZExtOrSExt(m_Value())) && 7281 Op0->getOpcode() == Op1->getOpcode() && 7282 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 7283 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) && 7284 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) { 7285 7286 // Matched reduce(ext(mul(ext(A), ext(B))) 7287 // Note that the extend opcodes need to all match, or if A==B they will have 7288 // been converted to zext(mul(sext(A), sext(A))) as it is known positive, 7289 // which is equally fine. 7290 bool IsUnsigned = isa<ZExtInst>(Op0); 7291 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 7292 auto *MulType = VectorType::get(Op0->getType(), VectorTy); 7293 7294 InstructionCost ExtCost = 7295 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType, 7296 TTI::CastContextHint::None, CostKind, Op0); 7297 InstructionCost MulCost = 7298 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind); 7299 InstructionCost Ext2Cost = 7300 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType, 7301 TTI::CastContextHint::None, CostKind, RedOp); 7302 7303 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7304 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7305 CostKind); 7306 7307 if (RedCost.isValid() && 7308 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost) 7309 return I == RetI ? 
RedCost : 0; 7310 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && 7311 !TheLoop->isLoopInvariant(RedOp)) { 7312 // Matched reduce(ext(A)) 7313 bool IsUnsigned = isa<ZExtInst>(RedOp); 7314 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 7315 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7316 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7317 CostKind); 7318 7319 InstructionCost ExtCost = 7320 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 7321 TTI::CastContextHint::None, CostKind, RedOp); 7322 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 7323 return I == RetI ? RedCost : 0; 7324 } else if (RedOp && 7325 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { 7326 if (match(Op0, m_ZExtOrSExt(m_Value())) && 7327 Op0->getOpcode() == Op1->getOpcode() && 7328 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 7329 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 7330 bool IsUnsigned = isa<ZExtInst>(Op0); 7331 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 7332 // Matched reduce(mul(ext, ext)) 7333 InstructionCost ExtCost = 7334 TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType, 7335 TTI::CastContextHint::None, CostKind, Op0); 7336 InstructionCost MulCost = 7337 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7338 7339 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7340 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7341 CostKind); 7342 7343 if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost) 7344 return I == RetI ? RedCost : 0; 7345 } else if (!match(I, m_ZExtOrSExt(m_Value()))) { 7346 // Matched reduce(mul()) 7347 InstructionCost MulCost = 7348 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7349 7350 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7351 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 7352 CostKind); 7353 7354 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 7355 return I == RetI ? RedCost : 0; 7356 } 7357 } 7358 7359 return I == RetI ? Optional<InstructionCost>(BaseCost) : None; 7360 } 7361 7362 InstructionCost 7363 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 7364 ElementCount VF) { 7365 // Calculate scalar cost only. Vectorization cost should be ready at this 7366 // moment. 7367 if (VF.isScalar()) { 7368 Type *ValTy = getLoadStoreType(I); 7369 const Align Alignment = getLoadStoreAlignment(I); 7370 unsigned AS = getLoadStoreAddressSpace(I); 7371 7372 return TTI.getAddressComputationCost(ValTy) + 7373 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 7374 TTI::TCK_RecipThroughput, I); 7375 } 7376 return getWideningCost(I, VF); 7377 } 7378 7379 LoopVectorizationCostModel::VectorizationCostTy 7380 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 7381 ElementCount VF) { 7382 // If we know that this instruction will remain uniform, check the cost of 7383 // the scalar version. 7384 if (isUniformAfterVectorization(I, VF)) 7385 VF = ElementCount::getFixed(1); 7386 7387 if (VF.isVector() && isProfitableToScalarize(I, VF)) 7388 return VectorizationCostTy(InstsToScalarize[VF][I], false); 7389 7390 // Forced scalars do not have any scalarization overhead. 
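// Their cost is simply the scalar instruction cost multiplied by VF, i.e. VF
// independent scalar copies with no insertelement/extractelement overhead.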
7391 auto ForcedScalar = ForcedScalars.find(VF); 7392 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 7393 auto InstSet = ForcedScalar->second; 7394 if (InstSet.count(I)) 7395 return VectorizationCostTy( 7396 (getInstructionCost(I, ElementCount::getFixed(1)).first * 7397 VF.getKnownMinValue()), 7398 false); 7399 } 7400 7401 Type *VectorTy; 7402 InstructionCost C = getInstructionCost(I, VF, VectorTy); 7403 7404 bool TypeNotScalarized = 7405 VF.isVector() && VectorTy->isVectorTy() && 7406 TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue(); 7407 return VectorizationCostTy(C, TypeNotScalarized); 7408 } 7409 7410 InstructionCost 7411 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 7412 ElementCount VF) const { 7413 7414 // There is no mechanism yet to create a scalable scalarization loop, 7415 // so this is currently Invalid. 7416 if (VF.isScalable()) 7417 return InstructionCost::getInvalid(); 7418 7419 if (VF.isScalar()) 7420 return 0; 7421 7422 InstructionCost Cost = 0; 7423 Type *RetTy = ToVectorTy(I->getType(), VF); 7424 if (!RetTy->isVoidTy() && 7425 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 7426 Cost += TTI.getScalarizationOverhead( 7427 cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true, 7428 false); 7429 7430 // Some targets keep addresses scalar. 7431 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 7432 return Cost; 7433 7434 // Some targets support efficient element stores. 7435 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 7436 return Cost; 7437 7438 // Collect operands to consider. 7439 CallInst *CI = dyn_cast<CallInst>(I); 7440 Instruction::op_range Ops = CI ? CI->args() : I->operands(); 7441 7442 // Skip operands that do not require extraction/scalarization and do not incur 7443 // any overhead. 7444 SmallVector<Type *> Tys; 7445 for (auto *V : filterExtractingOperands(Ops, VF)) 7446 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 7447 return Cost + TTI.getOperandsScalarizationOverhead( 7448 filterExtractingOperands(Ops, VF), Tys); 7449 } 7450 7451 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 7452 if (VF.isScalar()) 7453 return; 7454 NumPredStores = 0; 7455 for (BasicBlock *BB : TheLoop->blocks()) { 7456 // For each instruction in the old loop. 7457 for (Instruction &I : *BB) { 7458 Value *Ptr = getLoadStorePointerOperand(&I); 7459 if (!Ptr) 7460 continue; 7461 7462 // TODO: We should generate better code and update the cost model for 7463 // predicated uniform stores. Today they are treated as any other 7464 // predicated store (see added test cases in 7465 // invariant-store-vectorization.ll). 7466 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 7467 NumPredStores++; 7468 7469 if (Legal->isUniformMemOp(I)) { 7470 // TODO: Avoid replicating loads and stores instead of 7471 // relying on instcombine to remove them. 7472 // Load: Scalar load + broadcast 7473 // Store: Scalar store + isLoopInvariantStoreValue ? 
0 : extract 7474 InstructionCost Cost; 7475 if (isa<StoreInst>(&I) && VF.isScalable() && 7476 isLegalGatherOrScatter(&I)) { 7477 Cost = getGatherScatterCost(&I, VF); 7478 setWideningDecision(&I, VF, CM_GatherScatter, Cost); 7479 } else { 7480 assert((isa<LoadInst>(&I) || !VF.isScalable()) && 7481 "Cannot yet scalarize uniform stores"); 7482 Cost = getUniformMemOpCost(&I, VF); 7483 setWideningDecision(&I, VF, CM_Scalarize, Cost); 7484 } 7485 continue; 7486 } 7487 7488 // We assume that widening is the best solution when possible. 7489 if (memoryInstructionCanBeWidened(&I, VF)) { 7490 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 7491 int ConsecutiveStride = Legal->isConsecutivePtr( 7492 getLoadStoreType(&I), getLoadStorePointerOperand(&I)); 7493 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7494 "Expected consecutive stride."); 7495 InstWidening Decision = 7496 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 7497 setWideningDecision(&I, VF, Decision, Cost); 7498 continue; 7499 } 7500 7501 // Choose between Interleaving, Gather/Scatter or Scalarization. 7502 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 7503 unsigned NumAccesses = 1; 7504 if (isAccessInterleaved(&I)) { 7505 auto Group = getInterleavedAccessGroup(&I); 7506 assert(Group && "Fail to get an interleaved access group."); 7507 7508 // Make one decision for the whole group. 7509 if (getWideningDecision(&I, VF) != CM_Unknown) 7510 continue; 7511 7512 NumAccesses = Group->getNumMembers(); 7513 if (interleavedAccessCanBeWidened(&I, VF)) 7514 InterleaveCost = getInterleaveGroupCost(&I, VF); 7515 } 7516 7517 InstructionCost GatherScatterCost = 7518 isLegalGatherOrScatter(&I) 7519 ? getGatherScatterCost(&I, VF) * NumAccesses 7520 : InstructionCost::getInvalid(); 7521 7522 InstructionCost ScalarizationCost = 7523 getMemInstScalarizationCost(&I, VF) * NumAccesses; 7524 7525 // Choose the better solution for the current VF, 7526 // write down this decision and use it during vectorization. 7527 InstructionCost Cost; 7528 InstWidening Decision; 7529 if (InterleaveCost <= GatherScatterCost && 7530 InterleaveCost < ScalarizationCost) { 7531 Decision = CM_Interleave; 7532 Cost = InterleaveCost; 7533 } else if (GatherScatterCost < ScalarizationCost) { 7534 Decision = CM_GatherScatter; 7535 Cost = GatherScatterCost; 7536 } else { 7537 Decision = CM_Scalarize; 7538 Cost = ScalarizationCost; 7539 } 7540 // If the instruction belongs to an interleave group, the whole group 7541 // receives the same decision. The whole group receives the cost, but 7542 // the cost will actually be assigned to one instruction. 7543 if (auto Group = getInterleavedAccessGroup(&I)) 7544 setWideningDecision(Group, VF, Decision, Cost); 7545 else 7546 setWideningDecision(&I, VF, Decision, Cost); 7547 } 7548 } 7549 7550 // Make sure that any load of address and any other address computation 7551 // remains scalar unless there is gather/scatter support. This avoids 7552 // inevitable extracts into address registers, and also has the benefit of 7553 // activating LSR more, since that pass can't optimize vectorized 7554 // addresses. 7555 if (TTI.prefersVectorizedAddressing()) 7556 return; 7557 7558 // Start with all scalar pointer uses.
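// That is, collect the in-loop definitions of every load/store pointer operand
// that was not already decided to become a gather/scatter; the worklist below
// then pulls in the instructions feeding those addresses so that the whole
// address computation can be kept scalar.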
7559 SmallPtrSet<Instruction *, 8> AddrDefs; 7560 for (BasicBlock *BB : TheLoop->blocks()) 7561 for (Instruction &I : *BB) { 7562 Instruction *PtrDef = 7563 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 7564 if (PtrDef && TheLoop->contains(PtrDef) && 7565 getWideningDecision(&I, VF) != CM_GatherScatter) 7566 AddrDefs.insert(PtrDef); 7567 } 7568 7569 // Add all instructions used to generate the addresses. 7570 SmallVector<Instruction *, 4> Worklist; 7571 append_range(Worklist, AddrDefs); 7572 while (!Worklist.empty()) { 7573 Instruction *I = Worklist.pop_back_val(); 7574 for (auto &Op : I->operands()) 7575 if (auto *InstOp = dyn_cast<Instruction>(Op)) 7576 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 7577 AddrDefs.insert(InstOp).second) 7578 Worklist.push_back(InstOp); 7579 } 7580 7581 for (auto *I : AddrDefs) { 7582 if (isa<LoadInst>(I)) { 7583 // Setting the desired widening decision should ideally be handled 7584 // by cost functions, but since this involves the task of finding out 7585 // if the loaded register is involved in an address computation, it is 7586 // instead changed here when we know this is the case. 7587 InstWidening Decision = getWideningDecision(I, VF); 7588 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 7589 // Scalarize a widened load of address. 7590 setWideningDecision( 7591 I, VF, CM_Scalarize, 7592 (VF.getKnownMinValue() * 7593 getMemoryInstructionCost(I, ElementCount::getFixed(1)))); 7594 else if (auto Group = getInterleavedAccessGroup(I)) { 7595 // Scalarize an interleave group of address loads. 7596 for (unsigned I = 0; I < Group->getFactor(); ++I) { 7597 if (Instruction *Member = Group->getMember(I)) 7598 setWideningDecision( 7599 Member, VF, CM_Scalarize, 7600 (VF.getKnownMinValue() * 7601 getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); 7602 } 7603 } 7604 } else 7605 // Make sure I gets scalarized and gets a cost estimate without 7606 // scalarization overhead. 7607 ForcedScalars[VF].insert(I); 7608 } 7609 } 7610 7611 InstructionCost 7612 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF, 7613 Type *&VectorTy) { 7614 Type *RetTy = I->getType(); 7615 if (canTruncateToMinimalBitwidth(I, VF)) 7616 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 7617 auto SE = PSE.getSE(); 7618 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7619 7620 auto hasSingleCopyAfterVectorization = [this](Instruction *I, 7621 ElementCount VF) -> bool { 7622 if (VF.isScalar()) 7623 return true; 7624 7625 auto Scalarized = InstsToScalarize.find(VF); 7626 assert(Scalarized != InstsToScalarize.end() && 7627 "VF not yet analyzed for scalarization profitability"); 7628 return !Scalarized->second.count(I) && 7629 llvm::all_of(I->users(), [&](User *U) { 7630 auto *UI = cast<Instruction>(U); 7631 return !Scalarized->second.count(UI); 7632 }); 7633 }; 7634 (void) hasSingleCopyAfterVectorization; 7635 7636 if (isScalarAfterVectorization(I, VF)) { 7637 // With the exception of GEPs and PHIs, after scalarization there should 7638 // only be one copy of the instruction generated in the loop. This is 7639 // because the VF is either 1, or any instructions that need scalarizing 7640 // have already been dealt with by the time we get here. As a result, 7641 // we don't have to multiply the instruction cost by VF.
7642 assert(I->getOpcode() == Instruction::GetElementPtr || 7643 I->getOpcode() == Instruction::PHI || 7644 (I->getOpcode() == Instruction::BitCast && 7645 I->getType()->isPointerTy()) || 7646 hasSingleCopyAfterVectorization(I, VF)); 7647 VectorTy = RetTy; 7648 } else 7649 VectorTy = ToVectorTy(RetTy, VF); 7650 7651 // TODO: We need to estimate the cost of intrinsic calls. 7652 switch (I->getOpcode()) { 7653 case Instruction::GetElementPtr: 7654 // We mark this instruction as zero-cost because the cost of GEPs in 7655 // vectorized code depends on whether the corresponding memory instruction 7656 // is scalarized or not. Therefore, we handle GEPs with the memory 7657 // instruction cost. 7658 return 0; 7659 case Instruction::Br: { 7660 // In cases of scalarized and predicated instructions, there will be VF 7661 // predicated blocks in the vectorized loop. Each branch around these 7662 // blocks requires also an extract of its vector compare i1 element. 7663 bool ScalarPredicatedBB = false; 7664 BranchInst *BI = cast<BranchInst>(I); 7665 if (VF.isVector() && BI->isConditional() && 7666 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7667 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7668 ScalarPredicatedBB = true; 7669 7670 if (ScalarPredicatedBB) { 7671 // Not possible to scalarize scalable vector with predicated instructions. 7672 if (VF.isScalable()) 7673 return InstructionCost::getInvalid(); 7674 // Return cost for branches around scalarized and predicated blocks. 7675 auto *Vec_i1Ty = 7676 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7677 return ( 7678 TTI.getScalarizationOverhead( 7679 Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) + 7680 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue())); 7681 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 7682 // The back-edge branch will remain, as will all scalar branches. 7683 return TTI.getCFInstrCost(Instruction::Br, CostKind); 7684 else 7685 // This branch will be eliminated by if-conversion. 7686 return 0; 7687 // Note: We currently assume zero cost for an unconditional branch inside 7688 // a predicated block since it will become a fall-through, although we 7689 // may decide in the future to call TTI for all branches. 7690 } 7691 case Instruction::PHI: { 7692 auto *Phi = cast<PHINode>(I); 7693 7694 // First-order recurrences are replaced by vector shuffles inside the loop. 7695 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7696 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7697 return TTI.getShuffleCost( 7698 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7699 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7700 7701 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7702 // converted into select instructions. We require N - 1 selects per phi 7703 // node, where N is the number of incoming values. 
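// For example (sketch): a phi merging values from three predecessors after
// if-conversion becomes a chain of two selects, so it is costed as
// 2 * getCmpSelInstrCost(Select, <VF x Ty>, <VF x i1>).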
7704 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7705 return (Phi->getNumIncomingValues() - 1) * 7706 TTI.getCmpSelInstrCost( 7707 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7708 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7709 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7710 7711 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7712 } 7713 case Instruction::UDiv: 7714 case Instruction::SDiv: 7715 case Instruction::URem: 7716 case Instruction::SRem: 7717 // If we have a predicated instruction, it may not be executed for each 7718 // vector lane. Get the scalarization cost and scale this amount by the 7719 // probability of executing the predicated block. If the instruction is not 7720 // predicated, we fall through to the next case. 7721 if (VF.isVector() && isScalarWithPredication(I)) { 7722 InstructionCost Cost = 0; 7723 7724 // These instructions have a non-void type, so account for the phi nodes 7725 // that we will create. This cost is likely to be zero. The phi node 7726 // cost, if any, should be scaled by the block probability because it 7727 // models a copy at the end of each predicated block. 7728 Cost += VF.getKnownMinValue() * 7729 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7730 7731 // The cost of the non-predicated instruction. 7732 Cost += VF.getKnownMinValue() * 7733 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7734 7735 // The cost of insertelement and extractelement instructions needed for 7736 // scalarization. 7737 Cost += getScalarizationOverhead(I, VF); 7738 7739 // Scale the cost by the probability of executing the predicated blocks. 7740 // This assumes the predicated block for each vector lane is equally 7741 // likely. 7742 return Cost / getReciprocalPredBlockProb(); 7743 } 7744 LLVM_FALLTHROUGH; 7745 case Instruction::Add: 7746 case Instruction::FAdd: 7747 case Instruction::Sub: 7748 case Instruction::FSub: 7749 case Instruction::Mul: 7750 case Instruction::FMul: 7751 case Instruction::FDiv: 7752 case Instruction::FRem: 7753 case Instruction::Shl: 7754 case Instruction::LShr: 7755 case Instruction::AShr: 7756 case Instruction::And: 7757 case Instruction::Or: 7758 case Instruction::Xor: { 7759 // Since we will replace the stride by 1 the multiplication should go away. 7760 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7761 return 0; 7762 7763 // Detect reduction patterns 7764 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7765 return *RedCost; 7766 7767 // Certain instructions can be cheaper to vectorize if they have a constant 7768 // second vector operand. One example of this are shifts on x86. 
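// For example (sketch): in "%r = shl i32 %x, 3" the constant shift amount is
// reported to TTI as a uniform constant operand, which lets targets with
// immediate shift encodings return a lower vector cost than for a variable
// per-lane shift.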
7769 Value *Op2 = I->getOperand(1); 7770 TargetTransformInfo::OperandValueProperties Op2VP; 7771 TargetTransformInfo::OperandValueKind Op2VK = 7772 TTI.getOperandInfo(Op2, Op2VP); 7773 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7774 Op2VK = TargetTransformInfo::OK_UniformValue; 7775 7776 SmallVector<const Value *, 4> Operands(I->operand_values()); 7777 return TTI.getArithmeticInstrCost( 7778 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7779 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7780 } 7781 case Instruction::FNeg: { 7782 return TTI.getArithmeticInstrCost( 7783 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7784 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, 7785 TargetTransformInfo::OP_None, I->getOperand(0), I); 7786 } 7787 case Instruction::Select: { 7788 SelectInst *SI = cast<SelectInst>(I); 7789 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7790 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7791 7792 const Value *Op0, *Op1; 7793 using namespace llvm::PatternMatch; 7794 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7795 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7796 // select x, y, false --> x & y 7797 // select x, true, y --> x | y 7798 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7799 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7800 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7801 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7802 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7803 Op1->getType()->getScalarSizeInBits() == 1); 7804 7805 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7806 return TTI.getArithmeticInstrCost( 7807 match(I, m_LogicalOr()) ? 
Instruction::Or : Instruction::And, VectorTy, 7808 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7809 } 7810 7811 Type *CondTy = SI->getCondition()->getType(); 7812 if (!ScalarCond) 7813 CondTy = VectorType::get(CondTy, VF); 7814 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, 7815 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7816 } 7817 case Instruction::ICmp: 7818 case Instruction::FCmp: { 7819 Type *ValTy = I->getOperand(0)->getType(); 7820 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7821 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7822 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7823 VectorTy = ToVectorTy(ValTy, VF); 7824 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7825 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7826 } 7827 case Instruction::Store: 7828 case Instruction::Load: { 7829 ElementCount Width = VF; 7830 if (Width.isVector()) { 7831 InstWidening Decision = getWideningDecision(I, Width); 7832 assert(Decision != CM_Unknown && 7833 "CM decision should be taken at this point"); 7834 if (Decision == CM_Scalarize) 7835 Width = ElementCount::getFixed(1); 7836 } 7837 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7838 return getMemoryInstructionCost(I, VF); 7839 } 7840 case Instruction::BitCast: 7841 if (I->getType()->isPointerTy()) 7842 return 0; 7843 LLVM_FALLTHROUGH; 7844 case Instruction::ZExt: 7845 case Instruction::SExt: 7846 case Instruction::FPToUI: 7847 case Instruction::FPToSI: 7848 case Instruction::FPExt: 7849 case Instruction::PtrToInt: 7850 case Instruction::IntToPtr: 7851 case Instruction::SIToFP: 7852 case Instruction::UIToFP: 7853 case Instruction::Trunc: 7854 case Instruction::FPTrunc: { 7855 // Computes the CastContextHint from a Load/Store instruction. 7856 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7857 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7858 "Expected a load or a store!"); 7859 7860 if (VF.isScalar() || !TheLoop->contains(I)) 7861 return TTI::CastContextHint::Normal; 7862 7863 switch (getWideningDecision(I, VF)) { 7864 case LoopVectorizationCostModel::CM_GatherScatter: 7865 return TTI::CastContextHint::GatherScatter; 7866 case LoopVectorizationCostModel::CM_Interleave: 7867 return TTI::CastContextHint::Interleave; 7868 case LoopVectorizationCostModel::CM_Scalarize: 7869 case LoopVectorizationCostModel::CM_Widen: 7870 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7871 : TTI::CastContextHint::Normal; 7872 case LoopVectorizationCostModel::CM_Widen_Reverse: 7873 return TTI::CastContextHint::Reversed; 7874 case LoopVectorizationCostModel::CM_Unknown: 7875 llvm_unreachable("Instr did not go through cost modelling?"); 7876 } 7877 7878 llvm_unreachable("Unhandled case!"); 7879 }; 7880 7881 unsigned Opcode = I->getOpcode(); 7882 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7883 // For Trunc, the context is the only user, which must be a StoreInst. 7884 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7885 if (I->hasOneUse()) 7886 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7887 CCH = ComputeCCH(Store); 7888 } 7889 // For Z/Sext, the context is the operand, which must be a LoadInst. 
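// In that case the widening decision already taken for the load determines
// the CastContextHint via ComputeCCH, mirroring the store handling above.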
7890 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7891 Opcode == Instruction::FPExt) { 7892 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7893 CCH = ComputeCCH(Load); 7894 } 7895 7896 // We optimize the truncation of induction variables having constant 7897 // integer steps. The cost of these truncations is the same as the scalar 7898 // operation. 7899 if (isOptimizableIVTruncate(I, VF)) { 7900 auto *Trunc = cast<TruncInst>(I); 7901 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7902 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7903 } 7904 7905 // Detect reduction patterns 7906 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7907 return *RedCost; 7908 7909 Type *SrcScalarTy = I->getOperand(0)->getType(); 7910 Type *SrcVecTy = 7911 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7912 if (canTruncateToMinimalBitwidth(I, VF)) { 7913 // This cast is going to be shrunk. This may remove the cast or it might 7914 // turn it into slightly different cast. For example, if MinBW == 16, 7915 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7916 // 7917 // Calculate the modified src and dest types. 7918 Type *MinVecTy = VectorTy; 7919 if (Opcode == Instruction::Trunc) { 7920 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7921 VectorTy = 7922 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7923 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7924 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7925 VectorTy = 7926 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7927 } 7928 } 7929 7930 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7931 } 7932 case Instruction::Call: { 7933 bool NeedToScalarize; 7934 CallInst *CI = cast<CallInst>(I); 7935 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7936 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7937 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7938 return std::min(CallCost, IntrinsicCost); 7939 } 7940 return CallCost; 7941 } 7942 case Instruction::ExtractValue: 7943 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7944 case Instruction::Alloca: 7945 // We cannot easily widen alloca to a scalable alloca, as 7946 // the result would need to be a vector of pointers. 7947 if (VF.isScalable()) 7948 return InstructionCost::getInvalid(); 7949 LLVM_FALLTHROUGH; 7950 default: 7951 // This opcode is unknown. Assume that it is the same as 'mul'. 7952 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7953 } // end of switch. 
7954 } 7955 7956 char LoopVectorize::ID = 0; 7957 7958 static const char lv_name[] = "Loop Vectorization"; 7959 7960 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7961 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7962 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7963 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7964 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7965 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7966 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7967 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7968 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7969 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7970 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7971 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7972 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7973 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7974 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7975 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7976 7977 namespace llvm { 7978 7979 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7980 7981 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7982 bool VectorizeOnlyWhenForced) { 7983 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7984 } 7985 7986 } // end namespace llvm 7987 7988 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7989 // Check if the pointer operand of a load or store instruction is 7990 // consecutive. 7991 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7992 return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr); 7993 return false; 7994 } 7995 7996 void LoopVectorizationCostModel::collectValuesToIgnore() { 7997 // Ignore ephemeral values. 7998 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7999 8000 // Ignore type-promoting instructions we identified during reduction 8001 // detection. 8002 for (auto &Reduction : Legal->getReductionVars()) { 8003 RecurrenceDescriptor &RedDes = Reduction.second; 8004 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 8005 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 8006 } 8007 // Ignore type-casting instructions we identified during induction 8008 // detection. 8009 for (auto &Induction : Legal->getInductionVars()) { 8010 InductionDescriptor &IndDes = Induction.second; 8011 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 8012 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 8013 } 8014 } 8015 8016 void LoopVectorizationCostModel::collectInLoopReductions() { 8017 for (auto &Reduction : Legal->getReductionVars()) { 8018 PHINode *Phi = Reduction.first; 8019 RecurrenceDescriptor &RdxDesc = Reduction.second; 8020 8021 // We don't collect reductions that are type promoted (yet). 8022 if (RdxDesc.getRecurrenceType() != Phi->getType()) 8023 continue; 8024 8025 // If the target would prefer this reduction to happen "in-loop", then we 8026 // want to record it as such. 8027 unsigned Opcode = RdxDesc.getOpcode(); 8028 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 8029 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 8030 TargetTransformInfo::ReductionFlags())) 8031 continue; 8032 8033 // Check that we can correctly put the reductions into the loop, by 8034 // finding the chain of operations that leads from the phi to the loop 8035 // exit value. 
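// For example (a sketch): given the chain "%phi -> %add1 -> %add2", the code
// below records InLoopReductionImmediateChains[%add1] = %phi and
// InLoopReductionImmediateChains[%add2] = %add1, which getReductionPatternCost
// later walks backwards to find the owning reduction phi.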
8036 SmallVector<Instruction *, 4> ReductionOperations = 8037 RdxDesc.getReductionOpChain(Phi, TheLoop); 8038 bool InLoop = !ReductionOperations.empty(); 8039 if (InLoop) { 8040 InLoopReductionChains[Phi] = ReductionOperations; 8041 // Add the elements to InLoopReductionImmediateChains for cost modelling. 8042 Instruction *LastChain = Phi; 8043 for (auto *I : ReductionOperations) { 8044 InLoopReductionImmediateChains[I] = LastChain; 8045 LastChain = I; 8046 } 8047 } 8048 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 8049 << " reduction for phi: " << *Phi << "\n"); 8050 } 8051 } 8052 8053 // TODO: we could return a pair of values that specify the max VF and 8054 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 8055 // `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment 8056 // doesn't have a cost model that can choose which plan to execute if 8057 // more than one is generated. 8058 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 8059 LoopVectorizationCostModel &CM) { 8060 unsigned WidestType; 8061 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 8062 return WidestVectorRegBits / WidestType; 8063 } 8064 8065 VectorizationFactor 8066 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { 8067 assert(!UserVF.isScalable() && "scalable vectors not yet supported"); 8068 ElementCount VF = UserVF; 8069 // Outer loop handling: They may require CFG and instruction level 8070 // transformations before even evaluating whether vectorization is profitable. 8071 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 8072 // the vectorization pipeline. 8073 if (!OrigLoop->isInnermost()) { 8074 // If the user doesn't provide a vectorization factor, determine a 8075 // reasonable one. 8076 if (UserVF.isZero()) { 8077 VF = ElementCount::getFixed(determineVPlanVF( 8078 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 8079 .getFixedSize(), 8080 CM)); 8081 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 8082 8083 // Make sure we have a VF > 1 for stress testing. 8084 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { 8085 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 8086 << "overriding computed VF.\n"); 8087 VF = ElementCount::getFixed(4); 8088 } 8089 } 8090 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 8091 assert(isPowerOf2_32(VF.getKnownMinValue()) && 8092 "VF needs to be a power of two"); 8093 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") 8094 << "VF " << VF << " to build VPlans.\n"); 8095 buildVPlans(VF, VF); 8096 8097 // For VPlan build stress testing, we bail out after VPlan construction. 8098 if (VPlanBuildStressTest) 8099 return VectorizationFactor::Disabled(); 8100 8101 return {VF, 0 /*Cost*/}; 8102 } 8103 8104 LLVM_DEBUG( 8105 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 8106 "VPlan-native path.\n"); 8107 return VectorizationFactor::Disabled(); 8108 } 8109 8110 Optional<VectorizationFactor> 8111 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { 8112 assert(OrigLoop->isInnermost() && "Inner loop expected."); 8113 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC); 8114 if (!MaxFactors) // Cases that should not be vectorized or interleaved. 8115 return None; 8116 8117 // Invalidate interleave groups if all blocks of the loop will be predicated.
8118 if (CM.blockNeedsPredication(OrigLoop->getHeader()) && 8119 !useMaskedInterleavedAccesses(*TTI)) { 8120 LLVM_DEBUG( 8121 dbgs() 8122 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 8123 "which requires masked-interleaved support.\n"); 8124 if (CM.InterleaveInfo.invalidateGroups()) 8125 // Invalidating interleave groups also requires invalidating all decisions 8126 // based on them, which includes widening decisions and uniform and scalar 8127 // values. 8128 CM.invalidateCostModelingDecisions(); 8129 } 8130 8131 ElementCount MaxUserVF = 8132 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; 8133 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); 8134 if (!UserVF.isZero() && UserVFIsLegal) { 8135 assert(isPowerOf2_32(UserVF.getKnownMinValue()) && 8136 "VF needs to be a power of two"); 8137 // Collect the instructions (and their associated costs) that will be more 8138 // profitable to scalarize. 8139 if (CM.selectUserVectorizationFactor(UserVF)) { 8140 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 8141 CM.collectInLoopReductions(); 8142 buildVPlansWithVPRecipes(UserVF, UserVF); 8143 LLVM_DEBUG(printPlans(dbgs())); 8144 return {{UserVF, 0}}; 8145 } else 8146 reportVectorizationInfo("UserVF ignored because of invalid costs.", 8147 "InvalidCost", ORE, OrigLoop); 8148 } 8149 8150 // Populate the set of Vectorization Factor Candidates. 8151 ElementCountSet VFCandidates; 8152 for (auto VF = ElementCount::getFixed(1); 8153 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) 8154 VFCandidates.insert(VF); 8155 for (auto VF = ElementCount::getScalable(1); 8156 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) 8157 VFCandidates.insert(VF); 8158 8159 for (const auto &VF : VFCandidates) { 8160 // Collect Uniform and Scalar instructions after vectorization with VF. 8161 CM.collectUniformsAndScalars(VF); 8162 8163 // Collect the instructions (and their associated costs) that will be more 8164 // profitable to scalarize. 8165 if (VF.isVector()) 8166 CM.collectInstsToScalarize(VF); 8167 } 8168 8169 CM.collectInLoopReductions(); 8170 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); 8171 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); 8172 8173 LLVM_DEBUG(printPlans(dbgs())); 8174 if (!MaxFactors.hasVector()) 8175 return VectorizationFactor::Disabled(); 8176 8177 // Select the optimal vectorization factor. 8178 auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); 8179 8180 // Check if it is profitable to vectorize with runtime checks. 
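// A sketch of the decision below: give up on vectorization when the number of
// runtime pointer checks exceeds the pragma threshold, or exceeds the default
// threshold while memory-operation reordering was not explicitly allowed via
// the loop hints.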
8181 unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks(); 8182 if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) { 8183 bool PragmaThresholdReached = 8184 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 8185 bool ThresholdReached = 8186 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 8187 if ((ThresholdReached && !Hints.allowReordering()) || 8188 PragmaThresholdReached) { 8189 ORE->emit([&]() { 8190 return OptimizationRemarkAnalysisAliasing( 8191 DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(), 8192 OrigLoop->getHeader()) 8193 << "loop not vectorized: cannot prove it is safe to reorder " 8194 "memory operations"; 8195 }); 8196 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 8197 Hints.emitRemarkWithHints(); 8198 return VectorizationFactor::Disabled(); 8199 } 8200 } 8201 return SelectedVF; 8202 } 8203 8204 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const { 8205 assert(count_if(VPlans, 8206 [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) == 8207 1 && 8208 "Best VF has not a single VPlan."); 8209 8210 for (const VPlanPtr &Plan : VPlans) { 8211 if (Plan->hasVF(VF)) 8212 return *Plan.get(); 8213 } 8214 llvm_unreachable("No plan found!"); 8215 } 8216 8217 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF, 8218 VPlan &BestVPlan, 8219 InnerLoopVectorizer &ILV, 8220 DominatorTree *DT) { 8221 LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF 8222 << '\n'); 8223 8224 // Perform the actual loop transformation. 8225 8226 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 8227 VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan}; 8228 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 8229 State.TripCount = ILV.getOrCreateTripCount(nullptr); 8230 State.CanonicalIV = ILV.Induction; 8231 8232 ILV.printDebugTracesAtStart(); 8233 8234 //===------------------------------------------------===// 8235 // 8236 // Notice: any optimization or new instruction that go 8237 // into the code below should also be implemented in 8238 // the cost-model. 8239 // 8240 //===------------------------------------------------===// 8241 8242 // 2. Copy and widen instructions from the old loop into the new loop. 8243 BestVPlan.execute(&State); 8244 8245 // 3. Fix the vectorized code: take care of header phi's, live-outs, 8246 // predication, updating analyses. 
8247 ILV.fixVectorizedLoop(State);
8248
8249 ILV.printDebugTracesAtEnd();
8250 }
8251
8252 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
8253 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
8254 for (const auto &Plan : VPlans)
8255 if (PrintVPlansInDotFormat)
8256 Plan->printDOT(O);
8257 else
8258 Plan->print(O);
8259 }
8260 #endif
8261
8262 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
8263 SmallPtrSetImpl<Instruction *> &DeadInstructions) {
8264
8265 // We create new control-flow for the vectorized loop, so the original exit
8266 // conditions will be dead after vectorization if they are only used by the
8267 // terminator.
8268 SmallVector<BasicBlock*> ExitingBlocks;
8269 OrigLoop->getExitingBlocks(ExitingBlocks);
8270 for (auto *BB : ExitingBlocks) {
8271 auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
8272 if (!Cmp || !Cmp->hasOneUse())
8273 continue;
8274
8275 // TODO: we should introduce a getUniqueExitingBlocks on Loop
8276 if (!DeadInstructions.insert(Cmp).second)
8277 continue;
8278
8279 // The operands of the icmp are often dead truncs, used by IndUpdate.
8280 // TODO: can recurse through operands in general
8281 for (Value *Op : Cmp->operands()) {
8282 if (isa<TruncInst>(Op) && Op->hasOneUse())
8283 DeadInstructions.insert(cast<Instruction>(Op));
8284 }
8285 }
8286
8287 // We create new "steps" for induction variable updates to which the original
8288 // induction variables map. An original update instruction will be dead if
8289 // all its users except the induction variable are dead.
8290 auto *Latch = OrigLoop->getLoopLatch();
8291 for (auto &Induction : Legal->getInductionVars()) {
8292 PHINode *Ind = Induction.first;
8293 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
8294
8295 // If the tail is to be folded by masking, the primary induction variable,
8296 // if it exists, isn't dead: it will be used for masking. Don't kill it.
8297 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
8298 continue;
8299
8300 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
8301 return U == Ind || DeadInstructions.count(cast<Instruction>(U));
8302 }))
8303 DeadInstructions.insert(IndUpdate);
8304
8305 // We record as "Dead" also the type-casting instructions we had identified
8306 // during induction analysis. We don't need any handling for them in the
8307 // vectorized loop because we have proven that, under a proper runtime
8308 // test guarding the vectorized loop, the value of the phi, and the casted
8309 // value of the phi, are the same. The last instruction in this casting chain
8310 // will get its scalar/vector/widened def from the scalar/vector/widened def
8311 // of the respective phi node. Any other casts in the induction def-use chain
8312 // have no other uses outside the phi update chain, and will be ignored.
8313 InductionDescriptor &IndDes = Induction.second;
8314 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
8315 DeadInstructions.insert(Casts.begin(), Casts.end());
8316 }
8317 }
8318
8319 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
8320
8321 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
8322
8323 Value *InnerLoopUnroller::getStepVector(Value *Val, Value *StartIdx,
8324 Value *Step,
8325 Instruction::BinaryOps BinOp) {
8326 // When unrolling and the VF is 1, we only need to add a simple scalar.
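// E.g. instead of materializing a step vector, the unroller folds everything
// into the single scalar Val + StartIdx * Step (or its FP equivalent), which
// is what the code below emits.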
8327 Type *Ty = Val->getType(); 8328 assert(!Ty->isVectorTy() && "Val must be a scalar"); 8329 8330 if (Ty->isFloatingPointTy()) { 8331 // Floating-point operations inherit FMF via the builder's flags. 8332 Value *MulOp = Builder.CreateFMul(StartIdx, Step); 8333 return Builder.CreateBinOp(BinOp, Val, MulOp); 8334 } 8335 return Builder.CreateAdd(Val, Builder.CreateMul(StartIdx, Step), "induction"); 8336 } 8337 8338 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 8339 SmallVector<Metadata *, 4> MDs; 8340 // Reserve first location for self reference to the LoopID metadata node. 8341 MDs.push_back(nullptr); 8342 bool IsUnrollMetadata = false; 8343 MDNode *LoopID = L->getLoopID(); 8344 if (LoopID) { 8345 // First find existing loop unrolling disable metadata. 8346 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 8347 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 8348 if (MD) { 8349 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 8350 IsUnrollMetadata = 8351 S && S->getString().startswith("llvm.loop.unroll.disable"); 8352 } 8353 MDs.push_back(LoopID->getOperand(i)); 8354 } 8355 } 8356 8357 if (!IsUnrollMetadata) { 8358 // Add runtime unroll disable metadata. 8359 LLVMContext &Context = L->getHeader()->getContext(); 8360 SmallVector<Metadata *, 1> DisableOperands; 8361 DisableOperands.push_back( 8362 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 8363 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 8364 MDs.push_back(DisableNode); 8365 MDNode *NewLoopID = MDNode::get(Context, MDs); 8366 // Set operand 0 to refer to the loop id itself. 8367 NewLoopID->replaceOperandWith(0, NewLoopID); 8368 L->setLoopID(NewLoopID); 8369 } 8370 } 8371 8372 //===--------------------------------------------------------------------===// 8373 // EpilogueVectorizerMainLoop 8374 //===--------------------------------------------------------------------===// 8375 8376 /// This function is partially responsible for generating the control flow 8377 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8378 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 8379 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8380 Loop *Lp = createVectorLoopSkeleton(""); 8381 8382 // Generate the code to check the minimum iteration count of the vector 8383 // epilogue (see below). 8384 EPI.EpilogueIterationCountCheck = 8385 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true); 8386 EPI.EpilogueIterationCountCheck->setName("iter.check"); 8387 8388 // Generate the code to check any assumptions that we've made for SCEV 8389 // expressions. 8390 EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader); 8391 8392 // Generate the code that checks at runtime if arrays overlap. We put the 8393 // checks into a separate block to make the more common case of few elements 8394 // faster. 8395 EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 8396 8397 // Generate the iteration count check for the main loop, *after* the check 8398 // for the epilogue loop, so that the path-length is shorter for the case 8399 // that goes directly through the vector epilogue. The longer-path length for 8400 // the main loop is compensated for, by the gain from vectorizing the larger 8401 // trip count. Note: the branch will get updated later on when we vectorize 8402 // the epilogue. 
8403 EPI.MainLoopIterationCountCheck = 8404 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false); 8405 8406 // Generate the induction variable. 8407 OldInduction = Legal->getPrimaryInduction(); 8408 Type *IdxTy = Legal->getWidestInductionType(); 8409 Value *StartIdx = ConstantInt::get(IdxTy, 0); 8410 8411 IRBuilder<> B(&*Lp->getLoopPreheader()->getFirstInsertionPt()); 8412 Value *Step = getRuntimeVF(B, IdxTy, VF * UF); 8413 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8414 EPI.VectorTripCount = CountRoundDown; 8415 Induction = 8416 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 8417 getDebugLocFromInstOrOperands(OldInduction)); 8418 8419 // Skip induction resume value creation here because they will be created in 8420 // the second pass. If we created them here, they wouldn't be used anyway, 8421 // because the vplan in the second pass still contains the inductions from the 8422 // original loop. 8423 8424 return completeLoopSkeleton(Lp, OrigLoopID); 8425 } 8426 8427 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 8428 LLVM_DEBUG({ 8429 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 8430 << "Main Loop VF:" << EPI.MainLoopVF 8431 << ", Main Loop UF:" << EPI.MainLoopUF 8432 << ", Epilogue Loop VF:" << EPI.EpilogueVF 8433 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8434 }); 8435 } 8436 8437 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 8438 DEBUG_WITH_TYPE(VerboseDebug, { 8439 dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n"; 8440 }); 8441 } 8442 8443 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck( 8444 Loop *L, BasicBlock *Bypass, bool ForEpilogue) { 8445 assert(L && "Expected valid Loop."); 8446 assert(Bypass && "Expected valid bypass basic block."); 8447 ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF; 8448 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 8449 Value *Count = getOrCreateTripCount(L); 8450 // Reuse existing vector loop preheader for TC checks. 8451 // Note that new preheader block is generated for vector loop. 8452 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 8453 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 8454 8455 // Generate code to check if the loop's trip count is less than VF * UF of the 8456 // main vector loop. 8457 auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? 8458 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8459 8460 Value *CheckMinIters = Builder.CreateICmp( 8461 P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor), 8462 "min.iters.check"); 8463 8464 if (!ForEpilogue) 8465 TCCheckBlock->setName("vector.main.loop.iter.check"); 8466 8467 // Create new preheader for vector loop. 8468 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 8469 DT, LI, nullptr, "vector.ph"); 8470 8471 if (ForEpilogue) { 8472 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 8473 DT->getNode(Bypass)->getIDom()) && 8474 "TC check is expected to dominate Bypass"); 8475 8476 // Update dominator for Bypass & LoopExit. 8477 DT->changeImmediateDominator(Bypass, TCCheckBlock); 8478 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8479 // For loops with multiple exits, there's no edge from the middle block 8480 // to exit blocks (as the epilogue must run) and thus no need to update 8481 // the immediate dominator of the exit blocks. 
8482 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 8483 8484 LoopBypassBlocks.push_back(TCCheckBlock); 8485 8486 // Save the trip count so we don't have to regenerate it in the 8487 // vec.epilog.iter.check. This is safe to do because the trip count 8488 // generated here dominates the vector epilog iter check. 8489 EPI.TripCount = Count; 8490 } 8491 8492 ReplaceInstWithInst( 8493 TCCheckBlock->getTerminator(), 8494 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8495 8496 return TCCheckBlock; 8497 } 8498 8499 //===--------------------------------------------------------------------===// 8500 // EpilogueVectorizerEpilogueLoop 8501 //===--------------------------------------------------------------------===// 8502 8503 /// This function is partially responsible for generating the control flow 8504 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8505 BasicBlock * 8506 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 8507 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8508 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 8509 8510 // Now, compare the remaining count and if there aren't enough iterations to 8511 // execute the vectorized epilogue skip to the scalar part. 8512 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 8513 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 8514 LoopVectorPreHeader = 8515 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 8516 LI, nullptr, "vec.epilog.ph"); 8517 emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, 8518 VecEpilogueIterationCountCheck); 8519 8520 // Adjust the control flow taking the state info from the main loop 8521 // vectorization into account. 8522 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 8523 "expected this to be saved from the previous pass."); 8524 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 8525 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 8526 8527 DT->changeImmediateDominator(LoopVectorPreHeader, 8528 EPI.MainLoopIterationCountCheck); 8529 8530 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 8531 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8532 8533 if (EPI.SCEVSafetyCheck) 8534 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 8535 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8536 if (EPI.MemSafetyCheck) 8537 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 8538 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8539 8540 DT->changeImmediateDominator( 8541 VecEpilogueIterationCountCheck, 8542 VecEpilogueIterationCountCheck->getSinglePredecessor()); 8543 8544 DT->changeImmediateDominator(LoopScalarPreHeader, 8545 EPI.EpilogueIterationCountCheck); 8546 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8547 // If there is an epilogue which must run, there's no edge from the 8548 // middle block to exit blocks and thus no need to update the immediate 8549 // dominator of the exit blocks. 8550 DT->changeImmediateDominator(LoopExitBlock, 8551 EPI.EpilogueIterationCountCheck); 8552 8553 // Keep track of bypass blocks, as they feed start values to the induction 8554 // phis in the scalar loop preheader. 
8555 if (EPI.SCEVSafetyCheck)
8556 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8557 if (EPI.MemSafetyCheck)
8558 LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8559 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8560
8561 // Generate a resume induction for the vector epilogue and put it in the
8562 // vector epilogue preheader.
8563 Type *IdxTy = Legal->getWidestInductionType();
8564 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8565 LoopVectorPreHeader->getFirstNonPHI());
8566 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8567 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8568 EPI.MainLoopIterationCountCheck);
8569
8570 // Generate the induction variable.
8571 OldInduction = Legal->getPrimaryInduction();
8572 Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8573 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8574 Value *StartIdx = EPResumeVal;
8575 Induction =
8576 createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8577 getDebugLocFromInstOrOperands(OldInduction));
8578
8579 // Generate induction resume values. These variables save the new starting
8580 // indexes for the scalar loop. They are used to test if there are any tail
8581 // iterations left once the vector loop has completed.
8582 // Note that when the vectorized epilogue is skipped due to the iteration
8583 // count check, the resume value for the induction variable comes from
8584 // the trip count of the main vector loop, hence passing the AdditionalBypass
8585 // argument.
8586 createInductionResumeValues(Lp, CountRoundDown,
8587 {VecEpilogueIterationCountCheck,
8588 EPI.VectorTripCount} /* AdditionalBypass */);
8589
8590 AddRuntimeUnrollDisableMetaData(Lp);
8591 return completeLoopSkeleton(Lp, OrigLoopID);
8592 }
8593
8594 BasicBlock *
8595 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8596 Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8597
8598 assert(EPI.TripCount &&
8599 "Expected trip count to have been saved in the first pass.");
8600 assert(
8601 (!isa<Instruction>(EPI.TripCount) ||
8602 DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8603 "saved trip count does not dominate insertion point.");
8604 Value *TC = EPI.TripCount;
8605 IRBuilder<> Builder(Insert->getTerminator());
8606 Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8607
8608 // Generate code to check if the loop's trip count is less than VF * UF of the
8609 // vector epilogue loop.
8610 auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8611 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8612 8613 Value *CheckMinIters = 8614 Builder.CreateICmp(P, Count, 8615 createStepForVF(Builder, Count->getType(), 8616 EPI.EpilogueVF, EPI.EpilogueUF), 8617 "min.epilog.iters.check"); 8618 8619 ReplaceInstWithInst( 8620 Insert->getTerminator(), 8621 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8622 8623 LoopBypassBlocks.push_back(Insert); 8624 return Insert; 8625 } 8626 8627 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { 8628 LLVM_DEBUG({ 8629 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" 8630 << "Epilogue Loop VF:" << EPI.EpilogueVF 8631 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8632 }); 8633 } 8634 8635 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { 8636 DEBUG_WITH_TYPE(VerboseDebug, { 8637 dbgs() << "final fn:\n" << *Induction->getFunction() << "\n"; 8638 }); 8639 } 8640 8641 bool LoopVectorizationPlanner::getDecisionAndClampRange( 8642 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { 8643 assert(!Range.isEmpty() && "Trying to test an empty VF range."); 8644 bool PredicateAtRangeStart = Predicate(Range.Start); 8645 8646 for (ElementCount TmpVF = Range.Start * 2; 8647 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) 8648 if (Predicate(TmpVF) != PredicateAtRangeStart) { 8649 Range.End = TmpVF; 8650 break; 8651 } 8652 8653 return PredicateAtRangeStart; 8654 } 8655 8656 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8657 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8658 /// of VF's starting at a given VF and extending it as much as possible. Each 8659 /// vectorization decision can potentially shorten this sub-range during 8660 /// buildVPlan(). 8661 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8662 ElementCount MaxVF) { 8663 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8664 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8665 VFRange SubRange = {VF, MaxVFPlusOne}; 8666 VPlans.push_back(buildVPlan(SubRange)); 8667 VF = SubRange.End; 8668 } 8669 } 8670 8671 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8672 VPlanPtr &Plan) { 8673 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8674 8675 // Look for cached value. 8676 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8677 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8678 if (ECEntryIt != EdgeMaskCache.end()) 8679 return ECEntryIt->second; 8680 8681 VPValue *SrcMask = createBlockInMask(Src, Plan); 8682 8683 // The terminator has to be a branch inst! 8684 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8685 assert(BI && "Unexpected terminator found"); 8686 8687 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8688 return EdgeMaskCache[Edge] = SrcMask; 8689 8690 // If source is an exiting block, we know the exit edge is dynamically dead 8691 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8692 // adding uses of an otherwise potentially dead instruction. 8693 if (OrigLoop->isLoopExiting(Src)) 8694 return EdgeMaskCache[Edge] = SrcMask; 8695 8696 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8697 assert(EdgeMask && "No Edge Mask found for condition"); 8698 8699 if (BI->getSuccessor(0) != Dst) 8700 EdgeMask = Builder.createNot(EdgeMask); 8701 8702 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 
8703 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8704 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8705 // The select version does not introduce new UB if SrcMask is false and 8706 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8707 VPValue *False = Plan->getOrAddVPValue( 8708 ConstantInt::getFalse(BI->getCondition()->getType())); 8709 EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False); 8710 } 8711 8712 return EdgeMaskCache[Edge] = EdgeMask; 8713 } 8714 8715 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8716 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8717 8718 // Look for cached value. 8719 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8720 if (BCEntryIt != BlockMaskCache.end()) 8721 return BCEntryIt->second; 8722 8723 // All-one mask is modelled as no-mask following the convention for masked 8724 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8725 VPValue *BlockMask = nullptr; 8726 8727 if (OrigLoop->getHeader() == BB) { 8728 if (!CM.blockNeedsPredication(BB)) 8729 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8730 8731 // Create the block in mask as the first non-phi instruction in the block. 8732 VPBuilder::InsertPointGuard Guard(Builder); 8733 auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi(); 8734 Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint); 8735 8736 // Introduce the early-exit compare IV <= BTC to form header block mask. 8737 // This is used instead of IV < TC because TC may wrap, unlike BTC. 8738 // Start by constructing the desired canonical IV. 8739 VPValue *IV = nullptr; 8740 if (Legal->getPrimaryInduction()) 8741 IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction()); 8742 else { 8743 auto *IVRecipe = new VPWidenCanonicalIVRecipe(); 8744 Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint); 8745 IV = IVRecipe; 8746 } 8747 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8748 bool TailFolded = !CM.isScalarEpilogueAllowed(); 8749 8750 if (TailFolded && CM.TTI.emitGetActiveLaneMask()) { 8751 // While ActiveLaneMask is a binary op that consumes the loop tripcount 8752 // as a second argument, we only pass the IV here and extract the 8753 // tripcount from the transform state where codegen of the VP instructions 8754 // happen. 8755 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV}); 8756 } else { 8757 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8758 } 8759 return BlockMaskCache[BB] = BlockMask; 8760 } 8761 8762 // This is the block mask. We OR all incoming edges. 8763 for (auto *Predecessor : predecessors(BB)) { 8764 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8765 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8766 return BlockMaskCache[BB] = EdgeMask; 8767 8768 if (!BlockMask) { // BlockMask has its initialized nullptr value. 
8769 BlockMask = EdgeMask; 8770 continue; 8771 } 8772 8773 BlockMask = Builder.createOr(BlockMask, EdgeMask); 8774 } 8775 8776 return BlockMaskCache[BB] = BlockMask; 8777 } 8778 8779 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8780 ArrayRef<VPValue *> Operands, 8781 VFRange &Range, 8782 VPlanPtr &Plan) { 8783 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8784 "Must be called with either a load or store"); 8785 8786 auto willWiden = [&](ElementCount VF) -> bool { 8787 if (VF.isScalar()) 8788 return false; 8789 LoopVectorizationCostModel::InstWidening Decision = 8790 CM.getWideningDecision(I, VF); 8791 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8792 "CM decision should be taken at this point."); 8793 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8794 return true; 8795 if (CM.isScalarAfterVectorization(I, VF) || 8796 CM.isProfitableToScalarize(I, VF)) 8797 return false; 8798 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8799 }; 8800 8801 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8802 return nullptr; 8803 8804 VPValue *Mask = nullptr; 8805 if (Legal->isMaskRequired(I)) 8806 Mask = createBlockInMask(I->getParent(), Plan); 8807 8808 // Determine if the pointer operand of the access is either consecutive or 8809 // reverse consecutive. 8810 LoopVectorizationCostModel::InstWidening Decision = 8811 CM.getWideningDecision(I, Range.Start); 8812 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse; 8813 bool Consecutive = 8814 Reverse || Decision == LoopVectorizationCostModel::CM_Widen; 8815 8816 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8817 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask, 8818 Consecutive, Reverse); 8819 8820 StoreInst *Store = cast<StoreInst>(I); 8821 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8822 Mask, Consecutive, Reverse); 8823 } 8824 8825 VPWidenIntOrFpInductionRecipe * 8826 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, 8827 ArrayRef<VPValue *> Operands) const { 8828 // Check if this is an integer or fp induction. If so, build the recipe that 8829 // produces its scalar and vector values. 8830 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8831 if (II.getKind() == InductionDescriptor::IK_IntInduction || 8832 II.getKind() == InductionDescriptor::IK_FpInduction) { 8833 assert(II.getStartValue() == 8834 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8835 const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts(); 8836 return new VPWidenIntOrFpInductionRecipe( 8837 Phi, Operands[0], Casts.empty() ? nullptr : Casts.front()); 8838 } 8839 8840 return nullptr; 8841 } 8842 8843 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8844 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, 8845 VPlan &Plan) const { 8846 // Optimize the special case where the source is a constant integer 8847 // induction variable. Notice that we can only optimize the 'trunc' case 8848 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8849 // (c) other casts depend on pointer size. 8850 8851 // Determine whether \p K is a truncation based on an induction variable that 8852 // can be optimized. 
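// For illustration: given an i64 induction %iv used as "trunc i64 %iv to
// i32", the truncated value can be generated directly as a narrower i32
// induction rather than widening %iv and truncating every lane.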
8853 auto isOptimizableIVTruncate = 8854 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8855 return [=](ElementCount VF) -> bool { 8856 return CM.isOptimizableIVTruncate(K, VF); 8857 }; 8858 }; 8859 8860 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8861 isOptimizableIVTruncate(I), Range)) { 8862 8863 InductionDescriptor II = 8864 Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0))); 8865 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8866 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 8867 Start, nullptr, I); 8868 } 8869 return nullptr; 8870 } 8871 8872 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8873 ArrayRef<VPValue *> Operands, 8874 VPlanPtr &Plan) { 8875 // If all incoming values are equal, the incoming VPValue can be used directly 8876 // instead of creating a new VPBlendRecipe. 8877 VPValue *FirstIncoming = Operands[0]; 8878 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8879 return FirstIncoming == Inc; 8880 })) { 8881 return Operands[0]; 8882 } 8883 8884 // We know that all PHIs in non-header blocks are converted into selects, so 8885 // we don't have to worry about the insertion order and we can just use the 8886 // builder. At this point we generate the predication tree. There may be 8887 // duplications since this is a simple recursive scan, but future 8888 // optimizations will clean it up. 8889 SmallVector<VPValue *, 2> OperandsWithMask; 8890 unsigned NumIncoming = Phi->getNumIncomingValues(); 8891 8892 for (unsigned In = 0; In < NumIncoming; In++) { 8893 VPValue *EdgeMask = 8894 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8895 assert((EdgeMask || NumIncoming == 1) && 8896 "Multiple predecessors with one having a full mask"); 8897 OperandsWithMask.push_back(Operands[In]); 8898 if (EdgeMask) 8899 OperandsWithMask.push_back(EdgeMask); 8900 } 8901 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8902 } 8903 8904 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8905 ArrayRef<VPValue *> Operands, 8906 VFRange &Range) const { 8907 8908 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8909 [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); }, 8910 Range); 8911 8912 if (IsPredicated) 8913 return nullptr; 8914 8915 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8916 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8917 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8918 ID == Intrinsic::pseudoprobe || 8919 ID == Intrinsic::experimental_noalias_scope_decl)) 8920 return nullptr; 8921 8922 auto willWiden = [&](ElementCount VF) -> bool { 8923 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8924 // The following case may be scalarized depending on the VF. 8925 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8926 // version of the instruction. 8927 // Is it beneficial to perform intrinsic call compared to lib call? 8928 bool NeedToScalarize = false; 8929 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8930 InstructionCost IntrinsicCost = ID ? 
CM.getVectorIntrinsicCost(CI, VF) : 0; 8931 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8932 return UseVectorIntrinsic || !NeedToScalarize; 8933 }; 8934 8935 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8936 return nullptr; 8937 8938 ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size()); 8939 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 8940 } 8941 8942 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8943 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8944 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8945 // Instruction should be widened, unless it is scalar after vectorization, 8946 // scalarization is profitable or it is predicated. 8947 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8948 return CM.isScalarAfterVectorization(I, VF) || 8949 CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I); 8950 }; 8951 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8952 Range); 8953 } 8954 8955 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8956 ArrayRef<VPValue *> Operands) const { 8957 auto IsVectorizableOpcode = [](unsigned Opcode) { 8958 switch (Opcode) { 8959 case Instruction::Add: 8960 case Instruction::And: 8961 case Instruction::AShr: 8962 case Instruction::BitCast: 8963 case Instruction::FAdd: 8964 case Instruction::FCmp: 8965 case Instruction::FDiv: 8966 case Instruction::FMul: 8967 case Instruction::FNeg: 8968 case Instruction::FPExt: 8969 case Instruction::FPToSI: 8970 case Instruction::FPToUI: 8971 case Instruction::FPTrunc: 8972 case Instruction::FRem: 8973 case Instruction::FSub: 8974 case Instruction::ICmp: 8975 case Instruction::IntToPtr: 8976 case Instruction::LShr: 8977 case Instruction::Mul: 8978 case Instruction::Or: 8979 case Instruction::PtrToInt: 8980 case Instruction::SDiv: 8981 case Instruction::Select: 8982 case Instruction::SExt: 8983 case Instruction::Shl: 8984 case Instruction::SIToFP: 8985 case Instruction::SRem: 8986 case Instruction::Sub: 8987 case Instruction::Trunc: 8988 case Instruction::UDiv: 8989 case Instruction::UIToFP: 8990 case Instruction::URem: 8991 case Instruction::Xor: 8992 case Instruction::ZExt: 8993 return true; 8994 } 8995 return false; 8996 }; 8997 8998 if (!IsVectorizableOpcode(I->getOpcode())) 8999 return nullptr; 9000 9001 // Success: widen this instruction. 9002 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); 9003 } 9004 9005 void VPRecipeBuilder::fixHeaderPhis() { 9006 BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); 9007 for (VPWidenPHIRecipe *R : PhisToFix) { 9008 auto *PN = cast<PHINode>(R->getUnderlyingValue()); 9009 VPRecipeBase *IncR = 9010 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); 9011 R->addOperand(IncR->getVPSingleValue()); 9012 } 9013 } 9014 9015 VPBasicBlock *VPRecipeBuilder::handleReplication( 9016 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 9017 VPlanPtr &Plan) { 9018 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 9019 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 9020 Range); 9021 9022 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 9023 [&](ElementCount VF) { return CM.isPredicatedInst(I); }, Range); 9024 9025 // Even if the instruction is not marked as uniform, there are certain 9026 // intrinsic calls that can be effectively treated as such, so we check for 9027 // them here. 
Conservatively, we only do this for scalable vectors, since
9028 // for fixed-width VFs we can always fall back on full scalarization.
9029 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
9030 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
9031 case Intrinsic::assume:
9032 case Intrinsic::lifetime_start:
9033 case Intrinsic::lifetime_end:
9034 // For scalable vectors if one of the operands is variant then we still
9035 // want to mark it as uniform, which will generate one instruction for just
9036 // the first lane of the vector. We can't scalarize the call in the same
9037 // way as for fixed-width vectors because we don't know how many lanes
9038 // there are.
9039 //
9040 // The reasons for doing it this way for scalable vectors are:
9041 // 1. For the assume intrinsic generating the instruction for the first
9042 // lane is still better than not generating any at all. For
9043 // example, the input may be a splat across all lanes.
9044 // 2. For the lifetime start/end intrinsics the pointer operand only
9045 // does anything useful when the input comes from a stack object,
9046 // which suggests it should always be uniform. For non-stack objects
9047 // the effect is to poison the object, which still allows us to
9048 // remove the call.
9049 IsUniform = true;
9050 break;
9051 default:
9052 break;
9053 }
9054 }
9055
9056 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
9057 IsUniform, IsPredicated);
9058 setRecipe(I, Recipe);
9059 Plan->addVPValue(I, Recipe);
9060
9061 // Find if I uses a predicated instruction. If so, it will use its scalar
9062 // value. Avoid hoisting the insert-element which packs the scalar value into
9063 // a vector value, as that happens iff all users use the vector value.
9064 for (VPValue *Op : Recipe->operands()) {
9065 auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
9066 if (!PredR)
9067 continue;
9068 auto *RepR =
9069 cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
9070 assert(RepR->isPredicated() &&
9071 "expected Replicate recipe to be predicated");
9072 RepR->setAlsoPack(false);
9073 }
9074
9075 // Finalize the recipe for Instr, first if it is not predicated.
9076 if (!IsPredicated) {
9077 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
9078 VPBB->appendRecipe(Recipe);
9079 return VPBB;
9080 }
9081 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
9082 assert(VPBB->getSuccessors().empty() &&
9083 "VPBB has successors when handling predicated replication.");
9084 // Record predicated instructions for above packing optimizations.
9085 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
9086 VPBlockUtils::insertBlockAfter(Region, VPBB);
9087 auto *RegSucc = new VPBasicBlock();
9088 VPBlockUtils::insertBlockAfter(RegSucc, Region);
9089 return RegSucc;
9090 }
9091
9092 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
9093 VPRecipeBase *PredRecipe,
9094 VPlanPtr &Plan) {
9095 // Instructions marked for predication are replicated and placed under an
9096 // if-then construct to prevent side-effects.
9097
9098 // Generate recipes to compute the block mask for this region.
9099 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
9100
9101 // Build the triangular if-then region.
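// Roughly (illustrative), for a predicated instruction %x the region built
// below has the shape:
//
//   pred.x.entry:          BRANCH-ON-MASK(BlockInMask)
//        |           \
//        |        pred.x.if:       replicated %x (PredRecipe)
//        |           /
//   pred.x.continue:      PHI merging the replicated value, if %x produces one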
9102 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 9103 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 9104 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 9105 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 9106 auto *PHIRecipe = Instr->getType()->isVoidTy() 9107 ? nullptr 9108 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 9109 if (PHIRecipe) { 9110 Plan->removeVPValueFor(Instr); 9111 Plan->addVPValue(Instr, PHIRecipe); 9112 } 9113 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 9114 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 9115 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 9116 9117 // Note: first set Entry as region entry and then connect successors starting 9118 // from it in order, to propagate the "parent" of each VPBasicBlock. 9119 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 9120 VPBlockUtils::connectBlocks(Pred, Exit); 9121 9122 return Region; 9123 } 9124 9125 VPRecipeOrVPValueTy 9126 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 9127 ArrayRef<VPValue *> Operands, 9128 VFRange &Range, VPlanPtr &Plan) { 9129 // First, check for specific widening recipes that deal with calls, memory 9130 // operations, inductions and Phi nodes. 9131 if (auto *CI = dyn_cast<CallInst>(Instr)) 9132 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 9133 9134 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 9135 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 9136 9137 VPRecipeBase *Recipe; 9138 if (auto Phi = dyn_cast<PHINode>(Instr)) { 9139 if (Phi->getParent() != OrigLoop->getHeader()) 9140 return tryToBlend(Phi, Operands, Plan); 9141 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands))) 9142 return toVPRecipeResult(Recipe); 9143 9144 VPWidenPHIRecipe *PhiRecipe = nullptr; 9145 if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) { 9146 VPValue *StartV = Operands[0]; 9147 if (Legal->isReductionVariable(Phi)) { 9148 RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi]; 9149 assert(RdxDesc.getRecurrenceStartValue() == 9150 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 9151 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 9152 CM.isInLoopReduction(Phi), 9153 CM.useOrderedReductions(RdxDesc)); 9154 } else { 9155 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 9156 } 9157 9158 // Record the incoming value from the backedge, so we can add the incoming 9159 // value from the backedge after all recipes have been created. 9160 recordRecipeOf(cast<Instruction>( 9161 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 9162 PhisToFix.push_back(PhiRecipe); 9163 } else { 9164 // TODO: record start and backedge value for remaining pointer induction 9165 // phis. 
9166 assert(Phi->getType()->isPointerTy() &&
9167 "only pointer phis should be handled here");
9168 PhiRecipe = new VPWidenPHIRecipe(Phi);
9169 }
9170
9171 return toVPRecipeResult(PhiRecipe);
9172 }
9173
9174 if (isa<TruncInst>(Instr) &&
9175 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
9176 Range, *Plan)))
9177 return toVPRecipeResult(Recipe);
9178
9179 if (!shouldWiden(Instr, Range))
9180 return nullptr;
9181
9182 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
9183 return toVPRecipeResult(new VPWidenGEPRecipe(
9184 GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
9185
9186 if (auto *SI = dyn_cast<SelectInst>(Instr)) {
9187 bool InvariantCond =
9188 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
9189 return toVPRecipeResult(new VPWidenSelectRecipe(
9190 *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
9191 }
9192
9193 return toVPRecipeResult(tryToWiden(Instr, Operands));
9194 }
9195
9196 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
9197 ElementCount MaxVF) {
9198 assert(OrigLoop->isInnermost() && "Inner loop expected.");
9199
9200 // Collect instructions from the original loop that will become trivially dead
9201 // in the vectorized loop. We don't need to vectorize these instructions. For
9202 // example, original induction update instructions can become dead because we
9203 // separately emit induction "steps" when generating code for the new loop.
9204 // Similarly, we create a new latch condition when setting up the structure
9205 // of the new loop, so the old one can become dead.
9206 SmallPtrSet<Instruction *, 4> DeadInstructions;
9207 collectTriviallyDeadInstructions(DeadInstructions);
9208
9209 // Add assume instructions we need to drop to DeadInstructions, to prevent
9210 // them from being added to the VPlan.
9211 // TODO: We only need to drop assumes in blocks that get flattened. If the
9212 // control flow is preserved, we should keep them.
9213 auto &ConditionalAssumes = Legal->getConditionalAssumes();
9214 DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
9215
9216 MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
9217 // Dead instructions do not need sinking. Remove them from SinkAfter.
9218 for (Instruction *I : DeadInstructions)
9219 SinkAfter.erase(I);
9220
9221 // Cannot sink instructions after dead instructions (there won't be any
9222 // recipes for them). Instead, find the first non-dead previous instruction.
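// For example (illustrative): if %a must sink after %b but %b was marked dead
// above, walk backwards from %b to the closest live instruction and sink
// after that one instead.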
9223 for (auto &P : Legal->getSinkAfter()) {
9224 Instruction *SinkTarget = P.second;
9225 Instruction *FirstInst = &*SinkTarget->getParent()->begin();
9226 (void)FirstInst;
9227 while (DeadInstructions.contains(SinkTarget)) {
9228 assert(
9229 SinkTarget != FirstInst &&
9230 "Must find a live instruction (at least the one feeding the "
9231 "first-order recurrence PHI) before reaching beginning of the block");
9232 SinkTarget = SinkTarget->getPrevNode();
9233 assert(SinkTarget != P.first &&
9234 "sink source equals target, no sinking required");
9235 }
9236 P.second = SinkTarget;
9237 }
9238
9239 auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
9240 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
9241 VFRange SubRange = {VF, MaxVFPlusOne};
9242 VPlans.push_back(
9243 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
9244 VF = SubRange.End;
9245 }
9246 }
9247
9248 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
9249 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
9250 const MapVector<Instruction *, Instruction *> &SinkAfter) {
9251
9252 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
9253
9254 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
9255
9256 // ---------------------------------------------------------------------------
9257 // Pre-construction: record ingredients whose recipes we'll need to further
9258 // process after constructing the initial VPlan.
9259 // ---------------------------------------------------------------------------
9260
9261 // Mark instructions we'll need to sink later and their targets as
9262 // ingredients whose recipe we'll need to record.
9263 for (auto &Entry : SinkAfter) {
9264 RecipeBuilder.recordRecipeOf(Entry.first);
9265 RecipeBuilder.recordRecipeOf(Entry.second);
9266 }
9267 for (auto &Reduction : CM.getInLoopReductionChains()) {
9268 PHINode *Phi = Reduction.first;
9269 RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
9270 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9271
9272 RecipeBuilder.recordRecipeOf(Phi);
9273 for (auto &R : ReductionOperations) {
9274 RecipeBuilder.recordRecipeOf(R);
9275 // For min/max reductions, where we have a pair of icmp/select, we also
9276 // need to record the ICmp recipe, so it can be removed later.
9277 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9278 "Only min/max recurrences allowed for inloop reductions");
9279 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
9280 RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
9281 }
9282 }
9283
9284 // For each interleave group which is relevant for this (possibly trimmed)
9285 // Range, add it to the set of groups to be later applied to the VPlan and add
9286 // placeholders for its members' Recipes which we'll be replacing with a
9287 // single VPInterleaveRecipe.
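// E.g. (illustrative) a factor-2 group covering the loads of a[2*i] and
// a[2*i+1]: both member recipes are recorded here so they can later be
// replaced by one VPInterleaveRecipe.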
9288 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
9289 auto applyIG = [IG, this](ElementCount VF) -> bool {
9290 return (VF.isVector() && // Query is illegal for VF == 1
9291 CM.getWideningDecision(IG->getInsertPos(), VF) ==
9292 LoopVectorizationCostModel::CM_Interleave);
9293 };
9294 if (!getDecisionAndClampRange(applyIG, Range))
9295 continue;
9296 InterleaveGroups.insert(IG);
9297 for (unsigned i = 0; i < IG->getFactor(); i++)
9298 if (Instruction *Member = IG->getMember(i))
9299 RecipeBuilder.recordRecipeOf(Member);
9300 }
9301
9302 // ---------------------------------------------------------------------------
9303 // Build initial VPlan: Scan the body of the loop in a topological order to
9304 // visit each basic block after having visited its predecessor basic blocks.
9305 // ---------------------------------------------------------------------------
9306
9307 // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
9308 auto Plan = std::make_unique<VPlan>();
9309
9310 // Scan the body of the loop in a topological order to visit each basic block
9311 // after having visited its predecessor basic blocks.
9312 LoopBlocksDFS DFS(OrigLoop);
9313 DFS.perform(LI);
9314
9315 VPBasicBlock *VPBB = nullptr;
9316 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
9317 // Relevant instructions from basic block BB will be grouped into VPRecipe
9318 // ingredients and fill a new VPBasicBlock.
9319 unsigned VPBBsForBB = 0;
9320 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
9321 if (VPBB)
9322 VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
9323 else
9324 Plan->setEntry(FirstVPBBForBB);
9325 VPBB = FirstVPBBForBB;
9326 Builder.setInsertPoint(VPBB);
9327
9328 // Introduce each ingredient into VPlan.
9329 // TODO: Model and preserve debug intrinsics in VPlan.
9330 for (Instruction &I : BB->instructionsWithoutDebug()) {
9331 Instruction *Instr = &I;
9332
9333 // First filter out irrelevant instructions, to ensure no recipes are
9334 // built for them.
9335 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
9336 continue;
9337
9338 SmallVector<VPValue *, 4> Operands;
9339 auto *Phi = dyn_cast<PHINode>(Instr);
9340 if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
9341 Operands.push_back(Plan->getOrAddVPValue(
9342 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
9343 } else {
9344 auto OpRange = Plan->mapToVPValues(Instr->operands());
9345 Operands = {OpRange.begin(), OpRange.end()};
9346 }
9347 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
9348 Instr, Operands, Range, Plan)) {
9349 // If Instr can be simplified to an existing VPValue, use it.
9350 if (RecipeOrValue.is<VPValue *>()) {
9351 auto *VPV = RecipeOrValue.get<VPValue *>();
9352 Plan->addVPValue(Instr, VPV);
9353 // If the re-used value is a recipe, register the recipe for the
9354 // instruction, in case the recipe for Instr needs to be recorded.
9355 if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
9356 RecipeBuilder.setRecipe(Instr, R);
9357 continue;
9358 }
9359 // Otherwise, add the new recipe.
9360 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
9361 for (auto *Def : Recipe->definedValues()) {
9362 auto *UV = Def->getUnderlyingValue();
9363 Plan->addVPValue(UV, Def);
9364 }
9365
9366 RecipeBuilder.setRecipe(Instr, Recipe);
9367 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe)) {
9368 // Make sure induction recipes are all kept in the header block.
9369 // VPWidenIntOrFpInductionRecipe may be generated when reaching a 9370 // Trunc of an induction Phi, where Trunc may not be in the header. 9371 auto *Header = Plan->getEntry()->getEntryBasicBlock(); 9372 Header->insert(Recipe, Header->getFirstNonPhi()); 9373 } else 9374 VPBB->appendRecipe(Recipe); 9375 continue; 9376 } 9377 9378 // Otherwise, if all widening options failed, Instruction is to be 9379 // replicated. This may create a successor for VPBB. 9380 VPBasicBlock *NextVPBB = 9381 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 9382 if (NextVPBB != VPBB) { 9383 VPBB = NextVPBB; 9384 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 9385 : ""); 9386 } 9387 } 9388 } 9389 9390 assert(isa<VPBasicBlock>(Plan->getEntry()) && 9391 !Plan->getEntry()->getEntryBasicBlock()->empty() && 9392 "entry block must be set to a non-empty VPBasicBlock"); 9393 RecipeBuilder.fixHeaderPhis(); 9394 9395 // --------------------------------------------------------------------------- 9396 // Transform initial VPlan: Apply previously taken decisions, in order, to 9397 // bring the VPlan to its final state. 9398 // --------------------------------------------------------------------------- 9399 9400 // Apply Sink-After legal constraints. 9401 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 9402 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 9403 if (Region && Region->isReplicator()) { 9404 assert(Region->getNumSuccessors() == 1 && 9405 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 9406 assert(R->getParent()->size() == 1 && 9407 "A recipe in an original replicator region must be the only " 9408 "recipe in its block"); 9409 return Region; 9410 } 9411 return nullptr; 9412 }; 9413 for (auto &Entry : SinkAfter) { 9414 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 9415 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 9416 9417 auto *TargetRegion = GetReplicateRegion(Target); 9418 auto *SinkRegion = GetReplicateRegion(Sink); 9419 if (!SinkRegion) { 9420 // If the sink source is not a replicate region, sink the recipe directly. 9421 if (TargetRegion) { 9422 // The target is in a replication region, make sure to move Sink to 9423 // the block after it, not into the replication region itself. 9424 VPBasicBlock *NextBlock = 9425 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 9426 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 9427 } else 9428 Sink->moveAfter(Target); 9429 continue; 9430 } 9431 9432 // The sink source is in a replicate region. Unhook the region from the CFG. 9433 auto *SinkPred = SinkRegion->getSinglePredecessor(); 9434 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 9435 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 9436 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 9437 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 9438 9439 if (TargetRegion) { 9440 // The target recipe is also in a replicate region, move the sink region 9441 // after the target region. 9442 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 9443 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 9444 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 9445 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 9446 } else { 9447 // The sink source is in a replicate region, we need to move the whole 9448 // replicate region, which should only contain a single recipe in the 9449 // main block. 
9450 auto *SplitBlock = 9451 Target->getParent()->splitAt(std::next(Target->getIterator())); 9452 9453 auto *SplitPred = SplitBlock->getSinglePredecessor(); 9454 9455 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 9456 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 9457 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 9458 if (VPBB == SplitPred) 9459 VPBB = SplitBlock; 9460 } 9461 } 9462 9463 // Adjust the recipes for any inloop reductions. 9464 adjustRecipesForReductions(VPBB, Plan, RecipeBuilder, Range.Start); 9465 9466 // Introduce a recipe to combine the incoming and previous values of a 9467 // first-order recurrence. 9468 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9469 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 9470 if (!RecurPhi) 9471 continue; 9472 9473 auto *RecurSplice = cast<VPInstruction>( 9474 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 9475 {RecurPhi, RecurPhi->getBackedgeValue()})); 9476 9477 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 9478 if (auto *Region = GetReplicateRegion(PrevRecipe)) { 9479 VPBasicBlock *Succ = cast<VPBasicBlock>(Region->getSingleSuccessor()); 9480 RecurSplice->moveBefore(*Succ, Succ->getFirstNonPhi()); 9481 } else 9482 RecurSplice->moveAfter(PrevRecipe); 9483 RecurPhi->replaceAllUsesWith(RecurSplice); 9484 // Set the first operand of RecurSplice to RecurPhi again, after replacing 9485 // all users. 9486 RecurSplice->setOperand(0, RecurPhi); 9487 } 9488 9489 // Interleave memory: for each Interleave Group we marked earlier as relevant 9490 // for this VPlan, replace the Recipes widening its memory instructions with a 9491 // single VPInterleaveRecipe at its insertion point. 9492 for (auto IG : InterleaveGroups) { 9493 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 9494 RecipeBuilder.getRecipe(IG->getInsertPos())); 9495 SmallVector<VPValue *, 4> StoredValues; 9496 for (unsigned i = 0; i < IG->getFactor(); ++i) 9497 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 9498 auto *StoreR = 9499 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 9500 StoredValues.push_back(StoreR->getStoredValue()); 9501 } 9502 9503 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 9504 Recipe->getMask()); 9505 VPIG->insertBefore(Recipe); 9506 unsigned J = 0; 9507 for (unsigned i = 0; i < IG->getFactor(); ++i) 9508 if (Instruction *Member = IG->getMember(i)) { 9509 if (!Member->getType()->isVoidTy()) { 9510 VPValue *OriginalV = Plan->getVPValue(Member); 9511 Plan->removeVPValueFor(Member); 9512 Plan->addVPValue(Member, VPIG->getVPValue(J)); 9513 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 9514 J++; 9515 } 9516 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9517 } 9518 } 9519 9520 // From this point onwards, VPlan-to-VPlan transformations may change the plan 9521 // in ways that accessing values using original IR values is incorrect. 
9522 Plan->disableValue2VPValue();
9523
9524 VPlanTransforms::sinkScalarOperands(*Plan);
9525 VPlanTransforms::mergeReplicateRegions(*Plan);
9526
9527 std::string PlanName;
9528 raw_string_ostream RSO(PlanName);
9529 ElementCount VF = Range.Start;
9530 Plan->addVF(VF);
9531 RSO << "Initial VPlan for VF={" << VF;
9532 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9533 Plan->addVF(VF);
9534 RSO << "," << VF;
9535 }
9536 RSO << "},UF>=1";
9537 RSO.flush();
9538 Plan->setName(PlanName);
9539
9540 return Plan;
9541 }
9542
9543 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
9544 // Outer loop handling: outer loops may require CFG and instruction level
9545 // transformations before even evaluating whether vectorization is profitable.
9546 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9547 // the vectorization pipeline.
9548 assert(!OrigLoop->isInnermost());
9549 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9550
9551 // Create new empty VPlan
9552 auto Plan = std::make_unique<VPlan>();
9553
9554 // Build hierarchical CFG
9555 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9556 HCFGBuilder.buildHierarchicalCFG();
9557
9558 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9559 VF *= 2)
9560 Plan->addVF(VF);
9561
9562 if (EnableVPlanPredication) {
9563 VPlanPredicator VPP(*Plan);
9564 VPP.predicate();
9565
9566 // Avoid running the transformation to recipes until masked code generation
9567 // in the VPlan-native path is in place.
9568 return Plan;
9569 }
9570
9571 SmallPtrSet<Instruction *, 1> DeadInstructions;
9572 VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan,
9573 Legal->getInductionVars(),
9574 DeadInstructions, *PSE.getSE());
9575 return Plan;
9576 }
9577
9578 // Adjust the recipes for reductions. For in-loop reductions the chain of
9579 // instructions leading from the loop exit instr to the phi needs to be converted
9580 // to reductions, with one operand being vector and the other being the scalar
9581 // reduction chain. For other reductions, a select is introduced between the phi
9582 // and live-out recipes when folding the tail.
9583 void LoopVectorizationPlanner::adjustRecipesForReductions(
9584 VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9585 ElementCount MinVF) {
9586 for (auto &Reduction : CM.getInLoopReductionChains()) {
9587 PHINode *Phi = Reduction.first;
9588 RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
9589 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9590
9591 if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9592 continue;
9593
9594 // ReductionOperations are ordered top-down from the phi's use to the
9595 // LoopExitValue. We keep track of the previous item (the Chain) to tell
9596 // which of the two operands will remain scalar and which will be reduced.
9597 // For minmax the chain will be the select instructions.
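// For instance (illustrative), for an in-loop integer sum
//   %red = phi i32 [ 0, %preheader ], [ %add, %latch ]
//   %add = add i32 %red, %val
// ReductionOperations is { %add }; the phi side of %add stays scalar as the
// chain and %val becomes the vector operand of the reduction.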
9598 Instruction *Chain = Phi; 9599 for (Instruction *R : ReductionOperations) { 9600 VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R); 9601 RecurKind Kind = RdxDesc.getRecurrenceKind(); 9602 9603 VPValue *ChainOp = Plan->getVPValue(Chain); 9604 unsigned FirstOpId; 9605 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) && 9606 "Only min/max recurrences allowed for inloop reductions"); 9607 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9608 assert(isa<VPWidenSelectRecipe>(WidenRecipe) && 9609 "Expected to replace a VPWidenSelectSC"); 9610 FirstOpId = 1; 9611 } else { 9612 assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe)) && 9613 "Expected to replace a VPWidenSC"); 9614 FirstOpId = 0; 9615 } 9616 unsigned VecOpId = 9617 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId; 9618 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); 9619 9620 auto *CondOp = CM.foldTailByMasking() 9621 ? RecipeBuilder.createBlockInMask(R->getParent(), Plan) 9622 : nullptr; 9623 VPReductionRecipe *RedRecipe = new VPReductionRecipe( 9624 &RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9625 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9626 Plan->removeVPValueFor(R); 9627 Plan->addVPValue(R, RedRecipe); 9628 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 9629 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9630 WidenRecipe->eraseFromParent(); 9631 9632 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9633 VPRecipeBase *CompareRecipe = 9634 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9635 assert(isa<VPWidenRecipe>(CompareRecipe) && 9636 "Expected to replace a VPWidenSC"); 9637 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9638 "Expected no remaining users"); 9639 CompareRecipe->eraseFromParent(); 9640 } 9641 Chain = R; 9642 } 9643 } 9644 9645 // If tail is folded by masking, introduce selects between the phi 9646 // and the live-out instruction of each reduction, at the end of the latch. 
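  // For illustration (hypothetical names, VF=4), the select created below at
  // the end of the latch has the shape:
  //   %rdx.sel = select <4 x i1> %header.mask, <4 x i32> %rdx.next, <4 x i32> %rdx.phi
  // so lanes masked off by tail folding keep the value coming from the
  // reduction phi.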
9647 if (CM.foldTailByMasking()) { 9648 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9649 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R); 9650 if (!PhiR || PhiR->isInLoop()) 9651 continue; 9652 Builder.setInsertPoint(LatchVPBB); 9653 VPValue *Cond = 9654 RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9655 VPValue *Red = PhiR->getBackedgeValue(); 9656 Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR}); 9657 } 9658 } 9659 } 9660 9661 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9662 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9663 VPSlotTracker &SlotTracker) const { 9664 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9665 IG->getInsertPos()->printAsOperand(O, false); 9666 O << ", "; 9667 getAddr()->printAsOperand(O, SlotTracker); 9668 VPValue *Mask = getMask(); 9669 if (Mask) { 9670 O << ", "; 9671 Mask->printAsOperand(O, SlotTracker); 9672 } 9673 9674 unsigned OpIdx = 0; 9675 for (unsigned i = 0; i < IG->getFactor(); ++i) { 9676 if (!IG->getMember(i)) 9677 continue; 9678 if (getNumStoreOperands() > 0) { 9679 O << "\n" << Indent << " store "; 9680 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker); 9681 O << " to index " << i; 9682 } else { 9683 O << "\n" << Indent << " "; 9684 getVPValue(OpIdx)->printAsOperand(O, SlotTracker); 9685 O << " = load from index " << i; 9686 } 9687 ++OpIdx; 9688 } 9689 } 9690 #endif 9691 9692 void VPWidenCallRecipe::execute(VPTransformState &State) { 9693 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9694 *this, State); 9695 } 9696 9697 void VPWidenSelectRecipe::execute(VPTransformState &State) { 9698 State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()), 9699 this, *this, InvariantCond, State); 9700 } 9701 9702 void VPWidenRecipe::execute(VPTransformState &State) { 9703 State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State); 9704 } 9705 9706 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9707 State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this, 9708 *this, State.UF, State.VF, IsPtrLoopInvariant, 9709 IsIndexLoopInvariant, State); 9710 } 9711 9712 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 9713 assert(!State.Instance && "Int or FP induction being replicated."); 9714 State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(), 9715 getTruncInst(), getVPValue(0), 9716 getCastValue(), State); 9717 } 9718 9719 void VPWidenPHIRecipe::execute(VPTransformState &State) { 9720 State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this, 9721 State); 9722 } 9723 9724 void VPBlendRecipe::execute(VPTransformState &State) { 9725 State.ILV->setDebugLocFromInst(Phi, &State.Builder); 9726 // We know that all PHIs in non-header blocks are converted into 9727 // selects, so we don't have to worry about the insertion order and we 9728 // can just use the builder. 9729 // At this point we generate the predication tree. There may be 9730 // duplications since this is a simple recursive scan, but future 9731 // optimizations will clean it up. 9732 9733 unsigned NumIncoming = getNumIncomingValues(); 9734 9735 // Generate a sequence of selects of the form: 9736 // SELECT(Mask3, In3, 9737 // SELECT(Mask2, In2, 9738 // SELECT(Mask1, In1, 9739 // In0))) 9740 // Note that Mask0 is never used: lanes for which no path reaches this phi and 9741 // are essentially undef are taken from In0. 
9742 InnerLoopVectorizer::VectorParts Entry(State.UF); 9743 for (unsigned In = 0; In < NumIncoming; ++In) { 9744 for (unsigned Part = 0; Part < State.UF; ++Part) { 9745 // We might have single edge PHIs (blocks) - use an identity 9746 // 'select' for the first PHI operand. 9747 Value *In0 = State.get(getIncomingValue(In), Part); 9748 if (In == 0) 9749 Entry[Part] = In0; // Initialize with the first incoming value. 9750 else { 9751 // Select between the current value and the previous incoming edge 9752 // based on the incoming mask. 9753 Value *Cond = State.get(getMask(In), Part); 9754 Entry[Part] = 9755 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 9756 } 9757 } 9758 } 9759 for (unsigned Part = 0; Part < State.UF; ++Part) 9760 State.set(this, Entry[Part], Part); 9761 } 9762 9763 void VPInterleaveRecipe::execute(VPTransformState &State) { 9764 assert(!State.Instance && "Interleave group being replicated."); 9765 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9766 getStoredValues(), getMask()); 9767 } 9768 9769 void VPReductionRecipe::execute(VPTransformState &State) { 9770 assert(!State.Instance && "Reduction being replicated."); 9771 Value *PrevInChain = State.get(getChainOp(), 0); 9772 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9773 bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc); 9774 // Propagate the fast-math flags carried by the underlying instruction. 9775 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder); 9776 State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags()); 9777 for (unsigned Part = 0; Part < State.UF; ++Part) { 9778 Value *NewVecOp = State.get(getVecOp(), Part); 9779 if (VPValue *Cond = getCondOp()) { 9780 Value *NewCond = State.get(Cond, Part); 9781 VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); 9782 Value *Iden = RdxDesc->getRecurrenceIdentity( 9783 Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags()); 9784 Value *IdenVec = 9785 State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden); 9786 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); 9787 NewVecOp = Select; 9788 } 9789 Value *NewRed; 9790 Value *NextInChain; 9791 if (IsOrdered) { 9792 if (State.VF.isVector()) 9793 NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp, 9794 PrevInChain); 9795 else 9796 NewRed = State.Builder.CreateBinOp( 9797 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain, 9798 NewVecOp); 9799 PrevInChain = NewRed; 9800 } else { 9801 PrevInChain = State.get(getChainOp(), Part); 9802 NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); 9803 } 9804 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9805 NextInChain = 9806 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), 9807 NewRed, PrevInChain); 9808 } else if (IsOrdered) 9809 NextInChain = NewRed; 9810 else 9811 NextInChain = State.Builder.CreateBinOp( 9812 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed, 9813 PrevInChain); 9814 State.set(this, NextInChain, Part); 9815 } 9816 } 9817 9818 void VPReplicateRecipe::execute(VPTransformState &State) { 9819 if (State.Instance) { // Generate a single instance. 9820 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 9821 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this, 9822 *State.Instance, IsPredicated, State); 9823 // Insert scalar instance packing it into a vector. 9824 if (AlsoPack && State.VF.isVector()) { 9825 // If we're constructing lane 0, initialize to start from poison. 
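      // A minimal sketch of the packing for VF=4 and an i32 value (hypothetical
      // IR): lane 0 starts from the poison vector set just below, and each later
      // lane inserts into the previously packed value:
      //   %pack.0 = insertelement <4 x i32> poison,  i32 %scalar.0, i32 0
      //   %pack.1 = insertelement <4 x i32> %pack.0, i32 %scalar.1, i32 1
      //   ...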
9826       if (State.Instance->Lane.isFirstLane()) {
9827         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9828         Value *Poison = PoisonValue::get(
9829             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9830         State.set(this, Poison, State.Instance->Part);
9831       }
9832       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9833     }
9834     return;
9835   }
9836
9837   // Generate scalar instances for all VF lanes of all UF parts, unless the
9838   // instruction is uniform, in which case generate only the first lane for each
9839   // of the UF parts.
9840   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9841   assert((!State.VF.isScalable() || IsUniform) &&
9842          "Can't scalarize a scalable vector");
9843   for (unsigned Part = 0; Part < State.UF; ++Part)
9844     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9845       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9846                                       VPIteration(Part, Lane), IsPredicated,
9847                                       State);
9848 }
9849
9850 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9851   assert(State.Instance && "Branch on Mask works only on single instance.");
9852
9853   unsigned Part = State.Instance->Part;
9854   unsigned Lane = State.Instance->Lane.getKnownLane();
9855
9856   Value *ConditionBit = nullptr;
9857   VPValue *BlockInMask = getMask();
9858   if (BlockInMask) {
9859     ConditionBit = State.get(BlockInMask, Part);
9860     if (ConditionBit->getType()->isVectorTy())
9861       ConditionBit = State.Builder.CreateExtractElement(
9862           ConditionBit, State.Builder.getInt32(Lane));
9863   } else // Block in mask is all-one.
9864     ConditionBit = State.Builder.getTrue();
9865
9866   // Replace the temporary unreachable terminator with a new conditional branch,
9867   // whose two destinations will be set later when they are created.
9868   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9869   assert(isa<UnreachableInst>(CurrentTerminator) &&
9870          "Expected to replace unreachable terminator with conditional branch.");
9871   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9872   CondBr->setSuccessor(0, nullptr);
9873   ReplaceInstWithInst(CurrentTerminator, CondBr);
9874 }
9875
9876 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9877   assert(State.Instance && "Predicated instruction PHI works per instance.");
9878   Instruction *ScalarPredInst =
9879       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9880   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9881   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9882   assert(PredicatingBB && "Predicated block has no single predecessor.");
9883   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9884          "operand must be VPReplicateRecipe");
9885
9886   // By current pack/unpack logic we need to generate only a single phi node: if
9887   // a vector value for the predicated instruction exists at this point it means
9888   // the instruction has vector users only, and a phi for the vector value is
9889   // needed. In this case the recipe of the predicated instruction is marked to
9890   // also do that packing, thereby "hoisting" the insert-element sequence.
9891   // Otherwise, a phi node for the scalar value is needed.
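  // For illustration (hypothetical names, VF=4), the vector-value case below
  // creates:
  //   %vec.phi = phi <4 x i32> [ %vec.before.insert, %predicating.bb ],
  //                            [ %vec.with.new.lane, %predicated.bb ]
  // while the scalar case creates a phi of the scalar type, with a poison
  // incoming value on the predicating edge.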
9892 unsigned Part = State.Instance->Part; 9893 if (State.hasVectorValue(getOperand(0), Part)) { 9894 Value *VectorValue = State.get(getOperand(0), Part); 9895 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9896 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9897 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9898 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9899 if (State.hasVectorValue(this, Part)) 9900 State.reset(this, VPhi, Part); 9901 else 9902 State.set(this, VPhi, Part); 9903 // NOTE: Currently we need to update the value of the operand, so the next 9904 // predicated iteration inserts its generated value in the correct vector. 9905 State.reset(getOperand(0), VPhi, Part); 9906 } else { 9907 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); 9908 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9909 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), 9910 PredicatingBB); 9911 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9912 if (State.hasScalarValue(this, *State.Instance)) 9913 State.reset(this, Phi, *State.Instance); 9914 else 9915 State.set(this, Phi, *State.Instance); 9916 // NOTE: Currently we need to update the value of the operand, so the next 9917 // predicated iteration inserts its generated value in the correct vector. 9918 State.reset(getOperand(0), Phi, *State.Instance); 9919 } 9920 } 9921 9922 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9923 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9924 State.ILV->vectorizeMemoryInstruction( 9925 &Ingredient, State, StoredValue ? nullptr : getVPSingleValue(), getAddr(), 9926 StoredValue, getMask(), Consecutive, Reverse); 9927 } 9928 9929 // Determine how to lower the scalar epilogue, which depends on 1) optimising 9930 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 9931 // predication, and 4) a TTI hook that analyses whether the loop is suitable 9932 // for predication. 9933 static ScalarEpilogueLowering getScalarEpilogueLowering( 9934 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 9935 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 9936 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 9937 LoopVectorizationLegality &LVL) { 9938 // 1) OptSize takes precedence over all other options, i.e. if this is set, 9939 // don't look at hints or options, and don't request a scalar epilogue. 9940 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 9941 // LoopAccessInfo (due to code dependency and not being able to reliably get 9942 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 9943 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 9944 // versioning when the vectorization is forced, unlike hasOptSize. So revert 9945 // back to the old way and vectorize with versioning when forced. See D81345.) 
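  // Worked example of the precedence (assumed settings): with the OptSize
  // attribute the answer is CM_ScalarEpilogueNotAllowedOptSize regardless of
  // hints; otherwise an explicit PreferPredicateTy::PredicateOrDontVectorize
  // request yields CM_ScalarEpilogueNotAllowedUsePredicate before the loop
  // hints or the TTI hook are ever consulted.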
9946 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 9947 PGSOQueryType::IRPass) && 9948 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 9949 return CM_ScalarEpilogueNotAllowedOptSize; 9950 9951 // 2) If set, obey the directives 9952 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 9953 switch (PreferPredicateOverEpilogue) { 9954 case PreferPredicateTy::ScalarEpilogue: 9955 return CM_ScalarEpilogueAllowed; 9956 case PreferPredicateTy::PredicateElseScalarEpilogue: 9957 return CM_ScalarEpilogueNotNeededUsePredicate; 9958 case PreferPredicateTy::PredicateOrDontVectorize: 9959 return CM_ScalarEpilogueNotAllowedUsePredicate; 9960 }; 9961 } 9962 9963 // 3) If set, obey the hints 9964 switch (Hints.getPredicate()) { 9965 case LoopVectorizeHints::FK_Enabled: 9966 return CM_ScalarEpilogueNotNeededUsePredicate; 9967 case LoopVectorizeHints::FK_Disabled: 9968 return CM_ScalarEpilogueAllowed; 9969 }; 9970 9971 // 4) if the TTI hook indicates this is profitable, request predication. 9972 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 9973 LVL.getLAI())) 9974 return CM_ScalarEpilogueNotNeededUsePredicate; 9975 9976 return CM_ScalarEpilogueAllowed; 9977 } 9978 9979 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 9980 // If Values have been set for this Def return the one relevant for \p Part. 9981 if (hasVectorValue(Def, Part)) 9982 return Data.PerPartOutput[Def][Part]; 9983 9984 if (!hasScalarValue(Def, {Part, 0})) { 9985 Value *IRV = Def->getLiveInIRValue(); 9986 Value *B = ILV->getBroadcastInstrs(IRV); 9987 set(Def, B, Part); 9988 return B; 9989 } 9990 9991 Value *ScalarValue = get(Def, {Part, 0}); 9992 // If we aren't vectorizing, we can just copy the scalar map values over 9993 // to the vector map. 9994 if (VF.isScalar()) { 9995 set(Def, ScalarValue, Part); 9996 return ScalarValue; 9997 } 9998 9999 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 10000 bool IsUniform = RepR && RepR->isUniform(); 10001 10002 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; 10003 // Check if there is a scalar value for the selected lane. 10004 if (!hasScalarValue(Def, {Part, LastLane})) { 10005 // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. 10006 assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) && 10007 "unexpected recipe found to be invariant"); 10008 IsUniform = true; 10009 LastLane = 0; 10010 } 10011 10012 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane})); 10013 // Set the insert point after the last scalarized instruction or after the 10014 // last PHI, if LastInst is a PHI. This ensures the insertelement sequence 10015 // will directly follow the scalar definitions. 10016 auto OldIP = Builder.saveIP(); 10017 auto NewIP = 10018 isa<PHINode>(LastInst) 10019 ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI()) 10020 : std::next(BasicBlock::iterator(LastInst)); 10021 Builder.SetInsertPoint(&*NewIP); 10022 10023 // However, if we are vectorizing, we need to construct the vector values. 10024 // If the value is known to be uniform after vectorization, we can just 10025 // broadcast the scalar value corresponding to lane zero for each unroll 10026 // iteration. Otherwise, we construct the vector values using 10027 // insertelement instructions. Since the resulting vectors are stored in 10028 // State, we will only generate the insertelements once. 
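  // For illustration (hypothetical IR, VF=4): the uniform case produces a
  // splat such as
  //   %bc.ins   = insertelement <4 x i32> poison, i32 %scalar, i32 0
  //   %bc.splat = shufflevector <4 x i32> %bc.ins, <4 x i32> poison,
  //                             <4 x i32> zeroinitializer
  // while the non-uniform case rebuilds the vector lane by lane with
  // insertelement instructions.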
10029 Value *VectorValue = nullptr; 10030 if (IsUniform) { 10031 VectorValue = ILV->getBroadcastInstrs(ScalarValue); 10032 set(Def, VectorValue, Part); 10033 } else { 10034 // Initialize packing with insertelements to start from undef. 10035 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 10036 Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF)); 10037 set(Def, Undef, Part); 10038 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) 10039 ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this); 10040 VectorValue = get(Def, Part); 10041 } 10042 Builder.restoreIP(OldIP); 10043 return VectorValue; 10044 } 10045 10046 // Process the loop in the VPlan-native vectorization path. This path builds 10047 // VPlan upfront in the vectorization pipeline, which allows to apply 10048 // VPlan-to-VPlan transformations from the very beginning without modifying the 10049 // input LLVM IR. 10050 static bool processLoopInVPlanNativePath( 10051 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 10052 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 10053 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 10054 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 10055 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, 10056 LoopVectorizationRequirements &Requirements) { 10057 10058 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { 10059 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); 10060 return false; 10061 } 10062 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 10063 Function *F = L->getHeader()->getParent(); 10064 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 10065 10066 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10067 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 10068 10069 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 10070 &Hints, IAI); 10071 // Use the planner for outer loop vectorization. 10072 // TODO: CM is not used at this point inside the planner. Turn CM into an 10073 // optional argument if we don't need it in the future. 10074 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints, 10075 Requirements, ORE); 10076 10077 // Get user vectorization factor. 10078 ElementCount UserVF = Hints.getWidth(); 10079 10080 CM.collectElementTypesForWidening(); 10081 10082 // Plan how to best vectorize, return the best VF and its cost. 10083 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 10084 10085 // If we are stress testing VPlan builds, do not attempt to generate vector 10086 // code. Masked vector code generation support will follow soon. 10087 // Also, do not attempt to vectorize if no vector code will be produced. 10088 if (VPlanBuildStressTest || EnableVPlanPredication || 10089 VectorizationFactor::Disabled() == VF) 10090 return false; 10091 10092 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10093 10094 { 10095 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10096 F->getParent()->getDataLayout()); 10097 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 10098 &CM, BFI, PSI, Checks); 10099 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 10100 << L->getHeader()->getParent()->getName() << "\"\n"); 10101 LVP.executePlan(VF.Width, 1, BestPlan, LB, DT); 10102 } 10103 10104 // Mark the loop as already vectorized to avoid vectorizing again. 
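  // Illustrative shape of the loop metadata after this call (node numbers are
  // arbitrary):
  //   br i1 %cond, label %loop, label %exit, !llvm.loop !0
  //   !0 = distinct !{!0, !1}
  //   !1 = !{!"llvm.loop.isvectorized", i32 1}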
10105   Hints.setAlreadyVectorized();
10106   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10107   return true;
10108 }
10109
10110 // Emit a remark if there are stores to floats that required a floating point
10111 // extension. If the vectorized loop was generated with floating point, there
10112 // will be a performance penalty from the conversion overhead and the change in
10113 // the vector width.
10114 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
10115   SmallVector<Instruction *, 4> Worklist;
10116   for (BasicBlock *BB : L->getBlocks()) {
10117     for (Instruction &Inst : *BB) {
10118       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
10119         if (S->getValueOperand()->getType()->isFloatTy())
10120           Worklist.push_back(S);
10121       }
10122     }
10123   }
10124
10125   // Traverse the floating point stores upwards, searching for floating point
10126   // conversions.
10127   SmallPtrSet<const Instruction *, 4> Visited;
10128   SmallPtrSet<const Instruction *, 4> EmittedRemark;
10129   while (!Worklist.empty()) {
10130     auto *I = Worklist.pop_back_val();
10131     if (!L->contains(I))
10132       continue;
10133     if (!Visited.insert(I).second)
10134       continue;
10135
10136     // Emit a remark if the floating point store required a floating
10137     // point conversion.
10138     // TODO: More work could be done to identify the root cause such as a
10139     // constant or a function return type and point the user to it.
10140     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
10141       ORE->emit([&]() {
10142         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
10143                                           I->getDebugLoc(), L->getHeader())
10144                << "floating point conversion changes vector width. "
10145                << "Mixed floating point precision requires an up/down "
10146                << "cast that will negatively impact performance.";
10147       });
10148
10149     for (Use &Op : I->operands())
10150       if (auto *OpI = dyn_cast<Instruction>(Op))
10151         Worklist.push_back(OpI);
10152   }
10153 }
10154
10155 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
10156     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10157                                !EnableLoopInterleaving),
10158       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10159                               !EnableLoopVectorization) {}
10160
10161 bool LoopVectorizePass::processLoop(Loop *L) {
10162   assert((EnableVPlanNativePath || L->isInnermost()) &&
10163          "VPlan-native path is not enabled. Only process inner loops.");
10164
10165 #ifndef NDEBUG
10166   const std::string DebugLocStr = getDebugLocString(L);
10167 #endif /* NDEBUG */
10168
10169   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
10170                     << L->getHeader()->getParent()->getName() << "\" from "
10171                     << DebugLocStr << "\n");
10172
10173   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
10174
10175   LLVM_DEBUG(
10176       dbgs() << "LV: Loop hints:"
10177              << " force="
10178              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
10179                      ? "disabled"
10180                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
10181                             ? "enabled"
10182                             : "?"))
10183              << " width=" << Hints.getWidth()
10184              << " interleave=" << Hints.getInterleave() << "\n");
10185
10186   // Function containing loop
10187   Function *F = L->getHeader()->getParent();
10188
10189   // Looking at the diagnostic output is the only way to determine if a loop
10190   // was vectorized (other than looking at the IR or machine code), so it
10191   // is important to generate an optimization remark for each loop. Most of
10192   // these messages are generated as OptimizationRemarkAnalysis.
Remarks 10193 // generated as OptimizationRemark and OptimizationRemarkMissed are 10194 // less verbose reporting vectorized loops and unvectorized loops that may 10195 // benefit from vectorization, respectively. 10196 10197 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 10198 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 10199 return false; 10200 } 10201 10202 PredicatedScalarEvolution PSE(*SE, *L); 10203 10204 // Check if it is legal to vectorize the loop. 10205 LoopVectorizationRequirements Requirements; 10206 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 10207 &Requirements, &Hints, DB, AC, BFI, PSI); 10208 if (!LVL.canVectorize(EnableVPlanNativePath)) { 10209 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 10210 Hints.emitRemarkWithHints(); 10211 return false; 10212 } 10213 10214 // Check the function attributes and profiles to find out if this function 10215 // should be optimized for size. 10216 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10217 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 10218 10219 // Entrance to the VPlan-native vectorization path. Outer loops are processed 10220 // here. They may require CFG and instruction level transformations before 10221 // even evaluating whether vectorization is profitable. Since we cannot modify 10222 // the incoming IR, we need to build VPlan upfront in the vectorization 10223 // pipeline. 10224 if (!L->isInnermost()) 10225 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 10226 ORE, BFI, PSI, Hints, Requirements); 10227 10228 assert(L->isInnermost() && "Inner loop expected."); 10229 10230 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 10231 // count by optimizing for size, to minimize overheads. 10232 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 10233 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 10234 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 10235 << "This loop is worth vectorizing only if no scalar " 10236 << "iteration overheads are incurred."); 10237 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 10238 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 10239 else { 10240 LLVM_DEBUG(dbgs() << "\n"); 10241 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 10242 } 10243 } 10244 10245 // Check the function attributes to see if implicit floats are allowed. 10246 // FIXME: This check doesn't seem possibly correct -- what if the loop is 10247 // an integer loop and the vector instructions selected are purely integer 10248 // vector instructions? 10249 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 10250 reportVectorizationFailure( 10251 "Can't vectorize when the NoImplicitFloat attribute is used", 10252 "loop not vectorized due to NoImplicitFloat attribute", 10253 "NoImplicitFloat", ORE, L); 10254 Hints.emitRemarkWithHints(); 10255 return false; 10256 } 10257 10258 // Check if the target supports potentially unsafe FP vectorization. 10259 // FIXME: Add a check for the type of safety issue (denormal, signaling) 10260 // for the target we're vectorizing for, to make sure none of the 10261 // additional fp-math flags can help. 
10262 if (Hints.isPotentiallyUnsafe() && 10263 TTI->isFPVectorizationPotentiallyUnsafe()) { 10264 reportVectorizationFailure( 10265 "Potentially unsafe FP op prevents vectorization", 10266 "loop not vectorized due to unsafe FP support.", 10267 "UnsafeFP", ORE, L); 10268 Hints.emitRemarkWithHints(); 10269 return false; 10270 } 10271 10272 bool AllowOrderedReductions; 10273 // If the flag is set, use that instead and override the TTI behaviour. 10274 if (ForceOrderedReductions.getNumOccurrences() > 0) 10275 AllowOrderedReductions = ForceOrderedReductions; 10276 else 10277 AllowOrderedReductions = TTI->enableOrderedReductions(); 10278 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) { 10279 ORE->emit([&]() { 10280 auto *ExactFPMathInst = Requirements.getExactFPInst(); 10281 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps", 10282 ExactFPMathInst->getDebugLoc(), 10283 ExactFPMathInst->getParent()) 10284 << "loop not vectorized: cannot prove it is safe to reorder " 10285 "floating-point operations"; 10286 }); 10287 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to " 10288 "reorder floating-point operations\n"); 10289 Hints.emitRemarkWithHints(); 10290 return false; 10291 } 10292 10293 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 10294 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 10295 10296 // If an override option has been passed in for interleaved accesses, use it. 10297 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 10298 UseInterleaved = EnableInterleavedMemAccesses; 10299 10300 // Analyze interleaved memory accesses. 10301 if (UseInterleaved) { 10302 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 10303 } 10304 10305 // Use the cost model. 10306 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 10307 F, &Hints, IAI); 10308 CM.collectValuesToIgnore(); 10309 CM.collectElementTypesForWidening(); 10310 10311 // Use the planner for vectorization. 10312 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints, 10313 Requirements, ORE); 10314 10315 // Get user vectorization factor and interleave count. 10316 ElementCount UserVF = Hints.getWidth(); 10317 unsigned UserIC = Hints.getInterleave(); 10318 10319 // Plan how to best vectorize, return the best VF and its cost. 10320 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC); 10321 10322 VectorizationFactor VF = VectorizationFactor::Disabled(); 10323 unsigned IC = 1; 10324 10325 if (MaybeVF) { 10326 VF = *MaybeVF; 10327 // Select the interleave count. 10328 IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue()); 10329 } 10330 10331 // Identify the diagnostic messages that should be produced. 10332 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg; 10333 bool VectorizeLoop = true, InterleaveLoop = true; 10334 if (VF.Width.isScalar()) { 10335 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); 10336 VecDiagMsg = std::make_pair( 10337 "VectorizationNotBeneficial", 10338 "the cost-model indicates that vectorization is not beneficial"); 10339 VectorizeLoop = false; 10340 } 10341 10342 if (!MaybeVF && UserIC > 1) { 10343 // Tell the user interleaving was avoided up-front, despite being explicitly 10344 // requested. 
10345     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10346                          "interleaving should be avoided up front\n");
10347     IntDiagMsg = std::make_pair(
10348         "InterleavingAvoided",
10349         "Ignoring UserIC, because interleaving was avoided up front");
10350     InterleaveLoop = false;
10351   } else if (IC == 1 && UserIC <= 1) {
10352     // Tell the user interleaving is not beneficial.
10353     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10354     IntDiagMsg = std::make_pair(
10355         "InterleavingNotBeneficial",
10356         "the cost-model indicates that interleaving is not beneficial");
10357     InterleaveLoop = false;
10358     if (UserIC == 1) {
10359       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10360       IntDiagMsg.second +=
10361           " and is explicitly disabled or interleave count is set to 1";
10362     }
10363   } else if (IC > 1 && UserIC == 1) {
10364     // Tell the user interleaving is beneficial, but it is explicitly disabled.
10365     LLVM_DEBUG(
10366         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10367     IntDiagMsg = std::make_pair(
10368         "InterleavingBeneficialButDisabled",
10369         "the cost-model indicates that interleaving is beneficial "
10370         "but is explicitly disabled or interleave count is set to 1");
10371     InterleaveLoop = false;
10372   }
10373
10374   // Override IC if user provided an interleave count.
10375   IC = UserIC > 0 ? UserIC : IC;
10376
10377   // Emit diagnostic messages, if any.
10378   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10379   if (!VectorizeLoop && !InterleaveLoop) {
10380     // Do not vectorize or interleave the loop.
10381     ORE->emit([&]() {
10382       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10383                                       L->getStartLoc(), L->getHeader())
10384              << VecDiagMsg.second;
10385     });
10386     ORE->emit([&]() {
10387       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10388                                       L->getStartLoc(), L->getHeader())
10389              << IntDiagMsg.second;
10390     });
10391     return false;
10392   } else if (!VectorizeLoop && InterleaveLoop) {
10393     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10394     ORE->emit([&]() {
10395       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10396                                         L->getStartLoc(), L->getHeader())
10397              << VecDiagMsg.second;
10398     });
10399   } else if (VectorizeLoop && !InterleaveLoop) {
10400     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10401                       << ") in " << DebugLocStr << '\n');
10402     ORE->emit([&]() {
10403       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10404                                         L->getStartLoc(), L->getHeader())
10405              << IntDiagMsg.second;
10406     });
10407   } else if (VectorizeLoop && InterleaveLoop) {
10408     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10409                       << ") in " << DebugLocStr << '\n');
10410     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10411   }
10412
10413   bool DisableRuntimeUnroll = false;
10414   MDNode *OrigLoopID = L->getLoopID();
10415   {
10416     // Optimistically generate runtime checks. Drop them if they turn out to not
10417     // be profitable. Limit the scope of Checks, so the cleanup happens
10418     // immediately after vector code generation is done.
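    // For illustration only (a sketch, not the exact IR the checks expand to),
    // a memory overlap check for two pointers %a and %b has the shape:
    //   %bound0 = icmp ult i8* %a.start, %b.end
    //   %bound1 = icmp ult i8* %b.start, %a.end
    //   %found.conflict = and i1 %bound0, %bound1
    // and the vector loop is entered only when %found.conflict is false.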
10419     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10420                              F->getParent()->getDataLayout());
10421     if (!VF.Width.isScalar() || IC > 1)
10422       Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
10423
10424     using namespace ore;
10425     if (!VectorizeLoop) {
10426       assert(IC > 1 && "interleave count should not be 1 or 0");
10427       // If we decided that it is not profitable to vectorize the loop, then
10428       // interleave it.
10429       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10430                                  &CM, BFI, PSI, Checks);
10431
10432       VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10433       LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);
10434
10435       ORE->emit([&]() {
10436         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10437                                   L->getHeader())
10438                << "interleaved loop (interleaved count: "
10439                << NV("InterleaveCount", IC) << ")";
10440       });
10441     } else {
10442       // If we decided that it is *profitable* to vectorize the loop, then do it.
10443
10444       // Consider vectorizing the epilogue too if it's profitable.
10445       VectorizationFactor EpilogueVF =
10446           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10447       if (EpilogueVF.Width.isVector()) {
10448
10449         // The first pass vectorizes the main loop and creates a scalar epilogue
10450         // to be vectorized by executing the plan (potentially with a different
10451         // factor) again shortly afterwards.
10452         EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
10453         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10454                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10455
10456         VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
10457         LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
10458                         DT);
10459         ++LoopsVectorized;
10460
10461         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10462         formLCSSARecursively(*L, *DT, LI, SE);
10463
10464         // Second pass vectorizes the epilogue and adjusts the control flow
10465         // edges from the first pass.
10466         EPI.MainLoopVF = EPI.EpilogueVF;
10467         EPI.MainLoopUF = EPI.EpilogueUF;
10468         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10469                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10470                                                  Checks);
10471
10472         VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
10473         LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
10474                         DT);
10475         ++LoopsEpilogueVectorized;
10476
10477         if (!MainILV.areSafetyChecksAdded())
10478           DisableRuntimeUnroll = true;
10479       } else {
10480         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10481                                &LVL, &CM, BFI, PSI, Checks);
10482
10483         VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10484         LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
10485         ++LoopsVectorized;
10486
10487         // Add metadata to disable runtime unrolling of the scalar loop when
10488         // there are no runtime checks about strides and memory. A scalar loop
10489         // that is rarely used is not worth unrolling.
10490         if (!LB.areSafetyChecksAdded())
10491           DisableRuntimeUnroll = true;
10492       }
10493       // Report the vectorization decision.
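      // For reference, the remark emitted below reads along the lines of
      //   "vectorized loop (vectorization width: 4, interleaved count: 2)"
      // with whatever VF.Width and IC were chosen above.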
10494       ORE->emit([&]() {
10495         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10496                                   L->getHeader())
10497                << "vectorized loop (vectorization width: "
10498                << NV("VectorizationFactor", VF.Width)
10499                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10500       });
10501     }
10502
10503     if (ORE->allowExtraAnalysis(LV_NAME))
10504       checkMixedPrecision(L, ORE);
10505   }
10506
10507   Optional<MDNode *> RemainderLoopID =
10508       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10509                                       LLVMLoopVectorizeFollowupEpilogue});
10510   if (RemainderLoopID.hasValue()) {
10511     L->setLoopID(RemainderLoopID.getValue());
10512   } else {
10513     if (DisableRuntimeUnroll)
10514       AddRuntimeUnrollDisableMetaData(L);
10515
10516     // Mark the loop as already vectorized to avoid vectorizing again.
10517     Hints.setAlreadyVectorized();
10518   }
10519
10520   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10521   return true;
10522 }
10523
10524 LoopVectorizeResult LoopVectorizePass::runImpl(
10525     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10526     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10527     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10528     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10529     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10530   SE = &SE_;
10531   LI = &LI_;
10532   TTI = &TTI_;
10533   DT = &DT_;
10534   BFI = &BFI_;
10535   TLI = TLI_;
10536   AA = &AA_;
10537   AC = &AC_;
10538   GetLAA = &GetLAA_;
10539   DB = &DB_;
10540   ORE = &ORE_;
10541   PSI = PSI_;
10542
10543   // Don't attempt if
10544   // 1. the target claims to have no vector registers, and
10545   // 2. interleaving won't help ILP.
10546   //
10547   // The second condition is necessary because, even if the target has no
10548   // vector registers, loop vectorization may still enable scalar
10549   // interleaving.
10550   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10551       TTI->getMaxInterleaveFactor(1) < 2)
10552     return LoopVectorizeResult(false, false);
10553
10554   bool Changed = false, CFGChanged = false;
10555
10556   // The vectorizer requires loops to be in simplified form.
10557   // Since simplification may add new inner loops, it has to run before the
10558   // legality and profitability checks. This means running the loop vectorizer
10559   // will simplify all loops, regardless of whether anything ends up being
10560   // vectorized.
10561   for (auto &L : *LI)
10562     Changed |= CFGChanged |=
10563         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10564
10565   // Build up a worklist of inner-loops to vectorize. This is necessary as
10566   // the act of vectorizing or partially unrolling a loop creates new loops
10567   // and can invalidate iterators across the loops.
10568   SmallVector<Loop *, 8> Worklist;
10569
10570   for (Loop *L : *LI)
10571     collectSupportedLoops(*L, LI, ORE, Worklist);
10572
10573   LoopsAnalyzed += Worklist.size();
10574
10575   // Now walk the identified inner loops.
10576   while (!Worklist.empty()) {
10577     Loop *L = Worklist.pop_back_val();
10578
10579     // For the inner loops we actually process, form LCSSA to simplify the
10580     // transform.
10581     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10582
10583     Changed |= CFGChanged |= processLoop(L);
10584   }
10585
10586   // Process each loop nest in the function.
10587 return LoopVectorizeResult(Changed, CFGChanged); 10588 } 10589 10590 PreservedAnalyses LoopVectorizePass::run(Function &F, 10591 FunctionAnalysisManager &AM) { 10592 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 10593 auto &LI = AM.getResult<LoopAnalysis>(F); 10594 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 10595 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 10596 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 10597 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 10598 auto &AA = AM.getResult<AAManager>(F); 10599 auto &AC = AM.getResult<AssumptionAnalysis>(F); 10600 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 10601 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 10602 10603 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 10604 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 10605 [&](Loop &L) -> const LoopAccessInfo & { 10606 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, 10607 TLI, TTI, nullptr, nullptr, nullptr}; 10608 return LAM.getResult<LoopAccessAnalysis>(L, AR); 10609 }; 10610 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F); 10611 ProfileSummaryInfo *PSI = 10612 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent()); 10613 LoopVectorizeResult Result = 10614 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI); 10615 if (!Result.MadeAnyChange) 10616 return PreservedAnalyses::all(); 10617 PreservedAnalyses PA; 10618 10619 // We currently do not preserve loopinfo/dominator analyses with outer loop 10620 // vectorization. Until this is addressed, mark these analyses as preserved 10621 // only for non-VPlan-native path. 10622 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 10623 if (!EnableVPlanNativePath) { 10624 PA.preserve<LoopAnalysis>(); 10625 PA.preserve<DominatorTreeAnalysis>(); 10626 } 10627 if (!Result.MadeCFGChange) 10628 PA.preserveSet<CFGAnalyses>(); 10629 return PA; 10630 } 10631 10632 void LoopVectorizePass::printPipeline( 10633 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) { 10634 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline( 10635 OS, MapClassName2PassName); 10636 10637 OS << "<"; 10638 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;"; 10639 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;"; 10640 OS << ">"; 10641 } 10642
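// For illustration, with LoopVectorizeOptions left at their defaults (and the
// interleaving/vectorization cl::opts enabled) the parameters printed above
// come out as "<no-interleave-forced-only;no-vectorize-forced-only>";
// constructing the pass with VectorizeOnlyWhenForced set flips the second
// entry to "vectorize-forced-only;".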