//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
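//
// For example, with a vectorization factor (VF) of 4, a scalar loop such as
//
//   for (i = 0; i < n; ++i)
//     A[i] = B[i] + K;
//
// is conceptually rewritten so that each wide iteration computes
// A[i..i+3] = B[i..i+3] + <K, K, K, K> and increments 'i' by 4, with any
// remaining iterations handled by a scalar epilogue loop. (Illustrative
// sketch only; the exact shape of the generated IR depends on the target and
// on the cost model decisions described below.)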
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
"llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 144 #include <algorithm> 145 #include <cassert> 146 #include <cstdint> 147 #include <cstdlib> 148 #include <functional> 149 #include <iterator> 150 #include <limits> 151 #include <memory> 152 #include <string> 153 #include <tuple> 154 #include <utility> 155 156 using namespace llvm; 157 158 #define LV_NAME "loop-vectorize" 159 #define DEBUG_TYPE LV_NAME 160 161 #ifndef NDEBUG 162 const char VerboseDebug[] = DEBUG_TYPE "-verbose"; 163 #endif 164 165 /// @{ 166 /// Metadata attribute names 167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all"; 168 const char LLVMLoopVectorizeFollowupVectorized[] = 169 "llvm.loop.vectorize.followup_vectorized"; 170 const char LLVMLoopVectorizeFollowupEpilogue[] = 171 "llvm.loop.vectorize.followup_epilogue"; 172 /// @} 173 174 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized"); 177 178 static cl::opt<bool> EnableEpilogueVectorization( 179 "enable-epilogue-vectorization", cl::init(true), cl::Hidden, 180 cl::desc("Enable vectorization of epilogue loops.")); 181 182 static cl::opt<unsigned> EpilogueVectorizationForceVF( 183 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, 184 cl::desc("When epilogue vectorization is enabled, and a value greater than " 185 "1 is specified, forces the given VF for all applicable epilogue " 186 "loops.")); 187 188 static cl::opt<unsigned> EpilogueVectorizationMinVF( 189 "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, 190 cl::desc("Only loops with vectorization factor equal to or larger than " 191 "the specified value are considered for epilogue vectorization.")); 192 193 /// Loops with a known constant trip count below this number are vectorized only 194 /// if no scalar iteration overheads are incurred. 195 static cl::opt<unsigned> TinyTripCountVectorThreshold( 196 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 197 cl::desc("Loops with a constant trip count that is smaller than this " 198 "value are vectorized only if no scalar iteration overheads " 199 "are incurred.")); 200 201 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold( 202 "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden, 203 cl::desc("The maximum allowed number of runtime memory checks with a " 204 "vectorize(enable) pragma.")); 205 206 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired, 207 // that predication is preferred, and this lists all options. I.e., the 208 // vectorizer will try to fold the tail-loop (epilogue) into the vector body 209 // and predicate the instructions accordingly. 

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized
/// only if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired and that predication is preferred; the values below list the
// possible settings. I.e., the vectorizer will try to fold the tail-loop
// (epilogue) into the vector body and predicate the instructions accordingly.
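//
// For instance, with a trip count of 10 and VF = 4, tail-folding executes
// three masked vector iterations (the last with only two active lanes)
// instead of two vector iterations followed by a two-iteration scalar
// epilogue.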
Mostly " 279 "useful for getting consistent testing.")); 280 281 static cl::opt<bool> ForceTargetSupportsScalableVectors( 282 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, 283 cl::desc( 284 "Pretend that scalable vectors are supported, even if the target does " 285 "not support them. This flag should only be used for testing.")); 286 287 static cl::opt<unsigned> SmallLoopCost( 288 "small-loop-cost", cl::init(20), cl::Hidden, 289 cl::desc( 290 "The cost of a loop that is considered 'small' by the interleaver.")); 291 292 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 293 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 294 cl::desc("Enable the use of the block frequency analysis to access PGO " 295 "heuristics minimizing code growth in cold regions and being more " 296 "aggressive in hot regions.")); 297 298 // Runtime interleave loops for load/store throughput. 299 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 300 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 301 cl::desc( 302 "Enable runtime interleaving until load/store ports are saturated")); 303 304 /// Interleave small loops with scalar reductions. 305 static cl::opt<bool> InterleaveSmallLoopScalarReduction( 306 "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, 307 cl::desc("Enable interleaving for loops with small iteration counts that " 308 "contain scalar reductions to expose ILP.")); 309 310 /// The number of stores in a loop that are allowed to need predication. 311 static cl::opt<unsigned> NumberOfStoresToPredicate( 312 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 313 cl::desc("Max number of stores to be predicated behind an if.")); 314 315 static cl::opt<bool> EnableIndVarRegisterHeur( 316 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 317 cl::desc("Count the induction variable only once when interleaving")); 318 319 static cl::opt<bool> EnableCondStoresVectorization( 320 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 321 cl::desc("Enable if predication of stores during vectorization.")); 322 323 static cl::opt<unsigned> MaxNestedScalarReductionIC( 324 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 325 cl::desc("The maximum interleave count to use when interleaving a scalar " 326 "reduction in a nested loop.")); 327 328 static cl::opt<bool> 329 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), 330 cl::Hidden, 331 cl::desc("Prefer in-loop vector reductions, " 332 "overriding the targets preference.")); 333 334 static cl::opt<bool> ForceOrderedReductions( 335 "force-ordered-reductions", cl::init(false), cl::Hidden, 336 cl::desc("Enable the vectorisation of loops with in-order (strict) " 337 "FP reductions")); 338 339 static cl::opt<bool> PreferPredicatedReductionSelect( 340 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, 341 cl::desc( 342 "Prefer predicating a reduction operation over an after loop select.")); 343 344 cl::opt<bool> EnableVPlanNativePath( 345 "enable-vplan-native-path", cl::init(false), cl::Hidden, 346 cl::desc("Enable VPlan-native vectorization path with " 347 "support for outer loop vectorization.")); 348 349 // FIXME: Remove this switch once we have divergence analysis. Currently we 350 // assume divergent non-backedge branches when this switch is true. 
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
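/// For example, an i1 has a type size of 1 bit but an alloc size of 8 bits,
/// so an array of i1 values is not bitcast-compatible with <N x i1>.
/// (Illustrative; padded types such as x86_fp80 behave similarly.)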
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

AnalysisKey ShouldRunExtraVectorPasses::Key;

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop and the start value for the canonical induction, if it is != 0. The
  /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
  /// complex control flow around the loops.
  virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single first-order recurrence or pointer induction PHINode in
  /// a block. This method handles the induction variable canonicalization. It
  /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
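  /// As an illustrative sketch, a first-order recurrence is a loop like
  ///   for (i = 1; i < n; ++i)
  ///     B[i] = A[i] + A[i - 1]; // uses a value from the previous iteration
  /// whose vectorized form keeps the last lane of each vector iteration alive
  /// so it can feed the first lane of the next one.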
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive. Uses the VPValue operands from \p RepRecipe instead
  /// of \p Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type. \p CanonicalIV is the scalar value generated for
  /// the canonical induction variable.
  void widenIntOrFpInduction(PHINode *IV, VPWidenIntOrFpInductionRecipe *Def,
                             VPTransformState &State, Value *CanonicalIV);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
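  /// For example (sketch), the two strided accesses in
  ///   for (i = 0; i < n; i += 2) {
  ///     Sum0 += A[i];
  ///     Sum1 += A[i + 1];
  ///   }
  /// form an interleave group of factor 2 that can be loaded with a single
  /// wide load followed by shuffles that de-interleave the lanes.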
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Set the debug location in the builder \p Ptr using the debug location in
  /// \p V. If \p Ptr is None then it uses the class member's Builder.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilder<> *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we
  /// are able to vectorize with strict in-order reductions for the given
  /// RdxDesc.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
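  /// A broadcast of %v at VF = 4 typically takes the canonical splat form
  /// (sketch):
  ///   %splatinsert = insertelement <4 x i32> poison, i32 %v, i32 0
  ///   %splat = shufflevector <4 x i32> %splatinsert, <4 x i32> poison,
  ///                          <4 x i32> zeroinitializer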
  virtual Value *getBroadcastInstrs(Value *V);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  // Returns the resume value (bc.merge.rdx) for a reduction as
  // generated by fixReduction.
  PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Introduce a conditional branch (on true, condition to be set later) at
  /// the end of the header=latch connecting it to itself (across the backedge)
  /// and to the exit block of \p L.
  void createHeaderBranch(Loop *L);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
                               VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPTransformState &State);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
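  /// For example, for an integer induction with StartValue 10 and StepValue 3,
  /// Index 4 is transformed to 10 + 4 * 3 = 22; the pointer case instead
  /// produces the address &StartValue[4 * 3].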
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID,
                              BasicBlock *VectorHeader) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and
  /// return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Collect poison-generating recipes that may generate a poison value that
  /// is used after vectorization, even when their operands are not poison.
  /// Those recipes meet the following conditions:
  /// * Contribute to the address computation of a recipe generating a widen
  ///   memory load/store (VPWidenMemoryInstructionRecipe or
  ///   VPInterleaveRecipe).
  /// * Such a widen memory load/store has at least one underlying Instruction
  ///   that is in a basic block that needs predication and after vectorization
  ///   the generated instruction won't be predicated.
  void collectPoisonGeneratingRecipes(VPTransformState &State);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
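  /// For example, with TripCount = 10, VF = 4 and UF = 2 the widened loop
  /// processes 10 - 10 % 8 = 8 iterations, leaving 2 for the scalar epilogue.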
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;

  // Holds the resume values for reductions in the loops, used to set the
  // correct start value of reduction PHIs when vectorizing the epilogue.
  SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
      ReductionResumeValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, firstly to setup the
/// skeleton and vectorize the main loop, and secondly to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
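///
/// The resulting control flow is, roughly (simplified; the actual skeleton
/// also contains the SCEV and memory runtime-check blocks):
///   main iteration count check -> main vector loop ->
///     epilogue iteration count check -> epilogue vector loop ->
///       scalar remainder loop -> exit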
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  std::pair<BasicBlock *, Value *>
  createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
  IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When a FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
                          << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

namespace llvm {

/// Return a value for Step multiplied by VF.
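/// For example, for VF = <vscale x 4> and Step = 2 this returns the runtime
/// value 8 * vscale; for the fixed VF = 4 it folds to the constant 8.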
Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF,
                       int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

static Value *getRuntimeVFAsFloat(IRBuilder<> &B, Type *FTy, ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {
  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec) ||
          isa<VPCanonicalIVPHIRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. Collect recipe if its underlying instruction has
      // poison-generating flags.
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && Instr->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);

      // Add new definitions to the worklist.
      for (VPValue *operand : CurRec->operands())
        if (VPDef *OpDef = operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a VPWidenRecipe
  // or VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
            Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

PHINode *InnerLoopVectorizer::getReductionResumeValue(
    const RecurrenceDescriptor &RdxDesc) {
  auto It = ReductionResumeValues.find(&RdxDesc);
  assert(It != ReductionResumeValues.end() &&
         "Expected to find a resume value for the reduction.");
  return It->second;
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorisation with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
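/// For example, fixed VFs sort before scalable ones:
///   4 < 8 < vscale x 2 < vscale x 4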
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way; its
  /// form after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decisions map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Collect all element types in the loop for which widening is needed.
  void collectElementTypesForWidening();

  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In loop reductions are collected into
  /// InLoopReductionChains.
  void collectInLoopReductions();

  /// Returns true if we should use strict in-order reductions for the given
  /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
  /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
  /// of FP operations.
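  /// For example, a floating-point 'Sum += A[i]' without reassociation
  /// (fast-math) flags must be accumulated in the original iteration order,
  /// so it is vectorized as an ordered, in-loop reduction rather than as a
  /// reassociating vector reduction with a final horizontal add.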
1344 SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers; 1345 }; 1346 1347 /// \return Returns information about the register usages of the loop for the 1348 /// given vectorization factors. 1349 SmallVector<RegisterUsage, 8> 1350 calculateRegisterUsage(ArrayRef<ElementCount> VFs); 1351 1352 /// Collect values we want to ignore in the cost model. 1353 void collectValuesToIgnore(); 1354 1355 /// Collect all element types in the loop for which widening is needed. 1356 void collectElementTypesForWidening(); 1357 1358 /// Split reductions into those that happen in the loop, and those that happen 1359 /// outside. In loop reductions are collected into InLoopReductionChains. 1360 void collectInLoopReductions(); 1361 1362 /// Returns true if we should use strict in-order reductions for the given 1363 /// RdxDesc. This is true if the -enable-strict-reductions flag is passed, 1364 /// the IsOrdered flag of RdxDesc is set and we do not allow reordering 1365 /// of FP operations. 1366 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) { 1367 return !Hints->allowReordering() && RdxDesc.isOrdered(); 1368 } 1369 1370 /// \returns The smallest bitwidth each instruction can be represented with. 1371 /// The vector equivalents of these instructions should be truncated to this 1372 /// type. 1373 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { 1374 return MinBWs; 1375 } 1376 1377 /// \returns True if it is more profitable to scalarize instruction \p I for 1378 /// vectorization factor \p VF. 1379 bool isProfitableToScalarize(Instruction *I, ElementCount VF) const { 1380 assert(VF.isVector() && 1381 "Profitable to scalarize relevant only for VF > 1."); 1382 1383 // Cost model is not run in the VPlan-native path - return conservative 1384 // result until this changes. 1385 if (EnableVPlanNativePath) 1386 return false; 1387 1388 auto Scalars = InstsToScalarize.find(VF); 1389 assert(Scalars != InstsToScalarize.end() && 1390 "VF not yet analyzed for scalarization profitability"); 1391 return Scalars->second.find(I) != Scalars->second.end(); 1392 } 1393 1394 /// Returns true if \p I is known to be uniform after vectorization. 1395 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const { 1396 if (VF.isScalar()) 1397 return true; 1398 1399 // Cost model is not run in the VPlan-native path - return conservative 1400 // result until this changes. 1401 if (EnableVPlanNativePath) 1402 return false; 1403 1404 auto UniformsPerVF = Uniforms.find(VF); 1405 assert(UniformsPerVF != Uniforms.end() && 1406 "VF not yet analyzed for uniformity"); 1407 return UniformsPerVF->second.count(I); 1408 } 1409 1410 /// Returns true if \p I is known to be scalar after vectorization. 1411 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const { 1412 if (VF.isScalar()) 1413 return true; 1414 1415 // Cost model is not run in the VPlan-native path - return conservative 1416 // result until this changes. 1417 if (EnableVPlanNativePath) 1418 return false; 1419 1420 auto ScalarsPerVF = Scalars.find(VF); 1421 assert(ScalarsPerVF != Scalars.end() && 1422 "Scalar values are not calculated for VF"); 1423 return ScalarsPerVF->second.count(I); 1424 } 1425 1426 /// \returns True if instruction \p I can be truncated to a smaller bitwidth 1427 /// for vectorization factor \p VF. 
  bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
    return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
                           ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group.
    // But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() && "Expected VF to be a vector VF");
    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
    assert(VF.isVector() && "Expected VF >=2");
    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
1507 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1508 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1509 1510 // If the truncate is free for the given types, return false. Replacing a 1511 // free truncate with an induction variable would add an induction variable 1512 // update instruction to each iteration of the loop. We exclude from this 1513 // check the primary induction variable since it will need an update 1514 // instruction regardless. 1515 Value *Op = Trunc->getOperand(0); 1516 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1517 return false; 1518 1519 // If the truncated value is not an induction variable, return false. 1520 return Legal->isInductionPhi(Op); 1521 } 1522 1523 /// Collects the instructions to scalarize for each predicated instruction in 1524 /// the loop. 1525 void collectInstsToScalarize(ElementCount VF); 1526 1527 /// Collect Uniform and Scalar values for the given \p VF. 1528 /// The sets depend on CM decision for Load/Store instructions 1529 /// that may be vectorized as interleave, gather-scatter or scalarized. 1530 void collectUniformsAndScalars(ElementCount VF) { 1531 // Do the analysis once. 1532 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) 1533 return; 1534 setCostBasedWideningDecision(VF); 1535 collectLoopUniforms(VF); 1536 collectLoopScalars(VF); 1537 } 1538 1539 /// Returns true if the target machine supports masked store operation 1540 /// for the given \p DataType and kind of access to \p Ptr. 1541 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const { 1542 return Legal->isConsecutivePtr(DataType, Ptr) && 1543 TTI.isLegalMaskedStore(DataType, Alignment); 1544 } 1545 1546 /// Returns true if the target machine supports masked load operation 1547 /// for the given \p DataType and kind of access to \p Ptr. 1548 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const { 1549 return Legal->isConsecutivePtr(DataType, Ptr) && 1550 TTI.isLegalMaskedLoad(DataType, Alignment); 1551 } 1552 1553 /// Returns true if the target machine can represent \p V as a masked gather 1554 /// or scatter operation. 1555 bool isLegalGatherOrScatter(Value *V, 1556 ElementCount VF = ElementCount::getFixed(1)) { 1557 bool LI = isa<LoadInst>(V); 1558 bool SI = isa<StoreInst>(V); 1559 if (!LI && !SI) 1560 return false; 1561 auto *Ty = getLoadStoreType(V); 1562 Align Align = getLoadStoreAlignment(V); 1563 if (VF.isVector()) 1564 Ty = VectorType::get(Ty, VF); 1565 return (LI && TTI.isLegalMaskedGather(Ty, Align)) || 1566 (SI && TTI.isLegalMaskedScatter(Ty, Align)); 1567 } 1568 1569 /// Returns true if the target machine supports all of the reduction 1570 /// variables found for the given VF. 1571 bool canVectorizeReductions(ElementCount VF) const { 1572 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 1573 const RecurrenceDescriptor &RdxDesc = Reduction.second; 1574 return TTI.isLegalToVectorizeReduction(RdxDesc, VF); 1575 })); 1576 } 1577 1578 /// Returns true if \p I is an instruction that will be scalarized with 1579 /// predication when vectorizing \p I with vectorization factor \p VF. Such 1580 /// instructions include conditional stores and instructions that may divide 1581 /// by zero. 1582 bool isScalarWithPredication(Instruction *I, ElementCount VF) const; 1583 1584 // Returns true if \p I is an instruction that will be predicated either 1585 // through scalar predication or masked load/store or masked gather/scatter. 
  // \p VF is the vectorization factor that will be used to vectorize \p I.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I, ElementCount VF,
                        bool IsKnownUniform = false) {
    // When we know the load is uniform and the original scalar loop was not
    // predicated we don't need to mark it as a predicated instruction. Any
    // vectorised blocks created when tail-folding are something artificial we
    // have introduced and we know there is always at least one active lane.
    // That's why we call Legal->blockNeedsPredication here because it doesn't
    // query tail-folding.
    if (IsKnownUniform && isa<LoadInst>(I) &&
        !Legal->blockNeedsPredication(I->getParent()))
      return false;
    if (!blockNeedsPredicationForAnyReason(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I, VF);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool
  memoryInstructionCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool
  interleavedAccessCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if we're required to use a scalar epilogue for at least
  /// the final iteration of the original loop.
  bool requiresScalarEpilogue(ElementCount VF) const {
    if (!isScalarEpilogueAllowed())
      return false;
    // If we might exit from anywhere but the latch, must run the exiting
    // iteration in scalar form.
    if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
      return true;
    return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is allowed, i.e. it has not been
  /// disallowed due to optsize or a loop hint annotation.
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns true if all loop blocks should be masked to fold the loop tail.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  /// Returns true if the instructions in this block require predication
  /// for any reason, e.g. because tail folding now requires a predicate
  /// or because the block in the original loop was predicated.
  bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
  /// nodes to the chain of instructions representing the reductions. Uses a
  /// MapVector to ensure deterministic iteration order.
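  /// For illustration, for an in-loop integer add reduction of the form
  ///   %phi  = phi i32 [ 0, %preheader ], [ %add2, %latch ]
  ///   %add1 = add i32 %phi, %a
  ///   %add2 = add i32 %add1, %b
  /// the map would associate %phi with the chain (%add1, %add2).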
  using ReductionChainMap =
      SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;

  /// Return the chain of instructions representing an inloop reduction.
  const ReductionChainMap &getInLoopReductionChains() const {
    return InLoopReductionChains;
  }

  /// Returns true if the Phi is part of an inloop reduction.
  bool isInLoopReduction(PHINode *Phi) const {
    return InLoopReductionChains.count(Phi);
  }

  /// Estimate cost of an intrinsic call instruction CI if it were vectorized
  /// with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
  InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;

  /// Estimate cost of a call instruction CI if it were vectorized with factor
  /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available, or it is too
  /// expensive.
  InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
                                    bool &NeedToScalarize) const;

  /// Returns true if the per-lane cost of VectorizationFactor A is lower than
  /// that of B.
  bool isMoreProfitable(const VectorizationFactor &A,
                        const VectorizationFactor &B) const;

  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factors for both
  /// fixed and scalable vectorization, where the minimum-known number of
  /// elements is a power-of-2 larger than zero. If scalable vectorization is
  /// disabled or unsupported, then the scalable part will be equal to
  /// ElementCount::getScalable(0).
  FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
                                           ElementCount UserVF,
                                           bool FoldTailByMasking);

  /// \return the maximized element count based on the target's vector
  /// registers and the loop trip-count, but limited to a maximum safe VF.
  /// This is a helper function of computeFeasibleMaxVF.
  /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
  /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
  /// D98509). The issue is currently under investigation and this workaround
  /// will be removed as soon as possible.
  ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
                                       unsigned SmallestType,
                                       unsigned WidestType,
                                       const ElementCount &MaxSafeVF,
                                       bool FoldTailByMasking);

  /// \return the maximum legal scalable VF, based on the safe max number
  /// of elements.
  ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
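  /// For example, a result of {8, false} would mean an estimated cost of 8
  /// where every contributing operation is expected to be scalarized, i.e.
  /// no vector instructions are expected to survive type legalization.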
  using VectorizationCostTy = std::pair<InstructionCost, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width. If \p Invalid is not nullptr, this function
  /// will add a pair(Instruction*, ElementCount) to \p Invalid for
  /// each instruction that has an Invalid cost for the given VF.
  using InstructionVFPair = std::pair<Instruction *, ElementCount>;
  VectorizationCostTy
  expectedCost(ElementCount VF,
               SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
                                     Type *&VectorTy);

  /// Return the cost of instructions in an inloop reduction pattern, if I is
  /// part of that pattern.
  Optional<InstructionCost>
  getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
                          TTI::TargetCostKind CostKind);

  /// Calculate vectorization cost of memory instruction \p I.
  InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);

  /// The cost computation for scalarized memory instruction.
  InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);

  /// The cost computation for interleaving group of memory instructions.
  InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);

  /// The cost computation for Gather/Scatter instruction.
  InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);

  /// The cost calculation for Load/Store instruction \p I with uniform pointer -
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  /// element)
  InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);

  /// Estimate the overhead of scalarizing an instruction. This is a
  /// convenience wrapper for the type-based getScalarizationOverhead API.
  InstructionCost getScalarizationOverhead(Instruction *I,
                                           ElementCount VF) const;

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or is not a multiple of the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;

  /// All blocks of the loop are to be masked to fold the tail of scalar
  /// iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// PHINodes of the reductions that should be expanded in-loop along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
  ReductionChainMap InLoopReductionChains;

  /// A Map of inloop reduction operations and their immediate chain operand.
  /// FIXME: This can be removed once reductions can be costed correctly in
  /// vplan. This was added to allow quick lookup of the inloop operations,
  /// without having to loop through InLoopReductionChains.
  DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                              ElementCount VF);

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true.
In general, a 1868 /// scalarized instruction will be represented by VF scalar values in the 1869 /// vectorized loop, each corresponding to an iteration of the original 1870 /// scalar loop. 1871 void collectLoopUniforms(ElementCount VF); 1872 1873 /// Collect the instructions that are scalar after vectorization. An 1874 /// instruction is scalar if it is known to be uniform or will be scalarized 1875 /// during vectorization. collectLoopScalars should only add non-uniform nodes 1876 /// to the list if they are used by a load/store instruction that is marked as 1877 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by 1878 /// VF values in the vectorized loop, each corresponding to an iteration of 1879 /// the original scalar loop. 1880 void collectLoopScalars(ElementCount VF); 1881 1882 /// Keeps cost model vectorization decision and cost for instructions. 1883 /// Right now it is used for memory instructions only. 1884 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1885 std::pair<InstWidening, InstructionCost>>; 1886 1887 DecisionList WideningDecisions; 1888 1889 /// Returns true if \p V is expected to be vectorized and it needs to be 1890 /// extracted. 1891 bool needsExtract(Value *V, ElementCount VF) const { 1892 Instruction *I = dyn_cast<Instruction>(V); 1893 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1894 TheLoop->isLoopInvariant(I)) 1895 return false; 1896 1897 // Assume we can vectorize V (and hence we need extraction) if the 1898 // scalars are not computed yet. This can happen, because it is called 1899 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1900 // the scalars are collected. That should be a safe assumption in most 1901 // cases, because we check if the operands have vectorizable types 1902 // beforehand in LoopVectorizationLegality. 1903 return Scalars.find(VF) == Scalars.end() || 1904 !isScalarAfterVectorization(I, VF); 1905 }; 1906 1907 /// Returns a range containing only operands needing to be extracted. 1908 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1909 ElementCount VF) const { 1910 return SmallVector<Value *, 4>(make_filter_range( 1911 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1912 } 1913 1914 /// Determines if we have the infrastructure to vectorize loop \p L and its 1915 /// epilogue, assuming the main loop is vectorized by \p VF. 1916 bool isCandidateForEpilogueVectorization(const Loop &L, 1917 const ElementCount VF) const; 1918 1919 /// Returns true if epilogue vectorization is considered profitable, and 1920 /// false otherwise. 1921 /// \p VF is the vectorization factor chosen for the original loop. 1922 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1923 1924 public: 1925 /// The loop that we evaluate. 1926 Loop *TheLoop; 1927 1928 /// Predicated scalar evolution analysis. 1929 PredicatedScalarEvolution &PSE; 1930 1931 /// Loop Info analysis. 1932 LoopInfo *LI; 1933 1934 /// Vectorization legality. 1935 LoopVectorizationLegality *Legal; 1936 1937 /// Vector target information. 1938 const TargetTransformInfo &TTI; 1939 1940 /// Target Library Info. 1941 const TargetLibraryInfo *TLI; 1942 1943 /// Demanded bits analysis. 1944 DemandedBits *DB; 1945 1946 /// Assumption cache. 1947 AssumptionCache *AC; 1948 1949 /// Interface to emit optimization remarks. 1950 OptimizationRemarkEmitter *ORE; 1951 1952 const Function *TheFunction; 1953 1954 /// Loop Vectorize Hint. 
1955 const LoopVectorizeHints *Hints; 1956 1957 /// The interleave access information contains groups of interleaved accesses 1958 /// with the same stride and close to each other. 1959 InterleavedAccessInfo &InterleaveInfo; 1960 1961 /// Values to ignore in the cost model. 1962 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1963 1964 /// Values to ignore in the cost model when VF > 1. 1965 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1966 1967 /// All element types found in the loop. 1968 SmallPtrSet<Type *, 16> ElementTypesInLoop; 1969 1970 /// Profitable vector factors. 1971 SmallVector<VectorizationFactor, 8> ProfitableVFs; 1972 }; 1973 } // end namespace llvm 1974 1975 /// Helper struct to manage generating runtime checks for vectorization. 1976 /// 1977 /// The runtime checks are created up-front in temporary blocks to allow better 1978 /// estimating the cost and un-linked from the existing IR. After deciding to 1979 /// vectorize, the checks are moved back. If deciding not to vectorize, the 1980 /// temporary blocks are completely removed. 1981 class GeneratedRTChecks { 1982 /// Basic block which contains the generated SCEV checks, if any. 1983 BasicBlock *SCEVCheckBlock = nullptr; 1984 1985 /// The value representing the result of the generated SCEV checks. If it is 1986 /// nullptr, either no SCEV checks have been generated or they have been used. 1987 Value *SCEVCheckCond = nullptr; 1988 1989 /// Basic block which contains the generated memory runtime checks, if any. 1990 BasicBlock *MemCheckBlock = nullptr; 1991 1992 /// The value representing the result of the generated memory runtime checks. 1993 /// If it is nullptr, either no memory runtime checks have been generated or 1994 /// they have been used. 1995 Value *MemRuntimeCheckCond = nullptr; 1996 1997 DominatorTree *DT; 1998 LoopInfo *LI; 1999 2000 SCEVExpander SCEVExp; 2001 SCEVExpander MemCheckExp; 2002 2003 public: 2004 GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI, 2005 const DataLayout &DL) 2006 : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"), 2007 MemCheckExp(SE, DL, "scev.check") {} 2008 2009 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can 2010 /// accurately estimate the cost of the runtime checks. The blocks are 2011 /// un-linked from the IR and is added back during vector code generation. If 2012 /// there is no vector code generation, the check blocks are removed 2013 /// completely. 2014 void Create(Loop *L, const LoopAccessInfo &LAI, 2015 const SCEVUnionPredicate &UnionPred) { 2016 2017 BasicBlock *LoopHeader = L->getHeader(); 2018 BasicBlock *Preheader = L->getLoopPreheader(); 2019 2020 // Use SplitBlock to create blocks for SCEV & memory runtime checks to 2021 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those 2022 // may be used by SCEVExpander. The blocks will be un-linked from their 2023 // predecessors and removed from LI & DT at the end of the function. 2024 if (!UnionPred.isAlwaysTrue()) { 2025 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI, 2026 nullptr, "vector.scevcheck"); 2027 2028 SCEVCheckCond = SCEVExp.expandCodeForPredicate( 2029 &UnionPred, SCEVCheckBlock->getTerminator()); 2030 } 2031 2032 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking(); 2033 if (RtPtrChecking.Need) { 2034 auto *Pred = SCEVCheckBlock ? 
SCEVCheckBlock : Preheader; 2035 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr, 2036 "vector.memcheck"); 2037 2038 MemRuntimeCheckCond = 2039 addRuntimeChecks(MemCheckBlock->getTerminator(), L, 2040 RtPtrChecking.getChecks(), MemCheckExp); 2041 assert(MemRuntimeCheckCond && 2042 "no RT checks generated although RtPtrChecking " 2043 "claimed checks are required"); 2044 } 2045 2046 if (!MemCheckBlock && !SCEVCheckBlock) 2047 return; 2048 2049 // Unhook the temporary block with the checks, update various places 2050 // accordingly. 2051 if (SCEVCheckBlock) 2052 SCEVCheckBlock->replaceAllUsesWith(Preheader); 2053 if (MemCheckBlock) 2054 MemCheckBlock->replaceAllUsesWith(Preheader); 2055 2056 if (SCEVCheckBlock) { 2057 SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2058 new UnreachableInst(Preheader->getContext(), SCEVCheckBlock); 2059 Preheader->getTerminator()->eraseFromParent(); 2060 } 2061 if (MemCheckBlock) { 2062 MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2063 new UnreachableInst(Preheader->getContext(), MemCheckBlock); 2064 Preheader->getTerminator()->eraseFromParent(); 2065 } 2066 2067 DT->changeImmediateDominator(LoopHeader, Preheader); 2068 if (MemCheckBlock) { 2069 DT->eraseNode(MemCheckBlock); 2070 LI->removeBlock(MemCheckBlock); 2071 } 2072 if (SCEVCheckBlock) { 2073 DT->eraseNode(SCEVCheckBlock); 2074 LI->removeBlock(SCEVCheckBlock); 2075 } 2076 } 2077 2078 /// Remove the created SCEV & memory runtime check blocks & instructions, if 2079 /// unused. 2080 ~GeneratedRTChecks() { 2081 SCEVExpanderCleaner SCEVCleaner(SCEVExp); 2082 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp); 2083 if (!SCEVCheckCond) 2084 SCEVCleaner.markResultUsed(); 2085 2086 if (!MemRuntimeCheckCond) 2087 MemCheckCleaner.markResultUsed(); 2088 2089 if (MemRuntimeCheckCond) { 2090 auto &SE = *MemCheckExp.getSE(); 2091 // Memory runtime check generation creates compares that use expanded 2092 // values. Remove them before running the SCEVExpanderCleaners. 2093 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { 2094 if (MemCheckExp.isInsertedInstruction(&I)) 2095 continue; 2096 SE.forgetValue(&I); 2097 I.eraseFromParent(); 2098 } 2099 } 2100 MemCheckCleaner.cleanup(); 2101 SCEVCleaner.cleanup(); 2102 2103 if (SCEVCheckCond) 2104 SCEVCheckBlock->eraseFromParent(); 2105 if (MemRuntimeCheckCond) 2106 MemCheckBlock->eraseFromParent(); 2107 } 2108 2109 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and 2110 /// adjusts the branches to branch to the vector preheader or \p Bypass, 2111 /// depending on the generated condition. 2112 BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass, 2113 BasicBlock *LoopVectorPreHeader, 2114 BasicBlock *LoopExitBlock) { 2115 if (!SCEVCheckCond) 2116 return nullptr; 2117 if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond)) 2118 if (C->isZero()) 2119 return nullptr; 2120 2121 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2122 2123 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); 2124 // Create new preheader for vector loop. 
2125 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2126 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI); 2127 2128 SCEVCheckBlock->getTerminator()->eraseFromParent(); 2129 SCEVCheckBlock->moveBefore(LoopVectorPreHeader); 2130 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2131 SCEVCheckBlock); 2132 2133 DT->addNewBlock(SCEVCheckBlock, Pred); 2134 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock); 2135 2136 ReplaceInstWithInst( 2137 SCEVCheckBlock->getTerminator(), 2138 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond)); 2139 // Mark the check as used, to prevent it from being removed during cleanup. 2140 SCEVCheckCond = nullptr; 2141 return SCEVCheckBlock; 2142 } 2143 2144 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts 2145 /// the branches to branch to the vector preheader or \p Bypass, depending on 2146 /// the generated condition. 2147 BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass, 2148 BasicBlock *LoopVectorPreHeader) { 2149 // Check if we generated code that checks in runtime if arrays overlap. 2150 if (!MemRuntimeCheckCond) 2151 return nullptr; 2152 2153 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2154 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2155 MemCheckBlock); 2156 2157 DT->addNewBlock(MemCheckBlock, Pred); 2158 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock); 2159 MemCheckBlock->moveBefore(LoopVectorPreHeader); 2160 2161 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2162 PL->addBasicBlockToLoop(MemCheckBlock, *LI); 2163 2164 ReplaceInstWithInst( 2165 MemCheckBlock->getTerminator(), 2166 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond)); 2167 MemCheckBlock->getTerminator()->setDebugLoc( 2168 Pred->getTerminator()->getDebugLoc()); 2169 2170 // Mark the check as used, to prevent it from being removed during cleanup. 2171 MemRuntimeCheckCond = nullptr; 2172 return MemCheckBlock; 2173 } 2174 }; 2175 2176 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 2177 // vectorization. The loop needs to be annotated with #pragma omp simd 2178 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 2179 // vector length information is not provided, vectorization is not considered 2180 // explicit. Interleave hints are not allowed either. These limitations will be 2181 // relaxed in the future. 2182 // Please, note that we are currently forced to abuse the pragma 'clang 2183 // vectorize' semantics. This pragma provides *auto-vectorization hints* 2184 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 2185 // provides *explicit vectorization hints* (LV can bypass legal checks and 2186 // assume that vectorization is legal). However, both hints are implemented 2187 // using the same metadata (llvm.loop.vectorize, processed by 2188 // LoopVectorizeHints). This will be fixed in the future when the native IR 2189 // representation for pragma 'omp simd' is introduced. 2190 static bool isExplicitVecOuterLoop(Loop *OuterLp, 2191 OptimizationRemarkEmitter *ORE) { 2192 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 2193 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 2194 2195 // Only outer loops with an explicit vectorization hint are supported. 2196 // Unannotated outer loops are ignored. 
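  // For illustration, an outer loop such as
  //   #pragma clang loop vectorize(enable) vectorize_width(4)
  //   for (int i = 0; i < N; ++i)
  //     for (int j = 0; j < M; ++j)
  //       A[i][j] += B[i][j];
  // (or the '#pragma omp simd simdlen(4)' equivalent) carries the explicit
  // hint and vector length expected here.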
2197 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 2198 return false; 2199 2200 Function *Fn = OuterLp->getHeader()->getParent(); 2201 if (!Hints.allowVectorization(Fn, OuterLp, 2202 true /*VectorizeOnlyWhenForced*/)) { 2203 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 2204 return false; 2205 } 2206 2207 if (Hints.getInterleave() > 1) { 2208 // TODO: Interleave support is future work. 2209 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 2210 "outer loops.\n"); 2211 Hints.emitRemarkWithHints(); 2212 return false; 2213 } 2214 2215 return true; 2216 } 2217 2218 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 2219 OptimizationRemarkEmitter *ORE, 2220 SmallVectorImpl<Loop *> &V) { 2221 // Collect inner loops and outer loops without irreducible control flow. For 2222 // now, only collect outer loops that have explicit vectorization hints. If we 2223 // are stress testing the VPlan H-CFG construction, we collect the outermost 2224 // loop of every loop nest. 2225 if (L.isInnermost() || VPlanBuildStressTest || 2226 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 2227 LoopBlocksRPO RPOT(&L); 2228 RPOT.perform(LI); 2229 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 2230 V.push_back(&L); 2231 // TODO: Collect inner loops inside marked outer loops in case 2232 // vectorization fails for the outer loop. Do not invoke 2233 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 2234 // already known to be reducible. We can use an inherited attribute for 2235 // that. 2236 return; 2237 } 2238 } 2239 for (Loop *InnerL : L) 2240 collectSupportedLoops(*InnerL, LI, ORE, V); 2241 } 2242 2243 namespace { 2244 2245 /// The LoopVectorize Pass. 2246 struct LoopVectorize : public FunctionPass { 2247 /// Pass identification, replacement for typeid 2248 static char ID; 2249 2250 LoopVectorizePass Impl; 2251 2252 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 2253 bool VectorizeOnlyWhenForced = false) 2254 : FunctionPass(ID), 2255 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 2256 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2257 } 2258 2259 bool runOnFunction(Function &F) override { 2260 if (skipFunction(F)) 2261 return false; 2262 2263 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2264 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2265 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2266 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2267 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2268 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2269 auto *TLI = TLIP ? 
&TLIP->getTLI(F) : nullptr; 2270 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2271 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2272 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2273 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2274 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2275 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 2276 2277 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2278 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2279 2280 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2281 GetLAA, *ORE, PSI).MadeAnyChange; 2282 } 2283 2284 void getAnalysisUsage(AnalysisUsage &AU) const override { 2285 AU.addRequired<AssumptionCacheTracker>(); 2286 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2287 AU.addRequired<DominatorTreeWrapperPass>(); 2288 AU.addRequired<LoopInfoWrapperPass>(); 2289 AU.addRequired<ScalarEvolutionWrapperPass>(); 2290 AU.addRequired<TargetTransformInfoWrapperPass>(); 2291 AU.addRequired<AAResultsWrapperPass>(); 2292 AU.addRequired<LoopAccessLegacyAnalysis>(); 2293 AU.addRequired<DemandedBitsWrapperPass>(); 2294 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2295 AU.addRequired<InjectTLIMappingsLegacy>(); 2296 2297 // We currently do not preserve loopinfo/dominator analyses with outer loop 2298 // vectorization. Until this is addressed, mark these analyses as preserved 2299 // only for non-VPlan-native path. 2300 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 2301 if (!EnableVPlanNativePath) { 2302 AU.addPreserved<LoopInfoWrapperPass>(); 2303 AU.addPreserved<DominatorTreeWrapperPass>(); 2304 } 2305 2306 AU.addPreserved<BasicAAWrapperPass>(); 2307 AU.addPreserved<GlobalsAAWrapperPass>(); 2308 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 2309 } 2310 }; 2311 2312 } // end anonymous namespace 2313 2314 //===----------------------------------------------------------------------===// 2315 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2316 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2317 //===----------------------------------------------------------------------===// 2318 2319 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2320 // We need to place the broadcast of invariant variables outside the loop, 2321 // but only if it's proven safe to do so. Else, broadcast will be inside 2322 // vector loop body. 2323 Instruction *Instr = dyn_cast<Instruction>(V); 2324 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2325 (!Instr || 2326 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2327 // Place the code for broadcasting invariant variables in the new preheader. 2328 IRBuilder<>::InsertPointGuard Guard(Builder); 2329 if (SafeToHoist) 2330 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2331 2332 // Broadcast the scalar into all locations in the vector. 2333 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2334 2335 return Shuf; 2336 } 2337 2338 /// This function adds 2339 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...) 2340 /// to each vector element of Val. The sequence starts at StartIndex. 2341 /// \p Opcode is relevant for FP induction variable. 
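/// For illustration, with a fixed VF of 4, StartIdx = 0 and Step = 2, an
/// integer induction produces the step vector <0, 2, 4, 6>, which is then
/// added element-wise to Val.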
2342 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step, 2343 Instruction::BinaryOps BinOp, ElementCount VF, 2344 IRBuilder<> &Builder) { 2345 assert(VF.isVector() && "only vector VFs are supported"); 2346 2347 // Create and check the types. 2348 auto *ValVTy = cast<VectorType>(Val->getType()); 2349 ElementCount VLen = ValVTy->getElementCount(); 2350 2351 Type *STy = Val->getType()->getScalarType(); 2352 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2353 "Induction Step must be an integer or FP"); 2354 assert(Step->getType() == STy && "Step has wrong type"); 2355 2356 SmallVector<Constant *, 8> Indices; 2357 2358 // Create a vector of consecutive numbers from zero to VF. 2359 VectorType *InitVecValVTy = ValVTy; 2360 Type *InitVecValSTy = STy; 2361 if (STy->isFloatingPointTy()) { 2362 InitVecValSTy = 2363 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits()); 2364 InitVecValVTy = VectorType::get(InitVecValSTy, VLen); 2365 } 2366 Value *InitVec = Builder.CreateStepVector(InitVecValVTy); 2367 2368 // Splat the StartIdx 2369 Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx); 2370 2371 if (STy->isIntegerTy()) { 2372 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat); 2373 Step = Builder.CreateVectorSplat(VLen, Step); 2374 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2375 // FIXME: The newly created binary instructions should contain nsw/nuw 2376 // flags, which can be found from the original scalar operations. 2377 Step = Builder.CreateMul(InitVec, Step); 2378 return Builder.CreateAdd(Val, Step, "induction"); 2379 } 2380 2381 // Floating point induction. 2382 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2383 "Binary Opcode should be specified for FP induction"); 2384 InitVec = Builder.CreateUIToFP(InitVec, ValVTy); 2385 InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat); 2386 2387 Step = Builder.CreateVectorSplat(VLen, Step); 2388 Value *MulOp = Builder.CreateFMul(InitVec, Step); 2389 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2390 } 2391 2392 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 2393 const InductionDescriptor &II, Value *Step, Value *Start, 2394 Instruction *EntryVal, VPValue *Def, VPTransformState &State) { 2395 IRBuilder<> &Builder = State.Builder; 2396 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2397 "Expected either an induction phi-node or a truncate of it!"); 2398 2399 // Construct the initial value of the vector IV in the vector loop preheader 2400 auto CurrIP = Builder.saveIP(); 2401 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2402 if (isa<TruncInst>(EntryVal)) { 2403 assert(Start->getType()->isIntegerTy() && 2404 "Truncation requires an integer type"); 2405 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2406 Step = Builder.CreateTrunc(Step, TruncType); 2407 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2408 } 2409 2410 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0); 2411 Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start); 2412 Value *SteppedStart = getStepVector( 2413 SplatStart, Zero, Step, II.getInductionOpcode(), State.VF, State.Builder); 2414 2415 // We create vector phi nodes for both integer and floating-point induction 2416 // variables. Here, we determine the kind of arithmetic we will perform. 
2417 Instruction::BinaryOps AddOp; 2418 Instruction::BinaryOps MulOp; 2419 if (Step->getType()->isIntegerTy()) { 2420 AddOp = Instruction::Add; 2421 MulOp = Instruction::Mul; 2422 } else { 2423 AddOp = II.getInductionOpcode(); 2424 MulOp = Instruction::FMul; 2425 } 2426 2427 // Multiply the vectorization factor by the step using integer or 2428 // floating-point arithmetic as appropriate. 2429 Type *StepType = Step->getType(); 2430 Value *RuntimeVF; 2431 if (Step->getType()->isFloatingPointTy()) 2432 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF); 2433 else 2434 RuntimeVF = getRuntimeVF(Builder, StepType, State.VF); 2435 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 2436 2437 // Create a vector splat to use in the induction update. 2438 // 2439 // FIXME: If the step is non-constant, we create the vector splat with 2440 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2441 // handle a constant vector splat. 2442 Value *SplatVF = isa<Constant>(Mul) 2443 ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul)) 2444 : Builder.CreateVectorSplat(State.VF, Mul); 2445 Builder.restoreIP(CurrIP); 2446 2447 // We may need to add the step a number of times, depending on the unroll 2448 // factor. The last of those goes into the PHI. 2449 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2450 &*LoopVectorBody->getFirstInsertionPt()); 2451 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 2452 Instruction *LastInduction = VecInd; 2453 for (unsigned Part = 0; Part < UF; ++Part) { 2454 State.set(Def, LastInduction, Part); 2455 2456 if (isa<TruncInst>(EntryVal)) 2457 addMetadata(LastInduction, EntryVal); 2458 2459 LastInduction = cast<Instruction>( 2460 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 2461 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 2462 } 2463 2464 // Move the last step to the end of the latch block. This ensures consistent 2465 // placement of all induction updates. 2466 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2467 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2468 LastInduction->moveBefore(Br); 2469 LastInduction->setName("vec.ind.next"); 2470 2471 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2472 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2473 } 2474 2475 void InnerLoopVectorizer::widenIntOrFpInduction( 2476 PHINode *IV, VPWidenIntOrFpInductionRecipe *Def, VPTransformState &State, 2477 Value *CanonicalIV) { 2478 Value *Start = Def->getStartValue()->getLiveInIRValue(); 2479 const InductionDescriptor &ID = Def->getInductionDescriptor(); 2480 TruncInst *Trunc = Def->getTruncInst(); 2481 IRBuilder<> &Builder = State.Builder; 2482 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2483 assert(!State.VF.isZero() && "VF must be non-zero"); 2484 2485 // The value from the original loop to which we are mapping the new induction 2486 // variable. 2487 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2488 2489 auto &DL = EntryVal->getModule()->getDataLayout(); 2490 2491 // Generate code for the induction step. 
Note that induction steps are 2492 // required to be loop-invariant 2493 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 2494 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 2495 "Induction step should be loop invariant"); 2496 if (PSE.getSE()->isSCEVable(IV->getType())) { 2497 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2498 return Exp.expandCodeFor(Step, Step->getType(), 2499 State.CFG.VectorPreHeader->getTerminator()); 2500 } 2501 return cast<SCEVUnknown>(Step)->getValue(); 2502 }; 2503 2504 // The scalar value to broadcast. This is derived from the canonical 2505 // induction variable. If a truncation type is given, truncate the canonical 2506 // induction variable and step. Otherwise, derive these values from the 2507 // induction descriptor. 2508 auto CreateScalarIV = [&](Value *&Step) -> Value * { 2509 Value *ScalarIV = CanonicalIV; 2510 Type *NeededType = IV->getType(); 2511 if (!Def->isCanonical() || ScalarIV->getType() != NeededType) { 2512 ScalarIV = 2513 NeededType->isIntegerTy() 2514 ? Builder.CreateSExtOrTrunc(ScalarIV, NeededType) 2515 : Builder.CreateCast(Instruction::SIToFP, ScalarIV, NeededType); 2516 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID, 2517 State.CFG.PrevBB); 2518 ScalarIV->setName("offset.idx"); 2519 } 2520 if (Trunc) { 2521 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2522 assert(Step->getType()->isIntegerTy() && 2523 "Truncation requires an integer step"); 2524 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 2525 Step = Builder.CreateTrunc(Step, TruncType); 2526 } 2527 return ScalarIV; 2528 }; 2529 2530 // Fast-math-flags propagate from the original induction instruction. 2531 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 2532 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) 2533 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); 2534 2535 // Now do the actual transformations, and start with creating the step value. 2536 Value *Step = CreateStepValue(ID.getStep()); 2537 if (State.VF.isScalar()) { 2538 Value *ScalarIV = CreateScalarIV(Step); 2539 Type *ScalarTy = IntegerType::get(ScalarIV->getContext(), 2540 Step->getType()->getScalarSizeInBits()); 2541 2542 Instruction::BinaryOps IncOp = ID.getInductionOpcode(); 2543 if (IncOp == Instruction::BinaryOpsEnd) 2544 IncOp = Instruction::Add; 2545 for (unsigned Part = 0; Part < UF; ++Part) { 2546 Value *StartIdx = ConstantInt::get(ScalarTy, Part); 2547 Instruction::BinaryOps MulOp = Instruction::Mul; 2548 if (Step->getType()->isFloatingPointTy()) { 2549 StartIdx = Builder.CreateUIToFP(StartIdx, Step->getType()); 2550 MulOp = Instruction::FMul; 2551 } 2552 2553 Value *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2554 Value *EntryPart = Builder.CreateBinOp(IncOp, ScalarIV, Mul, "induction"); 2555 State.set(Def, EntryPart, Part); 2556 if (Trunc) { 2557 assert(!Step->getType()->isFloatingPointTy() && 2558 "fp inductions shouldn't be truncated"); 2559 addMetadata(EntryPart, Trunc); 2560 } 2561 } 2562 return; 2563 } 2564 2565 // Create a new independent vector induction variable, if one is needed. 2566 if (Def->needsVectorIV()) 2567 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State); 2568 2569 if (Def->needsScalarIV()) { 2570 // Create scalar steps that can be used by instructions we will later 2571 // scalarize. Note that the addition of the scalar steps will not increase 2572 // the number of instructions in the loop in the common case prior to 2573 // InstCombine. 
We will be trading one vector extract for each scalar step. 2574 Value *ScalarIV = CreateScalarIV(Step); 2575 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State); 2576 } 2577 } 2578 2579 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2580 Instruction *EntryVal, 2581 const InductionDescriptor &ID, 2582 VPValue *Def, 2583 VPTransformState &State) { 2584 IRBuilder<> &Builder = State.Builder; 2585 // We shouldn't have to build scalar steps if we aren't vectorizing. 2586 assert(State.VF.isVector() && "VF should be greater than one"); 2587 // Get the value type and ensure it and the step have the same integer type. 2588 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2589 assert(ScalarIVTy == Step->getType() && 2590 "Val and Step should have the same type"); 2591 2592 // We build scalar steps for both integer and floating-point induction 2593 // variables. Here, we determine the kind of arithmetic we will perform. 2594 Instruction::BinaryOps AddOp; 2595 Instruction::BinaryOps MulOp; 2596 if (ScalarIVTy->isIntegerTy()) { 2597 AddOp = Instruction::Add; 2598 MulOp = Instruction::Mul; 2599 } else { 2600 AddOp = ID.getInductionOpcode(); 2601 MulOp = Instruction::FMul; 2602 } 2603 2604 // Determine the number of scalars we need to generate for each unroll 2605 // iteration. 2606 bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def); 2607 unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue(); 2608 // Compute the scalar steps and save the results in State. 2609 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2610 ScalarIVTy->getScalarSizeInBits()); 2611 Type *VecIVTy = nullptr; 2612 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; 2613 if (!FirstLaneOnly && State.VF.isScalable()) { 2614 VecIVTy = VectorType::get(ScalarIVTy, State.VF); 2615 UnitStepVec = 2616 Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF)); 2617 SplatStep = Builder.CreateVectorSplat(State.VF, Step); 2618 SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV); 2619 } 2620 2621 for (unsigned Part = 0; Part < State.UF; ++Part) { 2622 Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part); 2623 2624 if (!FirstLaneOnly && State.VF.isScalable()) { 2625 auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0); 2626 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); 2627 if (ScalarIVTy->isFloatingPointTy()) 2628 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); 2629 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); 2630 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); 2631 State.set(Def, Add, Part); 2632 // It's useful to record the lane values too for the known minimum number 2633 // of elements so we do those below. This improves the code quality when 2634 // trying to extract the first element, for example. 2635 } 2636 2637 if (ScalarIVTy->isFloatingPointTy()) 2638 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); 2639 2640 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2641 Value *StartIdx = Builder.CreateBinOp( 2642 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); 2643 // The step returned by `createStepForVF` is a runtime-evaluated value 2644 // when VF is scalable. Otherwise, it should be folded into a Constant. 
2645 assert((State.VF.isScalable() || isa<Constant>(StartIdx)) && 2646 "Expected StartIdx to be folded to a constant when VF is not " 2647 "scalable"); 2648 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2649 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2650 State.set(Def, Add, VPIteration(Part, Lane)); 2651 } 2652 } 2653 } 2654 2655 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2656 const VPIteration &Instance, 2657 VPTransformState &State) { 2658 Value *ScalarInst = State.get(Def, Instance); 2659 Value *VectorValue = State.get(Def, Instance.Part); 2660 VectorValue = Builder.CreateInsertElement( 2661 VectorValue, ScalarInst, 2662 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2663 State.set(Def, VectorValue, Instance.Part); 2664 } 2665 2666 // Return whether we allow using masked interleave-groups (for dealing with 2667 // strided loads/stores that reside in predicated blocks, or for dealing 2668 // with gaps). 2669 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2670 // If an override option has been passed in for interleaved accesses, use it. 2671 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2672 return EnableMaskedInterleavedMemAccesses; 2673 2674 return TTI.enableMaskedInterleavedAccessVectorization(); 2675 } 2676 2677 // Try to vectorize the interleave group that \p Instr belongs to. 2678 // 2679 // E.g. Translate following interleaved load group (factor = 3): 2680 // for (i = 0; i < N; i+=3) { 2681 // R = Pic[i]; // Member of index 0 2682 // G = Pic[i+1]; // Member of index 1 2683 // B = Pic[i+2]; // Member of index 2 2684 // ... // do something to R, G, B 2685 // } 2686 // To: 2687 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2688 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2689 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2690 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2691 // 2692 // Or translate following interleaved store group (factor = 3): 2693 // for (i = 0; i < N; i+=3) { 2694 // ... do something to R, G, B 2695 // Pic[i] = R; // Member of index 0 2696 // Pic[i+1] = G; // Member of index 1 2697 // Pic[i+2] = B; // Member of index 2 2698 // } 2699 // To: 2700 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2701 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2702 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2703 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2704 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2705 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2706 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2707 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2708 VPValue *BlockInMask) { 2709 Instruction *Instr = Group->getInsertPos(); 2710 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2711 2712 // Prepare for the vector type of the interleaved load/store. 2713 Type *ScalarTy = getLoadStoreType(Instr); 2714 unsigned InterleaveFactor = Group->getFactor(); 2715 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2716 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2717 2718 // Prepare for the new pointers. 2719 SmallVector<Value *, 2> AddrParts; 2720 unsigned Index = Group->getIndex(Instr); 2721 2722 // TODO: extend the masked interleaved-group support to reversed access. 
2723 assert((!BlockInMask || !Group->isReverse()) && 2724 "Reversed masked interleave-group not supported."); 2725 2726 // If the group is reverse, adjust the index to refer to the last vector lane 2727 // instead of the first. We adjust the index from the first vector lane, 2728 // rather than directly getting the pointer for lane VF - 1, because the 2729 // pointer operand of the interleaved access is supposed to be uniform. For 2730 // uniform instructions, we're only required to generate a value for the 2731 // first vector lane in each unroll iteration. 2732 if (Group->isReverse()) 2733 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2734 2735 for (unsigned Part = 0; Part < UF; Part++) { 2736 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2737 setDebugLocFromInst(AddrPart); 2738 2739 // Notice current instruction could be any index. Need to adjust the address 2740 // to the member of index 0. 2741 // 2742 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2743 // b = A[i]; // Member of index 0 2744 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2745 // 2746 // E.g. A[i+1] = a; // Member of index 1 2747 // A[i] = b; // Member of index 0 2748 // A[i+2] = c; // Member of index 2 (Current instruction) 2749 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2750 2751 bool InBounds = false; 2752 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2753 InBounds = gep->isInBounds(); 2754 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2755 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2756 2757 // Cast to the vector pointer type. 2758 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2759 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2760 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2761 } 2762 2763 setDebugLocFromInst(Instr); 2764 Value *PoisonVec = PoisonValue::get(VecTy); 2765 2766 Value *MaskForGaps = nullptr; 2767 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2768 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2769 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2770 } 2771 2772 // Vectorize the interleaved load group. 2773 if (isa<LoadInst>(Instr)) { 2774 // For each unroll part, create a wide load for the group. 2775 SmallVector<Value *, 2> NewLoads; 2776 for (unsigned Part = 0; Part < UF; Part++) { 2777 Instruction *NewLoad; 2778 if (BlockInMask || MaskForGaps) { 2779 assert(useMaskedInterleavedAccesses(*TTI) && 2780 "masked interleaved groups are not allowed."); 2781 Value *GroupMask = MaskForGaps; 2782 if (BlockInMask) { 2783 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2784 Value *ShuffledMask = Builder.CreateShuffleVector( 2785 BlockInMaskPart, 2786 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2787 "interleaved.mask"); 2788 GroupMask = MaskForGaps 2789 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2790 MaskForGaps) 2791 : ShuffledMask; 2792 } 2793 NewLoad = 2794 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), 2795 GroupMask, PoisonVec, "wide.masked.vec"); 2796 } 2797 else 2798 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2799 Group->getAlign(), "wide.vec"); 2800 Group->addMetadata(NewLoad); 2801 NewLoads.push_back(NewLoad); 2802 } 2803 2804 // For each member in the group, shuffle out the appropriate data from the 2805 // wide loads. 
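    // For the R,G,B running example above (factor = 3, VF = 4), member I is
    // extracted with the stride mask starting at I; e.g. member 1 (G) uses
    // the shuffle mask <1, 4, 7, 10> on the wide load.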
2806 unsigned J = 0; 2807 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2808 Instruction *Member = Group->getMember(I); 2809 2810 // Skip the gaps in the group. 2811 if (!Member) 2812 continue; 2813 2814 auto StrideMask = 2815 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2816 for (unsigned Part = 0; Part < UF; Part++) { 2817 Value *StridedVec = Builder.CreateShuffleVector( 2818 NewLoads[Part], StrideMask, "strided.vec"); 2819 2820 // If this member has different type, cast the result type. 2821 if (Member->getType() != ScalarTy) { 2822 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2823 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2824 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2825 } 2826 2827 if (Group->isReverse()) 2828 StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse"); 2829 2830 State.set(VPDefs[J], StridedVec, Part); 2831 } 2832 ++J; 2833 } 2834 return; 2835 } 2836 2837 // The sub vector type for current instruction. 2838 auto *SubVT = VectorType::get(ScalarTy, VF); 2839 2840 // Vectorize the interleaved store group. 2841 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2842 assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) && 2843 "masked interleaved groups are not allowed."); 2844 assert((!MaskForGaps || !VF.isScalable()) && 2845 "masking gaps for scalable vectors is not yet supported."); 2846 for (unsigned Part = 0; Part < UF; Part++) { 2847 // Collect the stored vector from each member. 2848 SmallVector<Value *, 4> StoredVecs; 2849 for (unsigned i = 0; i < InterleaveFactor; i++) { 2850 assert((Group->getMember(i) || MaskForGaps) && 2851 "Fail to get a member from an interleaved store group"); 2852 Instruction *Member = Group->getMember(i); 2853 2854 // Skip the gaps in the group. 2855 if (!Member) { 2856 Value *Undef = PoisonValue::get(SubVT); 2857 StoredVecs.push_back(Undef); 2858 continue; 2859 } 2860 2861 Value *StoredVec = State.get(StoredValues[i], Part); 2862 2863 if (Group->isReverse()) 2864 StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse"); 2865 2866 // If this member has different type, cast it to a unified type. 2867 2868 if (StoredVec->getType() != SubVT) 2869 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2870 2871 StoredVecs.push_back(StoredVec); 2872 } 2873 2874 // Concatenate all vectors into a wide vector. 2875 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2876 2877 // Interleave the elements in the wide vector. 2878 Value *IVec = Builder.CreateShuffleVector( 2879 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2880 "interleaved.vec"); 2881 2882 Instruction *NewStoreInstr; 2883 if (BlockInMask || MaskForGaps) { 2884 Value *GroupMask = MaskForGaps; 2885 if (BlockInMask) { 2886 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2887 Value *ShuffledMask = Builder.CreateShuffleVector( 2888 BlockInMaskPart, 2889 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2890 "interleaved.mask"); 2891 GroupMask = MaskForGaps ? 
Builder.CreateBinOp(Instruction::And, 2892 ShuffledMask, MaskForGaps) 2893 : ShuffledMask; 2894 } 2895 NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part], 2896 Group->getAlign(), GroupMask); 2897 } else 2898 NewStoreInstr = 2899 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2900 2901 Group->addMetadata(NewStoreInstr); 2902 } 2903 } 2904 2905 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2906 VPReplicateRecipe *RepRecipe, 2907 const VPIteration &Instance, 2908 bool IfPredicateInstr, 2909 VPTransformState &State) { 2910 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2911 2912 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 2913 // the first lane and part. 2914 if (isa<NoAliasScopeDeclInst>(Instr)) 2915 if (!Instance.isFirstIteration()) 2916 return; 2917 2918 setDebugLocFromInst(Instr); 2919 2920 // Does this instruction return a value ? 2921 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2922 2923 Instruction *Cloned = Instr->clone(); 2924 if (!IsVoidRetTy) 2925 Cloned->setName(Instr->getName() + ".cloned"); 2926 2927 // If the scalarized instruction contributes to the address computation of a 2928 // widen masked load/store which was in a basic block that needed predication 2929 // and is not predicated after vectorization, we can't propagate 2930 // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized 2931 // instruction could feed a poison value to the base address of the widen 2932 // load/store. 2933 if (State.MayGeneratePoisonRecipes.contains(RepRecipe)) 2934 Cloned->dropPoisonGeneratingFlags(); 2935 2936 State.Builder.SetInsertPoint(Builder.GetInsertBlock(), 2937 Builder.GetInsertPoint()); 2938 // Replace the operands of the cloned instructions with their scalar 2939 // equivalents in the new loop. 2940 for (auto &I : enumerate(RepRecipe->operands())) { 2941 auto InputInstance = Instance; 2942 VPValue *Operand = I.value(); 2943 if (State.Plan->isUniformAfterVectorization(Operand)) 2944 InputInstance.Lane = VPLane::getFirstLane(); 2945 Cloned->setOperand(I.index(), State.get(Operand, InputInstance)); 2946 } 2947 addNewMetadata(Cloned, Instr); 2948 2949 // Place the cloned scalar in the new loop. 2950 Builder.Insert(Cloned); 2951 2952 State.set(RepRecipe, Cloned, Instance); 2953 2954 // If we just cloned a new assumption, add it the assumption cache. 2955 if (auto *II = dyn_cast<AssumeInst>(Cloned)) 2956 AC->registerAssumption(II); 2957 2958 // End if-block. 2959 if (IfPredicateInstr) 2960 PredicatedInstructions.push_back(Cloned); 2961 } 2962 2963 void InnerLoopVectorizer::createHeaderBranch(Loop *L) { 2964 BasicBlock *Header = L->getHeader(); 2965 assert(!L->getLoopLatch() && "loop should not have a latch at this point"); 2966 2967 IRBuilder<> B(Header->getTerminator()); 2968 Instruction *OldInst = 2969 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()); 2970 setDebugLocFromInst(OldInst, &B); 2971 2972 // Connect the header to the exit and header blocks and replace the old 2973 // terminator. 2974 B.CreateCondBr(B.getTrue(), L->getUniqueExitBlock(), Header); 2975 2976 // Now we have two terminators. Remove the old one from the block. 
2977 Header->getTerminator()->eraseFromParent(); 2978 } 2979 2980 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 2981 if (TripCount) 2982 return TripCount; 2983 2984 assert(L && "Create Trip Count for null loop."); 2985 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2986 // Find the loop boundaries. 2987 ScalarEvolution *SE = PSE.getSE(); 2988 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2989 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 2990 "Invalid loop count"); 2991 2992 Type *IdxTy = Legal->getWidestInductionType(); 2993 assert(IdxTy && "No type for induction"); 2994 2995 // The exit count might have the type of i64 while the phi is i32. This can 2996 // happen if we have an induction variable that is sign extended before the 2997 // compare. The only way that we get a backedge taken count is that the 2998 // induction variable was signed and as such will not overflow. In such a case 2999 // truncation is legal. 3000 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 3001 IdxTy->getPrimitiveSizeInBits()) 3002 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3003 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3004 3005 // Get the total trip count from the count by adding 1. 3006 const SCEV *ExitCount = SE->getAddExpr( 3007 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3008 3009 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3010 3011 // Expand the trip count and place the new instructions in the preheader. 3012 // Notice that the pre-header does not change, only the loop body. 3013 SCEVExpander Exp(*SE, DL, "induction"); 3014 3015 // Count holds the overall loop count (N). 3016 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3017 L->getLoopPreheader()->getTerminator()); 3018 3019 if (TripCount->getType()->isPointerTy()) 3020 TripCount = 3021 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3022 L->getLoopPreheader()->getTerminator()); 3023 3024 return TripCount; 3025 } 3026 3027 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3028 if (VectorTripCount) 3029 return VectorTripCount; 3030 3031 Value *TC = getOrCreateTripCount(L); 3032 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3033 3034 Type *Ty = TC->getType(); 3035 // This is where we can make the step a runtime constant. 3036 Value *Step = createStepForVF(Builder, Ty, VF, UF); 3037 3038 // If the tail is to be folded by masking, round the number of iterations N 3039 // up to a multiple of Step instead of rounding down. This is done by first 3040 // adding Step-1 and then rounding down. Note that it's ok if this addition 3041 // overflows: the vector induction variable will eventually wrap to zero given 3042 // that it starts at zero and its Step is a power of two; the loop will then 3043 // exit, with the last early-exit vector comparison also producing all-true. 3044 if (Cost->foldTailByMasking()) { 3045 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 3046 "VF*UF must be a power of 2 when folding tail by masking"); 3047 Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF); 3048 TC = Builder.CreateAdd( 3049 TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up"); 3050 } 3051 3052 // Now we need to generate the expression for the part of the loop that the 3053 // vectorized body will execute. 
This is equal to N - (N % Step) if scalar 3054 // iterations are not required for correctness, or N - Step, otherwise. Step 3055 // is equal to the vectorization factor (number of SIMD elements) times the 3056 // unroll factor (number of SIMD instructions). 3057 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3058 3059 // There are cases where we *must* run at least one iteration in the remainder 3060 // loop. See the cost model for when this can happen. If the step evenly 3061 // divides the trip count, we set the remainder to be equal to the step. If 3062 // the step does not evenly divide the trip count, no adjustment is necessary 3063 // since there will already be scalar iterations. Note that the minimum 3064 // iterations check ensures that N >= Step. 3065 if (Cost->requiresScalarEpilogue(VF)) { 3066 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3067 R = Builder.CreateSelect(IsZero, Step, R); 3068 } 3069 3070 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3071 3072 return VectorTripCount; 3073 } 3074 3075 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 3076 const DataLayout &DL) { 3077 // Verify that V is a vector type with same number of elements as DstVTy. 3078 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 3079 unsigned VF = DstFVTy->getNumElements(); 3080 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 3081 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 3082 Type *SrcElemTy = SrcVecTy->getElementType(); 3083 Type *DstElemTy = DstFVTy->getElementType(); 3084 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 3085 "Vector elements must have same size"); 3086 3087 // Do a direct cast if element types are castable. 3088 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 3089 return Builder.CreateBitOrPointerCast(V, DstFVTy); 3090 } 3091 // V cannot be directly casted to desired vector type. 3092 // May happen when V is a floating point vector but DstVTy is a vector of 3093 // pointers or vice-versa. Handle this using a two-step bitcast using an 3094 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 3095 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 3096 "Only one type should be a pointer type"); 3097 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 3098 "Only one type should be a floating point type"); 3099 Type *IntTy = 3100 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 3101 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 3102 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 3103 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 3104 } 3105 3106 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3107 BasicBlock *Bypass) { 3108 Value *Count = getOrCreateTripCount(L); 3109 // Reuse existing vector loop preheader for TC checks. 3110 // Note that new preheader block is generated for vector loop. 3111 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 3112 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 3113 3114 // Generate code to check if the loop's trip count is less than VF * UF, or 3115 // equal to it in case a scalar epilogue is required; this implies that the 3116 // vector trip count is zero. This check also covers the case where adding one 3117 // to the backedge-taken count overflowed leading to an incorrect trip count 3118 // of zero. 
In this case we will also jump to the scalar loop. 3119 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE 3120 : ICmpInst::ICMP_ULT; 3121 3122 // If tail is to be folded, vector loop takes care of all iterations. 3123 Value *CheckMinIters = Builder.getFalse(); 3124 if (!Cost->foldTailByMasking()) { 3125 Value *Step = createStepForVF(Builder, Count->getType(), VF, UF); 3126 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 3127 } 3128 // Create new preheader for vector loop. 3129 LoopVectorPreHeader = 3130 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 3131 "vector.ph"); 3132 3133 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 3134 DT->getNode(Bypass)->getIDom()) && 3135 "TC check is expected to dominate Bypass"); 3136 3137 // Update dominator for Bypass & LoopExit (if needed). 3138 DT->changeImmediateDominator(Bypass, TCCheckBlock); 3139 if (!Cost->requiresScalarEpilogue(VF)) 3140 // If there is an epilogue which must run, there's no edge from the 3141 // middle block to exit blocks and thus no need to update the immediate 3142 // dominator of the exit blocks. 3143 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 3144 3145 ReplaceInstWithInst( 3146 TCCheckBlock->getTerminator(), 3147 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3148 LoopBypassBlocks.push_back(TCCheckBlock); 3149 } 3150 3151 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 3152 3153 BasicBlock *const SCEVCheckBlock = 3154 RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock); 3155 if (!SCEVCheckBlock) 3156 return nullptr; 3157 3158 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3159 (OptForSizeBasedOnProfile && 3160 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3161 "Cannot SCEV check stride or overflow when optimizing for size"); 3162 3163 3164 // Update dominator only if this is first RT check. 3165 if (LoopBypassBlocks.empty()) { 3166 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3167 if (!Cost->requiresScalarEpilogue(VF)) 3168 // If there is an epilogue which must run, there's no edge from the 3169 // middle block to exit blocks and thus no need to update the immediate 3170 // dominator of the exit blocks. 3171 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3172 } 3173 3174 LoopBypassBlocks.push_back(SCEVCheckBlock); 3175 AddedSafetyChecks = true; 3176 return SCEVCheckBlock; 3177 } 3178 3179 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, 3180 BasicBlock *Bypass) { 3181 // VPlan-native path does not do any analysis for runtime checks currently. 3182 if (EnableVPlanNativePath) 3183 return nullptr; 3184 3185 BasicBlock *const MemCheckBlock = 3186 RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader); 3187 3188 // Check if we generated code that checks in runtime if arrays overlap. We put 3189 // the checks into a separate block to make the more common case of few 3190 // elements faster. 
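  // Illustrative sketch only (not the exact IR): for two pointer groups A and
  // B accessed over the trip count, the emitted check is roughly
  //   %no.conflict = (A.end <= B.start) | (B.end <= A.start)
  // and the vector loop is entered only if every such pairwise check passes;
  // otherwise we branch to the scalar loop.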
3191 if (!MemCheckBlock) 3192 return nullptr; 3193 3194 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3195 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3196 "Cannot emit memory checks when optimizing for size, unless forced " 3197 "to vectorize."); 3198 ORE->emit([&]() { 3199 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3200 L->getStartLoc(), L->getHeader()) 3201 << "Code-size may be reduced by not forcing " 3202 "vectorization, or by source-code modifications " 3203 "eliminating the need for runtime checks " 3204 "(e.g., adding 'restrict')."; 3205 }); 3206 } 3207 3208 LoopBypassBlocks.push_back(MemCheckBlock); 3209 3210 AddedSafetyChecks = true; 3211 3212 // We currently don't use LoopVersioning for the actual loop cloning but we 3213 // still use it to add the noalias metadata. 3214 LVer = std::make_unique<LoopVersioning>( 3215 *Legal->getLAI(), 3216 Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, 3217 DT, PSE.getSE()); 3218 LVer->prepareNoAliasMetadata(); 3219 return MemCheckBlock; 3220 } 3221 3222 Value *InnerLoopVectorizer::emitTransformedIndex( 3223 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 3224 const InductionDescriptor &ID, BasicBlock *VectorHeader) const { 3225 3226 SCEVExpander Exp(*SE, DL, "induction"); 3227 auto Step = ID.getStep(); 3228 auto StartValue = ID.getStartValue(); 3229 assert(Index->getType()->getScalarType() == Step->getType() && 3230 "Index scalar type does not match StepValue type"); 3231 3232 // Note: the IR at this point is broken. We cannot use SE to create any new 3233 // SCEV and then expand it, hoping that SCEV's simplification will give us 3234 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 3235 // lead to various SCEV crashes. So all we can do is to use builder and rely 3236 // on InstCombine for future simplifications. Here we handle some trivial 3237 // cases only. 3238 auto CreateAdd = [&B](Value *X, Value *Y) { 3239 assert(X->getType() == Y->getType() && "Types don't match!"); 3240 if (auto *CX = dyn_cast<ConstantInt>(X)) 3241 if (CX->isZero()) 3242 return Y; 3243 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3244 if (CY->isZero()) 3245 return X; 3246 return B.CreateAdd(X, Y); 3247 }; 3248 3249 // We allow X to be a vector type, in which case Y will potentially be 3250 // splatted into a vector with the same element count. 3251 auto CreateMul = [&B](Value *X, Value *Y) { 3252 assert(X->getType()->getScalarType() == Y->getType() && 3253 "Types don't match!"); 3254 if (auto *CX = dyn_cast<ConstantInt>(X)) 3255 if (CX->isOne()) 3256 return Y; 3257 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3258 if (CY->isOne()) 3259 return X; 3260 VectorType *XVTy = dyn_cast<VectorType>(X->getType()); 3261 if (XVTy && !isa<VectorType>(Y->getType())) 3262 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); 3263 return B.CreateMul(X, Y); 3264 }; 3265 3266 // Get a suitable insert point for SCEV expansion. For blocks in the vector 3267 // loop, choose the end of the vector loop header (=VectorHeader), because 3268 // the DomTree is not kept up-to-date for additional blocks generated in the 3269 // vector loop. By using the header as insertion point, we guarantee that the 3270 // expanded instructions dominate all their uses. 
3271 auto GetInsertPoint = [this, &B, VectorHeader]() { 3272 BasicBlock *InsertBB = B.GetInsertPoint()->getParent(); 3273 if (InsertBB != LoopVectorBody && 3274 LI->getLoopFor(VectorHeader) == LI->getLoopFor(InsertBB)) 3275 return VectorHeader->getTerminator(); 3276 return &*B.GetInsertPoint(); 3277 }; 3278 3279 switch (ID.getKind()) { 3280 case InductionDescriptor::IK_IntInduction: { 3281 assert(!isa<VectorType>(Index->getType()) && 3282 "Vector indices not supported for integer inductions yet"); 3283 assert(Index->getType() == StartValue->getType() && 3284 "Index type does not match StartValue type"); 3285 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 3286 return B.CreateSub(StartValue, Index); 3287 auto *Offset = CreateMul( 3288 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())); 3289 return CreateAdd(StartValue, Offset); 3290 } 3291 case InductionDescriptor::IK_PtrInduction: { 3292 assert(isa<SCEVConstant>(Step) && 3293 "Expected constant step for pointer induction"); 3294 return B.CreateGEP( 3295 ID.getElementType(), StartValue, 3296 CreateMul(Index, 3297 Exp.expandCodeFor(Step, Index->getType()->getScalarType(), 3298 GetInsertPoint()))); 3299 } 3300 case InductionDescriptor::IK_FpInduction: { 3301 assert(!isa<VectorType>(Index->getType()) && 3302 "Vector indices not supported for FP inductions yet"); 3303 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 3304 auto InductionBinOp = ID.getInductionBinOp(); 3305 assert(InductionBinOp && 3306 (InductionBinOp->getOpcode() == Instruction::FAdd || 3307 InductionBinOp->getOpcode() == Instruction::FSub) && 3308 "Original bin op should be defined for FP induction"); 3309 3310 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 3311 Value *MulExp = B.CreateFMul(StepValue, Index); 3312 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 3313 "induction"); 3314 } 3315 case InductionDescriptor::IK_NoInduction: 3316 return nullptr; 3317 } 3318 llvm_unreachable("invalid enum"); 3319 } 3320 3321 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3322 LoopScalarBody = OrigLoop->getHeader(); 3323 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3324 assert(LoopVectorPreHeader && "Invalid loop structure"); 3325 LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr 3326 assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) && 3327 "multiple exit loop without required epilogue?"); 3328 3329 LoopMiddleBlock = 3330 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3331 LI, nullptr, Twine(Prefix) + "middle.block"); 3332 LoopScalarPreHeader = 3333 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3334 nullptr, Twine(Prefix) + "scalar.ph"); 3335 3336 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3337 3338 // Set up the middle block terminator. Two cases: 3339 // 1) If we know that we must execute the scalar epilogue, emit an 3340 // unconditional branch. 3341 // 2) Otherwise, we must have a single unique exit block (due to how we 3342 // implement the multiple exit case). In this case, set up a conditonal 3343 // branch from the middle block to the loop scalar preheader, and the 3344 // exit block. completeLoopSkeleton will update the condition to use an 3345 // iteration check, if required to decide whether to execute the remainder. 3346 BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ? 
3347 BranchInst::Create(LoopScalarPreHeader) : 3348 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, 3349 Builder.getTrue()); 3350 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3351 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3352 3353 // We intentionally don't let SplitBlock to update LoopInfo since 3354 // LoopVectorBody should belong to another loop than LoopVectorPreHeader. 3355 // LoopVectorBody is explicitly added to the correct place few lines later. 3356 LoopVectorBody = 3357 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3358 nullptr, nullptr, Twine(Prefix) + "vector.body"); 3359 3360 // Update dominator for loop exit. 3361 if (!Cost->requiresScalarEpilogue(VF)) 3362 // If there is an epilogue which must run, there's no edge from the 3363 // middle block to exit blocks and thus no need to update the immediate 3364 // dominator of the exit blocks. 3365 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3366 3367 // Create and register the new vector loop. 3368 Loop *Lp = LI->AllocateLoop(); 3369 Loop *ParentLoop = OrigLoop->getParentLoop(); 3370 3371 // Insert the new loop into the loop nest and register the new basic blocks 3372 // before calling any utilities such as SCEV that require valid LoopInfo. 3373 if (ParentLoop) { 3374 ParentLoop->addChildLoop(Lp); 3375 } else { 3376 LI->addTopLevelLoop(Lp); 3377 } 3378 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3379 return Lp; 3380 } 3381 3382 void InnerLoopVectorizer::createInductionResumeValues( 3383 Loop *L, std::pair<BasicBlock *, Value *> AdditionalBypass) { 3384 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3385 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3386 "Inconsistent information about additional bypass."); 3387 3388 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3389 assert(VectorTripCount && L && "Expected valid arguments"); 3390 // We are going to resume the execution of the scalar loop. 3391 // Go over all of the induction variables that we found and fix the 3392 // PHIs that are left in the scalar version of the loop. 3393 // The starting values of PHI nodes depend on the counter of the last 3394 // iteration in the vectorized loop. 3395 // If we come from a bypass edge then we need to start from the original 3396 // start value. 3397 Instruction *OldInduction = Legal->getPrimaryInduction(); 3398 for (auto &InductionEntry : Legal->getInductionVars()) { 3399 PHINode *OrigPhi = InductionEntry.first; 3400 InductionDescriptor II = InductionEntry.second; 3401 3402 // Create phi nodes to merge from the backedge-taken check block. 3403 PHINode *BCResumeVal = 3404 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3405 LoopScalarPreHeader->getTerminator()); 3406 // Copy original phi DL over to the new one. 3407 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3408 Value *&EndValue = IVEndValues[OrigPhi]; 3409 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3410 if (OrigPhi == OldInduction) { 3411 // We know what the end value is. 3412 EndValue = VectorTripCount; 3413 } else { 3414 IRBuilder<> B(L->getLoopPreheader()->getTerminator()); 3415 3416 // Fast-math-flags propagate from the original induction instruction. 
3417 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3418 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3419 3420 Type *StepType = II.getStep()->getType(); 3421 Instruction::CastOps CastOp = 3422 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3423 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3424 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3425 EndValue = 3426 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody); 3427 EndValue->setName("ind.end"); 3428 3429 // Compute the end value for the additional bypass (if applicable). 3430 if (AdditionalBypass.first) { 3431 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3432 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3433 StepType, true); 3434 CRD = 3435 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3436 EndValueFromAdditionalBypass = 3437 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody); 3438 EndValueFromAdditionalBypass->setName("ind.end"); 3439 } 3440 } 3441 // The new PHI merges the original incoming value, in case of a bypass, 3442 // or the value at the end of the vectorized loop. 3443 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3444 3445 // Fix the scalar body counter (PHI node). 3446 // The old induction's phi node in the scalar body needs the truncated 3447 // value. 3448 for (BasicBlock *BB : LoopBypassBlocks) 3449 BCResumeVal->addIncoming(II.getStartValue(), BB); 3450 3451 if (AdditionalBypass.first) 3452 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3453 EndValueFromAdditionalBypass); 3454 3455 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3456 } 3457 } 3458 3459 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3460 MDNode *OrigLoopID) { 3461 assert(L && "Expected valid loop."); 3462 3463 // The trip counts should be cached by now. 3464 Value *Count = getOrCreateTripCount(L); 3465 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3466 3467 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3468 3469 // Add a check in the middle block to see if we have completed 3470 // all of the iterations in the first vector loop. Three cases: 3471 // 1) If we require a scalar epilogue, there is no conditional branch as 3472 // we unconditionally branch to the scalar preheader. Do nothing. 3473 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. 3474 // Thus if tail is to be folded, we know we don't need to run the 3475 // remainder and we can use the previous value for the condition (true). 3476 // 3) Otherwise, construct a runtime check. 3477 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { 3478 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3479 Count, VectorTripCount, "cmp.n", 3480 LoopMiddleBlock->getTerminator()); 3481 3482 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3483 // of the corresponding compare because they may have ended up with 3484 // different line numbers and we want to avoid awkward line stepping while 3485 // debugging. Eg. if the compare has got a line number inside the loop. 3486 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3487 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3488 } 3489 3490 // Get ready to start creating new instructions into the vectorized body. 
3491 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3492 "Inconsistent vector loop preheader"); 3493 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3494 3495 #ifdef EXPENSIVE_CHECKS 3496 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3497 LI->verify(*DT); 3498 #endif 3499 3500 return LoopVectorPreHeader; 3501 } 3502 3503 std::pair<BasicBlock *, Value *> 3504 InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3505 /* 3506 In this function we generate a new loop. The new loop will contain 3507 the vectorized instructions while the old loop will continue to run the 3508 scalar remainder. 3509 3510 [ ] <-- loop iteration number check. 3511 / | 3512 / v 3513 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3514 | / | 3515 | / v 3516 || [ ] <-- vector pre header. 3517 |/ | 3518 | v 3519 | [ ] \ 3520 | [ ]_| <-- vector loop. 3521 | | 3522 | v 3523 \ -[ ] <--- middle-block. 3524 \/ | 3525 /\ v 3526 | ->[ ] <--- new preheader. 3527 | | 3528 (opt) v <-- edge from middle to exit iff epilogue is not required. 3529 | [ ] \ 3530 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). 3531 \ | 3532 \ v 3533 >[ ] <-- exit block(s). 3534 ... 3535 */ 3536 3537 // Get the metadata of the original loop before it gets modified. 3538 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3539 3540 // Workaround! Compute the trip count of the original loop and cache it 3541 // before we start modifying the CFG. This code has a systemic problem 3542 // wherein it tries to run analysis over partially constructed IR; this is 3543 // wrong, and not simply for SCEV. The trip count of the original loop 3544 // simply happens to be prone to hitting this in practice. In theory, we 3545 // can hit the same issue for any SCEV, or ValueTracking query done during 3546 // mutation. See PR49900. 3547 getOrCreateTripCount(OrigLoop); 3548 3549 // Create an empty vector loop, and prepare basic blocks for the runtime 3550 // checks. 3551 Loop *Lp = createVectorLoopSkeleton(""); 3552 3553 // Now, compare the new count to zero. If it is zero skip the vector loop and 3554 // jump to the scalar loop. This check also covers the case where the 3555 // backedge-taken count is uint##_max: adding one to it will overflow leading 3556 // to an incorrect trip count of zero. In this (rare) case we will also jump 3557 // to the scalar loop. 3558 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3559 3560 // Generate the code to check any assumptions that we've made for SCEV 3561 // expressions. 3562 emitSCEVChecks(Lp, LoopScalarPreHeader); 3563 3564 // Generate the code that checks in runtime if arrays overlap. We put the 3565 // checks into a separate block to make the more common case of few elements 3566 // faster. 3567 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3568 3569 createHeaderBranch(Lp); 3570 3571 // Emit phis for the new starting index of the scalar loop. 3572 createInductionResumeValues(Lp); 3573 3574 return {completeLoopSkeleton(Lp, OrigLoopID), nullptr}; 3575 } 3576 3577 // Fix up external users of the induction variable. At this point, we are 3578 // in LCSSA form, with all external PHIs that use the IV having one input value, 3579 // coming from the remainder loop. We need those PHIs to also have a correct 3580 // value for the IV when arriving directly from the middle block. 
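// For example (illustrative IR): given
//   %iv      = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
//   %iv.next = add i64 %iv, 1
// an LCSSA phi in the exit block that uses %iv.next receives EndValue (derived
// from the vector trip count), while a phi that uses %iv receives the
// penultimate value, recomputed below as Start + Step * (CountRoundDown - 1).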
3581 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3582 const InductionDescriptor &II, 3583 Value *CountRoundDown, Value *EndValue, 3584 BasicBlock *MiddleBlock) { 3585 // There are two kinds of external IV usages - those that use the value 3586 // computed in the last iteration (the PHI) and those that use the penultimate 3587 // value (the value that feeds into the phi from the loop latch). 3588 // We allow both, but they, obviously, have different values. 3589 3590 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3591 3592 DenseMap<Value *, Value *> MissingVals; 3593 3594 // An external user of the last iteration's value should see the value that 3595 // the remainder loop uses to initialize its own IV. 3596 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3597 for (User *U : PostInc->users()) { 3598 Instruction *UI = cast<Instruction>(U); 3599 if (!OrigLoop->contains(UI)) { 3600 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3601 MissingVals[UI] = EndValue; 3602 } 3603 } 3604 3605 // An external user of the penultimate value need to see EndValue - Step. 3606 // The simplest way to get this is to recompute it from the constituent SCEVs, 3607 // that is Start + (Step * (CRD - 1)). 3608 for (User *U : OrigPhi->users()) { 3609 auto *UI = cast<Instruction>(U); 3610 if (!OrigLoop->contains(UI)) { 3611 const DataLayout &DL = 3612 OrigLoop->getHeader()->getModule()->getDataLayout(); 3613 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3614 3615 IRBuilder<> B(MiddleBlock->getTerminator()); 3616 3617 // Fast-math-flags propagate from the original induction instruction. 3618 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3619 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3620 3621 Value *CountMinusOne = B.CreateSub( 3622 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3623 Value *CMO = 3624 !II.getStep()->getType()->isIntegerTy() 3625 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3626 II.getStep()->getType()) 3627 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3628 CMO->setName("cast.cmo"); 3629 Value *Escape = 3630 emitTransformedIndex(B, CMO, PSE.getSE(), DL, II, LoopVectorBody); 3631 Escape->setName("ind.escape"); 3632 MissingVals[UI] = Escape; 3633 } 3634 } 3635 3636 for (auto &I : MissingVals) { 3637 PHINode *PHI = cast<PHINode>(I.first); 3638 // One corner case we have to handle is two IVs "chasing" each-other, 3639 // that is %IV2 = phi [...], [ %IV1, %latch ] 3640 // In this case, if IV1 has an external use, we need to avoid adding both 3641 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3642 // don't already have an incoming value for the middle block. 
3643 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3644 PHI->addIncoming(I.second, MiddleBlock); 3645 } 3646 } 3647 3648 namespace { 3649 3650 struct CSEDenseMapInfo { 3651 static bool canHandle(const Instruction *I) { 3652 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3653 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3654 } 3655 3656 static inline Instruction *getEmptyKey() { 3657 return DenseMapInfo<Instruction *>::getEmptyKey(); 3658 } 3659 3660 static inline Instruction *getTombstoneKey() { 3661 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3662 } 3663 3664 static unsigned getHashValue(const Instruction *I) { 3665 assert(canHandle(I) && "Unknown instruction!"); 3666 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3667 I->value_op_end())); 3668 } 3669 3670 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3671 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3672 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3673 return LHS == RHS; 3674 return LHS->isIdenticalTo(RHS); 3675 } 3676 }; 3677 3678 } // end anonymous namespace 3679 3680 ///Perform cse of induction variable instructions. 3681 static void cse(BasicBlock *BB) { 3682 // Perform simple cse. 3683 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3684 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 3685 if (!CSEDenseMapInfo::canHandle(&In)) 3686 continue; 3687 3688 // Check if we can replace this instruction with any of the 3689 // visited instructions. 3690 if (Instruction *V = CSEMap.lookup(&In)) { 3691 In.replaceAllUsesWith(V); 3692 In.eraseFromParent(); 3693 continue; 3694 } 3695 3696 CSEMap[&In] = &In; 3697 } 3698 } 3699 3700 InstructionCost 3701 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3702 bool &NeedToScalarize) const { 3703 Function *F = CI->getCalledFunction(); 3704 Type *ScalarRetTy = CI->getType(); 3705 SmallVector<Type *, 4> Tys, ScalarTys; 3706 for (auto &ArgOp : CI->args()) 3707 ScalarTys.push_back(ArgOp->getType()); 3708 3709 // Estimate cost of scalarized vector call. The source operands are assumed 3710 // to be vectors, so we need to extract individual elements from there, 3711 // execute VF scalar calls, and then gather the result into the vector return 3712 // value. 3713 InstructionCost ScalarCallCost = 3714 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3715 if (VF.isScalar()) 3716 return ScalarCallCost; 3717 3718 // Compute corresponding vector type for return value and arguments. 3719 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3720 for (Type *ScalarTy : ScalarTys) 3721 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3722 3723 // Compute costs of unpacking argument values for the scalar calls and 3724 // packing the return values to a vector. 3725 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3726 3727 InstructionCost Cost = 3728 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3729 3730 // If we can't emit a vector call for this function, then the currently found 3731 // cost is the cost we need to return. 3732 NeedToScalarize = true; 3733 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3734 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3735 3736 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3737 return Cost; 3738 3739 // If the corresponding vector cost is cheaper, return its cost. 
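  // Worked example (numbers are made up): with VF = 4, ScalarCallCost = 10 and
  // ScalarizationCost = 6, the scalarized cost is 4 * 10 + 6 = 46. If the
  // target provides a vector variant of the call costing 20, we return 20 and
  // clear NeedToScalarize.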
3740 InstructionCost VectorCallCost = 3741 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3742 if (VectorCallCost < Cost) { 3743 NeedToScalarize = false; 3744 Cost = VectorCallCost; 3745 } 3746 return Cost; 3747 } 3748 3749 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3750 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3751 return Elt; 3752 return VectorType::get(Elt, VF); 3753 } 3754 3755 InstructionCost 3756 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3757 ElementCount VF) const { 3758 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3759 assert(ID && "Expected intrinsic call!"); 3760 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3761 FastMathFlags FMF; 3762 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3763 FMF = FPMO->getFastMathFlags(); 3764 3765 SmallVector<const Value *> Arguments(CI->args()); 3766 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3767 SmallVector<Type *> ParamTys; 3768 std::transform(FTy->param_begin(), FTy->param_end(), 3769 std::back_inserter(ParamTys), 3770 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3771 3772 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3773 dyn_cast<IntrinsicInst>(CI)); 3774 return TTI.getIntrinsicInstrCost(CostAttrs, 3775 TargetTransformInfo::TCK_RecipThroughput); 3776 } 3777 3778 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3779 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3780 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3781 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3782 } 3783 3784 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3785 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3786 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3787 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3788 } 3789 3790 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3791 // For every instruction `I` in MinBWs, truncate the operands, create a 3792 // truncated version of `I` and reextend its result. InstCombine runs 3793 // later and will remove any ext/trunc pairs. 3794 SmallPtrSet<Value *, 4> Erased; 3795 for (const auto &KV : Cost->getMinimalBitwidths()) { 3796 // If the value wasn't vectorized, we must maintain the original scalar 3797 // type. The absence of the value from State indicates that it 3798 // wasn't vectorized. 3799 // FIXME: Should not rely on getVPValue at this point. 3800 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3801 if (!State.hasAnyVectorValue(Def)) 3802 continue; 3803 for (unsigned Part = 0; Part < UF; ++Part) { 3804 Value *I = State.get(Def, Part); 3805 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3806 continue; 3807 Type *OriginalTy = I->getType(); 3808 Type *ScalarTruncatedTy = 3809 IntegerType::get(OriginalTy->getContext(), KV.second); 3810 auto *TruncatedTy = VectorType::get( 3811 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); 3812 if (TruncatedTy == OriginalTy) 3813 continue; 3814 3815 IRBuilder<> B(cast<Instruction>(I)); 3816 auto ShrinkOperand = [&](Value *V) -> Value * { 3817 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3818 if (ZI->getSrcTy() == TruncatedTy) 3819 return ZI->getOperand(0); 3820 return B.CreateZExtOrTrunc(V, TruncatedTy); 3821 }; 3822 3823 // The actual instruction modification depends on the instruction type, 3824 // unfortunately. 
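      // Illustrative example: if MinBWs says this value only needs 8 bits, a
      // widened
      //   %v = add <4 x i32> %x, %y
      // is rebuilt as
      //   %x.tr = trunc <4 x i32> %x to <4 x i8>
      //   %y.tr = trunc <4 x i32> %y to <4 x i8>
      //   %v.tr = add <4 x i8> %x.tr, %y.tr
      //   %v    = zext <4 x i8> %v.tr to <4 x i32>
      // and later InstCombine removes ext/trunc pairs that become redundant.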
3825 Value *NewI = nullptr; 3826 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3827 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3828 ShrinkOperand(BO->getOperand(1))); 3829 3830 // Any wrapping introduced by shrinking this operation shouldn't be 3831 // considered undefined behavior. So, we can't unconditionally copy 3832 // arithmetic wrapping flags to NewI. 3833 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3834 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3835 NewI = 3836 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3837 ShrinkOperand(CI->getOperand(1))); 3838 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3839 NewI = B.CreateSelect(SI->getCondition(), 3840 ShrinkOperand(SI->getTrueValue()), 3841 ShrinkOperand(SI->getFalseValue())); 3842 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3843 switch (CI->getOpcode()) { 3844 default: 3845 llvm_unreachable("Unhandled cast!"); 3846 case Instruction::Trunc: 3847 NewI = ShrinkOperand(CI->getOperand(0)); 3848 break; 3849 case Instruction::SExt: 3850 NewI = B.CreateSExtOrTrunc( 3851 CI->getOperand(0), 3852 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3853 break; 3854 case Instruction::ZExt: 3855 NewI = B.CreateZExtOrTrunc( 3856 CI->getOperand(0), 3857 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3858 break; 3859 } 3860 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3861 auto Elements0 = 3862 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); 3863 auto *O0 = B.CreateZExtOrTrunc( 3864 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3865 auto Elements1 = 3866 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); 3867 auto *O1 = B.CreateZExtOrTrunc( 3868 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3869 3870 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3871 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3872 // Don't do anything with the operands, just extend the result. 3873 continue; 3874 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3875 auto Elements = 3876 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); 3877 auto *O0 = B.CreateZExtOrTrunc( 3878 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3879 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3880 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3881 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3882 auto Elements = 3883 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); 3884 auto *O0 = B.CreateZExtOrTrunc( 3885 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3886 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3887 } else { 3888 // If we don't know what to do, be conservative and don't do anything. 3889 continue; 3890 } 3891 3892 // Lastly, extend the result. 3893 NewI->takeName(cast<Instruction>(I)); 3894 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3895 I->replaceAllUsesWith(Res); 3896 cast<Instruction>(I)->eraseFromParent(); 3897 Erased.insert(I); 3898 State.reset(Def, Res, Part); 3899 } 3900 } 3901 3902 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3903 for (const auto &KV : Cost->getMinimalBitwidths()) { 3904 // If the value wasn't vectorized, we must maintain the original scalar 3905 // type. The absence of the value from State indicates that it 3906 // wasn't vectorized. 3907 // FIXME: Should not rely on getVPValue at this point. 
3908 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3909 if (!State.hasAnyVectorValue(Def)) 3910 continue; 3911 for (unsigned Part = 0; Part < UF; ++Part) { 3912 Value *I = State.get(Def, Part); 3913 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3914 if (Inst && Inst->use_empty()) { 3915 Value *NewI = Inst->getOperand(0); 3916 Inst->eraseFromParent(); 3917 State.reset(Def, NewI, Part); 3918 } 3919 } 3920 } 3921 } 3922 3923 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { 3924 // Insert truncates and extends for any truncated instructions as hints to 3925 // InstCombine. 3926 if (VF.isVector()) 3927 truncateToMinimalBitwidths(State); 3928 3929 // Fix widened non-induction PHIs by setting up the PHI operands. 3930 if (OrigPHIsToFix.size()) { 3931 assert(EnableVPlanNativePath && 3932 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 3933 fixNonInductionPHIs(State); 3934 } 3935 3936 // At this point every instruction in the original loop is widened to a 3937 // vector form. Now we need to fix the recurrences in the loop. These PHI 3938 // nodes are currently empty because we did not want to introduce cycles. 3939 // This is the second stage of vectorizing recurrences. 3940 fixCrossIterationPHIs(State); 3941 3942 // Forget the original basic block. 3943 PSE.getSE()->forgetLoop(OrigLoop); 3944 3945 // If we inserted an edge from the middle block to the unique exit block, 3946 // update uses outside the loop (phis) to account for the newly inserted 3947 // edge. 3948 if (!Cost->requiresScalarEpilogue(VF)) { 3949 // Fix-up external users of the induction variables. 3950 for (auto &Entry : Legal->getInductionVars()) 3951 fixupIVUsers(Entry.first, Entry.second, 3952 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 3953 IVEndValues[Entry.first], LoopMiddleBlock); 3954 3955 fixLCSSAPHIs(State); 3956 } 3957 3958 for (Instruction *PI : PredicatedInstructions) 3959 sinkScalarOperands(&*PI); 3960 3961 // Remove redundant induction instructions. 3962 cse(LoopVectorBody); 3963 3964 // Set/update profile weights for the vector and remainder loops as original 3965 // loop iterations are now distributed among them. Note that original loop 3966 // represented by LoopScalarBody becomes remainder loop after vectorization. 3967 // 3968 // For cases like foldTailByMasking() and requiresScalarEpiloque() we may 3969 // end up getting slightly roughened result but that should be OK since 3970 // profile is not inherently precise anyway. Note also possible bypass of 3971 // vector code caused by legality checks is ignored, assigning all the weight 3972 // to the vector loop, optimistically. 3973 // 3974 // For scalable vectorization we can't know at compile time how many iterations 3975 // of the loop are handled in one vector iteration, so instead assume a pessimistic 3976 // vscale of '1'. 3977 setProfileInfoAfterUnrolling( 3978 LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody), 3979 LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF); 3980 } 3981 3982 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) { 3983 // In order to support recurrences we need to be able to vectorize Phi nodes. 3984 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3985 // stage #2: We now need to fix the recurrences by adding incoming edges to 3986 // the currently empty PHI nodes. 
At this point every instruction in the 3987 // original loop is widened to a vector form so we can use them to construct 3988 // the incoming edges. 3989 VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock(); 3990 for (VPRecipeBase &R : Header->phis()) { 3991 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) 3992 fixReduction(ReductionPhi, State); 3993 else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) 3994 fixFirstOrderRecurrence(FOR, State); 3995 } 3996 } 3997 3998 void InnerLoopVectorizer::fixFirstOrderRecurrence( 3999 VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) { 4000 // This is the second phase of vectorizing first-order recurrences. An 4001 // overview of the transformation is described below. Suppose we have the 4002 // following loop. 4003 // 4004 // for (int i = 0; i < n; ++i) 4005 // b[i] = a[i] - a[i - 1]; 4006 // 4007 // There is a first-order recurrence on "a". For this loop, the shorthand 4008 // scalar IR looks like: 4009 // 4010 // scalar.ph: 4011 // s_init = a[-1] 4012 // br scalar.body 4013 // 4014 // scalar.body: 4015 // i = phi [0, scalar.ph], [i+1, scalar.body] 4016 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 4017 // s2 = a[i] 4018 // b[i] = s2 - s1 4019 // br cond, scalar.body, ... 4020 // 4021 // In this example, s1 is a recurrence because it's value depends on the 4022 // previous iteration. In the first phase of vectorization, we created a 4023 // vector phi v1 for s1. We now complete the vectorization and produce the 4024 // shorthand vector IR shown below (for VF = 4, UF = 1). 4025 // 4026 // vector.ph: 4027 // v_init = vector(..., ..., ..., a[-1]) 4028 // br vector.body 4029 // 4030 // vector.body 4031 // i = phi [0, vector.ph], [i+4, vector.body] 4032 // v1 = phi [v_init, vector.ph], [v2, vector.body] 4033 // v2 = a[i, i+1, i+2, i+3]; 4034 // v3 = vector(v1(3), v2(0, 1, 2)) 4035 // b[i, i+1, i+2, i+3] = v2 - v3 4036 // br cond, vector.body, middle.block 4037 // 4038 // middle.block: 4039 // x = v2(3) 4040 // br scalar.ph 4041 // 4042 // scalar.ph: 4043 // s_init = phi [x, middle.block], [a[-1], otherwise] 4044 // br scalar.body 4045 // 4046 // After execution completes the vector loop, we extract the next value of 4047 // the recurrence (x) to use as the initial value in the scalar loop. 4048 4049 // Extract the last vector element in the middle block. This will be the 4050 // initial value for the recurrence when jumping to the scalar loop. 4051 VPValue *PreviousDef = PhiR->getBackedgeValue(); 4052 Value *Incoming = State.get(PreviousDef, UF - 1); 4053 auto *ExtractForScalar = Incoming; 4054 auto *IdxTy = Builder.getInt32Ty(); 4055 if (VF.isVector()) { 4056 auto *One = ConstantInt::get(IdxTy, 1); 4057 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4058 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4059 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 4060 ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, 4061 "vector.recur.extract"); 4062 } 4063 // Extract the second last element in the middle block if the 4064 // Phi is used outside the loop. We need to extract the phi itself 4065 // and not the last element (the phi update in the current iteration). This 4066 // will be the value when jumping to the exit block from the LoopMiddleBlock, 4067 // when the scalar loop is not run at all. 
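// Illustrative sketch only (types and value names are made up, and for a
// fixed VF the runtime-VF arithmetic folds to constants): with VF = 4 the two
// extracts emitted in the middle block boil down to roughly
//   %vector.recur.extract         = extractelement <4 x i32> %incoming, i32 3
//   %vector.recur.extract.for.phi = extractelement <4 x i32> %incoming, i32 2
// i.e. the last lane seeds the scalar loop's recurrence phi, while the
// second-to-last lane feeds any LCSSA phis in the exit block.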
4068 Value *ExtractForPhiUsedOutsideLoop = nullptr;
4069 if (VF.isVector()) {
4070 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4071 auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
4072 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4073 Incoming, Idx, "vector.recur.extract.for.phi");
4074 } else if (UF > 1)
4075 // When the loop is unrolled without vectorizing, initialize
4076 // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled value
4077 // of `Incoming`. This is analogous to the vectorized case above: extracting
4078 // the second last element when VF > 1.
4079 ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4080
4081 // Fix the initial value of the original recurrence in the scalar loop.
4082 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4083 PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
4084 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4085 auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
4086 for (auto *BB : predecessors(LoopScalarPreHeader)) {
4087 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4088 Start->addIncoming(Incoming, BB);
4089 }
4090
4091 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4092 Phi->setName("scalar.recur");
4093
4094 // Finally, fix users of the recurrence outside the loop. The users will need
4095 // either the last value of the scalar recurrence or the last value of the
4096 // vector recurrence we extracted in the middle block. Since the loop is in
4097 // LCSSA form, we just need to find all the phi nodes for the original scalar
4098 // recurrence in the exit block, and then add an edge for the middle block.
4099 // Note that LCSSA does not imply single entry when the original scalar loop
4100 // had multiple exiting edges (as we always run the last iteration in the
4101 // scalar epilogue); in that case, there is no edge from the middle block to
4102 // the exit block, and thus no phis need to be updated.
4103 if (!Cost->requiresScalarEpilogue(VF))
4104 for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4105 if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
4106 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4107 }
4108
4109 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
4110 VPTransformState &State) {
4111 PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
4112 // Get its reduction variable descriptor.
4113 assert(Legal->isReductionVariable(OrigPhi) &&
4114 "Unable to find the reduction variable");
4115 const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
4116
4117 RecurKind RK = RdxDesc.getRecurrenceKind();
4118 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4119 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4120 setDebugLocFromInst(ReductionStartValue);
4121
4122 VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
4123 // This is the vector-clone of the value that leaves the loop.
4124 Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4125
4126 // Wrap flags are in general invalid after vectorization, clear them.
4127 clearReductionWrapFlags(RdxDesc, State);
4128
4129 // Before each round, move the insertion point right between
4130 // the PHIs and the values we are going to write.
4131 // This allows us to write both PHINodes and the extractelement
4132 // instructions.
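// As a rough sketch (assuming an integer add reduction with VF = 4, UF = 2,
// no tail folding, and made-up value names), the code built in the middle
// block below ends up looking something like
//   %bin.rdx = add <4 x i32> %rdx.part1, %rdx.part0
//   %rdx     = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %bin.rdx)
// i.e. the unrolled parts are combined pairwise first and the result is then
// reduced to a single scalar.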
4133 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4134 4135 setDebugLocFromInst(LoopExitInst); 4136 4137 Type *PhiTy = OrigPhi->getType(); 4138 // If tail is folded by masking, the vector value to leave the loop should be 4139 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 4140 // instead of the former. For an inloop reduction the reduction will already 4141 // be predicated, and does not need to be handled here. 4142 if (Cost->foldTailByMasking() && !PhiR->isInLoop()) { 4143 for (unsigned Part = 0; Part < UF; ++Part) { 4144 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part); 4145 Value *Sel = nullptr; 4146 for (User *U : VecLoopExitInst->users()) { 4147 if (isa<SelectInst>(U)) { 4148 assert(!Sel && "Reduction exit feeding two selects"); 4149 Sel = U; 4150 } else 4151 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 4152 } 4153 assert(Sel && "Reduction exit feeds no select"); 4154 State.reset(LoopExitInstDef, Sel, Part); 4155 4156 // If the target can create a predicated operator for the reduction at no 4157 // extra cost in the loop (for example a predicated vadd), it can be 4158 // cheaper for the select to remain in the loop than be sunk out of it, 4159 // and so use the select value for the phi instead of the old 4160 // LoopExitValue. 4161 if (PreferPredicatedReductionSelect || 4162 TTI->preferPredicatedReductionSelect( 4163 RdxDesc.getOpcode(), PhiTy, 4164 TargetTransformInfo::ReductionFlags())) { 4165 auto *VecRdxPhi = 4166 cast<PHINode>(State.get(PhiR, Part)); 4167 VecRdxPhi->setIncomingValueForBlock( 4168 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 4169 } 4170 } 4171 } 4172 4173 // If the vector reduction can be performed in a smaller type, we truncate 4174 // then extend the loop exit value to enable InstCombine to evaluate the 4175 // entire expression in the smaller type. 4176 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 4177 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); 4178 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4179 Builder.SetInsertPoint( 4180 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4181 VectorParts RdxParts(UF); 4182 for (unsigned Part = 0; Part < UF; ++Part) { 4183 RdxParts[Part] = State.get(LoopExitInstDef, Part); 4184 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4185 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 4186 : Builder.CreateZExt(Trunc, VecTy); 4187 for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users())) 4188 if (U != Trunc) { 4189 U->replaceUsesOfWith(RdxParts[Part], Extnd); 4190 RdxParts[Part] = Extnd; 4191 } 4192 } 4193 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4194 for (unsigned Part = 0; Part < UF; ++Part) { 4195 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4196 State.reset(LoopExitInstDef, RdxParts[Part], Part); 4197 } 4198 } 4199 4200 // Reduce all of the unrolled parts into a single vector. 4201 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 4202 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 4203 4204 // The middle block terminator has already been assigned a DebugLoc here (the 4205 // OrigLoop's single latch terminator). 
We want the whole middle block to
4206 // appear to execute on this line because: (a) it is all compiler generated,
4207 // (b) these instructions are always executed after evaluating the latch
4208 // conditional branch, and (c) other passes may add new predecessors which
4209 // terminate on this line. This is the easiest way to ensure we don't
4210 // accidentally cause an extra step back into the loop while debugging.
4211 setDebugLocFromInst(LoopMiddleBlock->getTerminator());
4212 if (PhiR->isOrdered())
4213 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
4214 else {
4215 // Floating-point operations should have some FMF to enable the reduction.
4216 IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4217 Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4218 for (unsigned Part = 1; Part < UF; ++Part) {
4219 Value *RdxPart = State.get(LoopExitInstDef, Part);
4220 if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4221 ReducedPartRdx = Builder.CreateBinOp(
4222 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4223 } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
4224 ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
4225 ReducedPartRdx, RdxPart);
4226 else
4227 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4228 }
4229 }
4230
4231 // Create the reduction after the loop. Note that inloop reductions create the
4232 // target reduction in the loop using a Reduction recipe.
4233 if (VF.isVector() && !PhiR->isInLoop()) {
4234 ReducedPartRdx =
4235 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
4236 // If the reduction can be performed in a smaller type, we need to extend
4237 // the reduction to the wider type before we branch to the original loop.
4238 if (PhiTy != RdxDesc.getRecurrenceType())
4239 ReducedPartRdx = RdxDesc.isSigned()
4240 ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4241 : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4242 }
4243
4244 PHINode *ResumePhi =
4245 dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());
4246
4247 // Create a phi node that merges control-flow from the backedge-taken check
4248 // block and the middle block.
4249 PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4250 LoopScalarPreHeader->getTerminator());
4251
4252 // If we are fixing reductions in the epilogue loop then we should already
4253 // have created a bc.merge.rdx Phi after the main vector body. Ensure that
4254 // we carry over the incoming values correctly.
4255 for (auto *Incoming : predecessors(LoopScalarPreHeader)) {
4256 if (Incoming == LoopMiddleBlock)
4257 BCBlockPhi->addIncoming(ReducedPartRdx, Incoming);
4258 else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming))
4259 BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
4260 Incoming);
4261 else
4262 BCBlockPhi->addIncoming(ReductionStartValue, Incoming);
4263 }
4264
4265 // Set the resume value for this reduction.
4266 ReductionResumeValues.insert({&RdxDesc, BCBlockPhi});
4267
4268 // Now, we need to fix the users of the reduction variable
4269 // inside and outside of the scalar remainder loop.
4270
4271 // We know that the loop is in LCSSA form. We need to update the PHI nodes
4272 // in the exit blocks. See comment on analogous loop in
4273 // fixFirstOrderRecurrence for a more complete explanation of the logic.
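// For illustration only (block and value names are made up), after the fixup
// below the scalar preheader and the original header phi are wired up roughly
// as follows for an integer add reduction:
//   scalar.ph:
//     %bc.merge.rdx = phi i32 [ %rdx, %middle.block ], [ %rdx.start, ... ]
//   loop:
//     %sum = phi i32 [ %bc.merge.rdx, %scalar.ph ], [ %sum.next, %loop ]
// so the scalar remainder loop resumes from the already-reduced value.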
4274 if (!Cost->requiresScalarEpilogue(VF)) 4275 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4276 if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst)) 4277 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4278 4279 // Fix the scalar loop reduction variable with the incoming reduction sum 4280 // from the vector body and from the backedge value. 4281 int IncomingEdgeBlockIdx = 4282 OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4283 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4284 // Pick the other block. 4285 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 4286 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4287 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4288 } 4289 4290 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc, 4291 VPTransformState &State) { 4292 RecurKind RK = RdxDesc.getRecurrenceKind(); 4293 if (RK != RecurKind::Add && RK != RecurKind::Mul) 4294 return; 4295 4296 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 4297 assert(LoopExitInstr && "null loop exit instruction"); 4298 SmallVector<Instruction *, 8> Worklist; 4299 SmallPtrSet<Instruction *, 8> Visited; 4300 Worklist.push_back(LoopExitInstr); 4301 Visited.insert(LoopExitInstr); 4302 4303 while (!Worklist.empty()) { 4304 Instruction *Cur = Worklist.pop_back_val(); 4305 if (isa<OverflowingBinaryOperator>(Cur)) 4306 for (unsigned Part = 0; Part < UF; ++Part) { 4307 // FIXME: Should not rely on getVPValue at this point. 4308 Value *V = State.get(State.Plan->getVPValue(Cur, true), Part); 4309 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4310 } 4311 4312 for (User *U : Cur->users()) { 4313 Instruction *UI = cast<Instruction>(U); 4314 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4315 Visited.insert(UI).second) 4316 Worklist.push_back(UI); 4317 } 4318 } 4319 } 4320 4321 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) { 4322 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4323 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) 4324 // Some phis were already hand updated by the reduction and recurrence 4325 // code above, leave them alone. 4326 continue; 4327 4328 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4329 // Non-instruction incoming values will have only one value. 4330 4331 VPLane Lane = VPLane::getFirstLane(); 4332 if (isa<Instruction>(IncomingValue) && 4333 !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue), 4334 VF)) 4335 Lane = VPLane::getLastLaneForVF(VF); 4336 4337 // Can be a loop invariant incoming value or the last scalar value to be 4338 // extracted from the vectorized loop. 4339 // FIXME: Should not rely on getVPValue at this point. 4340 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4341 Value *lastIncomingValue = 4342 OrigLoop->isLoopInvariant(IncomingValue) 4343 ? IncomingValue 4344 : State.get(State.Plan->getVPValue(IncomingValue, true), 4345 VPIteration(UF - 1, Lane)); 4346 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4347 } 4348 } 4349 4350 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4351 // The basic block and loop containing the predicated instruction. 4352 auto *PredBB = PredInst->getParent(); 4353 auto *VectorLoop = LI->getLoopFor(PredBB); 4354 4355 // Initialize a worklist with the operands of the predicated instruction. 
4356 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4357 4358 // Holds instructions that we need to analyze again. An instruction may be 4359 // reanalyzed if we don't yet know if we can sink it or not. 4360 SmallVector<Instruction *, 8> InstsToReanalyze; 4361 4362 // Returns true if a given use occurs in the predicated block. Phi nodes use 4363 // their operands in their corresponding predecessor blocks. 4364 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4365 auto *I = cast<Instruction>(U.getUser()); 4366 BasicBlock *BB = I->getParent(); 4367 if (auto *Phi = dyn_cast<PHINode>(I)) 4368 BB = Phi->getIncomingBlock( 4369 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4370 return BB == PredBB; 4371 }; 4372 4373 // Iteratively sink the scalarized operands of the predicated instruction 4374 // into the block we created for it. When an instruction is sunk, it's 4375 // operands are then added to the worklist. The algorithm ends after one pass 4376 // through the worklist doesn't sink a single instruction. 4377 bool Changed; 4378 do { 4379 // Add the instructions that need to be reanalyzed to the worklist, and 4380 // reset the changed indicator. 4381 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4382 InstsToReanalyze.clear(); 4383 Changed = false; 4384 4385 while (!Worklist.empty()) { 4386 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4387 4388 // We can't sink an instruction if it is a phi node, is not in the loop, 4389 // or may have side effects. 4390 if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) || 4391 I->mayHaveSideEffects()) 4392 continue; 4393 4394 // If the instruction is already in PredBB, check if we can sink its 4395 // operands. In that case, VPlan's sinkScalarOperands() succeeded in 4396 // sinking the scalar instruction I, hence it appears in PredBB; but it 4397 // may have failed to sink I's operands (recursively), which we try 4398 // (again) here. 4399 if (I->getParent() == PredBB) { 4400 Worklist.insert(I->op_begin(), I->op_end()); 4401 continue; 4402 } 4403 4404 // It's legal to sink the instruction if all its uses occur in the 4405 // predicated block. Otherwise, there's nothing to do yet, and we may 4406 // need to reanalyze the instruction. 4407 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4408 InstsToReanalyze.push_back(I); 4409 continue; 4410 } 4411 4412 // Move the instruction to the beginning of the predicated block, and add 4413 // it's operands to the worklist. 4414 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4415 Worklist.insert(I->op_begin(), I->op_end()); 4416 4417 // The sinking may have enabled other instructions to be sunk, so we will 4418 // need to iterate. 4419 Changed = true; 4420 } 4421 } while (Changed); 4422 } 4423 4424 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { 4425 for (PHINode *OrigPhi : OrigPHIsToFix) { 4426 VPWidenPHIRecipe *VPPhi = 4427 cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi)); 4428 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0)); 4429 // Make sure the builder has a valid insert point. 
4430 Builder.SetInsertPoint(NewPhi); 4431 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4432 VPValue *Inc = VPPhi->getIncomingValue(i); 4433 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4434 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4435 } 4436 } 4437 } 4438 4439 bool InnerLoopVectorizer::useOrderedReductions( 4440 const RecurrenceDescriptor &RdxDesc) { 4441 return Cost->useOrderedReductions(RdxDesc); 4442 } 4443 4444 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4445 VPWidenPHIRecipe *PhiR, 4446 VPTransformState &State) { 4447 PHINode *P = cast<PHINode>(PN); 4448 if (EnableVPlanNativePath) { 4449 // Currently we enter here in the VPlan-native path for non-induction 4450 // PHIs where all control flow is uniform. We simply widen these PHIs. 4451 // Create a vector phi with no operands - the vector phi operands will be 4452 // set at the end of vector code generation. 4453 Type *VecTy = (State.VF.isScalar()) 4454 ? PN->getType() 4455 : VectorType::get(PN->getType(), State.VF); 4456 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4457 State.set(PhiR, VecPhi, 0); 4458 OrigPHIsToFix.push_back(P); 4459 4460 return; 4461 } 4462 4463 assert(PN->getParent() == OrigLoop->getHeader() && 4464 "Non-header phis should have been handled elsewhere"); 4465 4466 // In order to support recurrences we need to be able to vectorize Phi nodes. 4467 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4468 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4469 // this value when we vectorize all of the instructions that use the PHI. 4470 4471 assert(!Legal->isReductionVariable(P) && 4472 "reductions should be handled elsewhere"); 4473 4474 setDebugLocFromInst(P); 4475 4476 // This PHINode must be an induction variable. 4477 // Make sure that we know about it. 4478 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4479 4480 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4481 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4482 4483 auto *IVR = PhiR->getParent()->getPlan()->getCanonicalIV(); 4484 PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0)); 4485 4486 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4487 // which can be found from the original scalar operations. 4488 switch (II.getKind()) { 4489 case InductionDescriptor::IK_NoInduction: 4490 llvm_unreachable("Unknown induction"); 4491 case InductionDescriptor::IK_IntInduction: 4492 case InductionDescriptor::IK_FpInduction: 4493 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4494 case InductionDescriptor::IK_PtrInduction: { 4495 // Handle the pointer induction variable case. 4496 assert(P->getType()->isPointerTy() && "Unexpected type."); 4497 4498 if (Cost->isScalarAfterVectorization(P, State.VF)) { 4499 // This is the normalized GEP that starts counting at zero. 4500 Value *PtrInd = 4501 Builder.CreateSExtOrTrunc(CanonicalIV, II.getStep()->getType()); 4502 // Determine the number of scalars we need to generate for each unroll 4503 // iteration. If the instruction is uniform, we only need to generate the 4504 // first lane. Otherwise, we generate all VF values. 4505 bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF); 4506 assert((IsUniform || !State.VF.isScalable()) && 4507 "Cannot scalarize a scalable VF"); 4508 unsigned Lanes = IsUniform ? 
1 : State.VF.getFixedValue(); 4509 4510 for (unsigned Part = 0; Part < UF; ++Part) { 4511 Value *PartStart = 4512 createStepForVF(Builder, PtrInd->getType(), VF, Part); 4513 4514 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4515 Value *Idx = Builder.CreateAdd( 4516 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 4517 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4518 Value *SclrGep = emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), 4519 DL, II, State.CFG.PrevBB); 4520 SclrGep->setName("next.gep"); 4521 State.set(PhiR, SclrGep, VPIteration(Part, Lane)); 4522 } 4523 } 4524 return; 4525 } 4526 assert(isa<SCEVConstant>(II.getStep()) && 4527 "Induction step not a SCEV constant!"); 4528 Type *PhiType = II.getStep()->getType(); 4529 4530 // Build a pointer phi 4531 Value *ScalarStartValue = PhiR->getStartValue()->getLiveInIRValue(); 4532 Type *ScStValueType = ScalarStartValue->getType(); 4533 PHINode *NewPointerPhi = 4534 PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV); 4535 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4536 4537 // A pointer induction, performed by using a gep 4538 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4539 Instruction *InductionLoc = LoopLatch->getTerminator(); 4540 const SCEV *ScalarStep = II.getStep(); 4541 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4542 Value *ScalarStepValue = 4543 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4544 Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF); 4545 Value *NumUnrolledElems = 4546 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 4547 Value *InductionGEP = GetElementPtrInst::Create( 4548 II.getElementType(), NewPointerPhi, 4549 Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 4550 InductionLoc); 4551 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4552 4553 // Create UF many actual address geps that use the pointer 4554 // phi as base and a vectorized version of the step value 4555 // (<step*0, ..., step*N>) as offset. 4556 for (unsigned Part = 0; Part < State.UF; ++Part) { 4557 Type *VecPhiType = VectorType::get(PhiType, State.VF); 4558 Value *StartOffsetScalar = 4559 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 4560 Value *StartOffset = 4561 Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 4562 // Create a vector of consecutive numbers from zero to VF. 4563 StartOffset = 4564 Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType)); 4565 4566 Value *GEP = Builder.CreateGEP( 4567 II.getElementType(), NewPointerPhi, 4568 Builder.CreateMul( 4569 StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue), 4570 "vector.gep")); 4571 State.set(PhiR, GEP, Part); 4572 } 4573 } 4574 } 4575 } 4576 4577 /// A helper function for checking whether an integer division-related 4578 /// instruction may divide by zero (in which case it must be predicated if 4579 /// executed conditionally in the scalar code). 4580 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4581 /// Non-zero divisors that are non compile-time constants will not be 4582 /// converted into multiplication, so we will still end up scalarizing 4583 /// the division, but can do so w/o predication. 
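/// For example (illustrative source, not taken from any particular test):
///   for (int i = 0; i < n; ++i)
///     if (b[i] != 0) sum += a[i] / b[i];
/// Here the divisor is only known to be non-zero under the branch, so the
/// scalarized division cannot be executed unconditionally and must remain
/// predicated.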
4584 static bool mayDivideByZero(Instruction &I) { 4585 assert((I.getOpcode() == Instruction::UDiv || 4586 I.getOpcode() == Instruction::SDiv || 4587 I.getOpcode() == Instruction::URem || 4588 I.getOpcode() == Instruction::SRem) && 4589 "Unexpected instruction"); 4590 Value *Divisor = I.getOperand(1); 4591 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4592 return !CInt || CInt->isZero(); 4593 } 4594 4595 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4596 VPUser &ArgOperands, 4597 VPTransformState &State) { 4598 assert(!isa<DbgInfoIntrinsic>(I) && 4599 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4600 setDebugLocFromInst(&I); 4601 4602 Module *M = I.getParent()->getParent()->getParent(); 4603 auto *CI = cast<CallInst>(&I); 4604 4605 SmallVector<Type *, 4> Tys; 4606 for (Value *ArgOperand : CI->args()) 4607 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4608 4609 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4610 4611 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4612 // version of the instruction. 4613 // Is it beneficial to perform intrinsic call compared to lib call? 4614 bool NeedToScalarize = false; 4615 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4616 InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; 4617 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4618 assert((UseVectorIntrinsic || !NeedToScalarize) && 4619 "Instruction should be scalarized elsewhere."); 4620 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4621 "Either the intrinsic cost or vector call cost must be valid"); 4622 4623 for (unsigned Part = 0; Part < UF; ++Part) { 4624 SmallVector<Type *, 2> TysForDecl = {CI->getType()}; 4625 SmallVector<Value *, 4> Args; 4626 for (auto &I : enumerate(ArgOperands.operands())) { 4627 // Some intrinsics have a scalar argument - don't replace it with a 4628 // vector. 4629 Value *Arg; 4630 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 4631 Arg = State.get(I.value(), Part); 4632 else { 4633 Arg = State.get(I.value(), VPIteration(0, 0)); 4634 if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) 4635 TysForDecl.push_back(Arg->getType()); 4636 } 4637 Args.push_back(Arg); 4638 } 4639 4640 Function *VectorF; 4641 if (UseVectorIntrinsic) { 4642 // Use vector version of the intrinsic. 4643 if (VF.isVector()) 4644 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4645 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4646 assert(VectorF && "Can't retrieve vector intrinsic."); 4647 } else { 4648 // Use vector version of the function call. 4649 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 4650 #ifndef NDEBUG 4651 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 4652 "Can't create vector function."); 4653 #endif 4654 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 4655 } 4656 SmallVector<OperandBundleDef, 1> OpBundles; 4657 CI->getOperandBundlesAsDefs(OpBundles); 4658 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4659 4660 if (isa<FPMathOperator>(V)) 4661 V->copyFastMathFlags(CI); 4662 4663 State.set(Def, V, Part); 4664 addMetadata(V, &I); 4665 } 4666 } 4667 4668 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 4669 // We should not collect Scalars more than once per VF. 
Right now, this 4670 // function is called from collectUniformsAndScalars(), which already does 4671 // this check. Collecting Scalars for VF=1 does not make any sense. 4672 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 4673 "This function should not be visited twice for the same VF"); 4674 4675 SmallSetVector<Instruction *, 8> Worklist; 4676 4677 // These sets are used to seed the analysis with pointers used by memory 4678 // accesses that will remain scalar. 4679 SmallSetVector<Instruction *, 8> ScalarPtrs; 4680 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4681 auto *Latch = TheLoop->getLoopLatch(); 4682 4683 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4684 // The pointer operands of loads and stores will be scalar as long as the 4685 // memory access is not a gather or scatter operation. The value operand of a 4686 // store will remain scalar if the store is scalarized. 4687 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4688 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4689 assert(WideningDecision != CM_Unknown && 4690 "Widening decision should be ready at this moment"); 4691 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4692 if (Ptr == Store->getValueOperand()) 4693 return WideningDecision == CM_Scalarize; 4694 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4695 "Ptr is neither a value or pointer operand"); 4696 return WideningDecision != CM_GatherScatter; 4697 }; 4698 4699 // A helper that returns true if the given value is a bitcast or 4700 // getelementptr instruction contained in the loop. 4701 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4702 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4703 isa<GetElementPtrInst>(V)) && 4704 !TheLoop->isLoopInvariant(V); 4705 }; 4706 4707 // A helper that evaluates a memory access's use of a pointer. If the use will 4708 // be a scalar use and the pointer is only used by memory accesses, we place 4709 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in 4710 // PossibleNonScalarPtrs. 4711 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4712 // We only care about bitcast and getelementptr instructions contained in 4713 // the loop. 4714 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4715 return; 4716 4717 // If the pointer has already been identified as scalar (e.g., if it was 4718 // also identified as uniform), there's nothing to do. 4719 auto *I = cast<Instruction>(Ptr); 4720 if (Worklist.count(I)) 4721 return; 4722 4723 // If the use of the pointer will be a scalar use, and all users of the 4724 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 4725 // place the pointer in PossibleNonScalarPtrs. 4726 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 4727 return isa<LoadInst>(U) || isa<StoreInst>(U); 4728 })) 4729 ScalarPtrs.insert(I); 4730 else 4731 PossibleNonScalarPtrs.insert(I); 4732 }; 4733 4734 // We seed the scalars analysis with three classes of instructions: (1) 4735 // instructions marked uniform-after-vectorization and (2) bitcast, 4736 // getelementptr and (pointer) phi instructions used by memory accesses 4737 // requiring a scalar use. 4738 // 4739 // (1) Add to the worklist all instructions that have been identified as 4740 // uniform-after-vectorization. 
4741 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4742 4743 // (2) Add to the worklist all bitcast and getelementptr instructions used by 4744 // memory accesses requiring a scalar use. The pointer operands of loads and 4745 // stores will be scalar as long as the memory accesses is not a gather or 4746 // scatter operation. The value operand of a store will remain scalar if the 4747 // store is scalarized. 4748 for (auto *BB : TheLoop->blocks()) 4749 for (auto &I : *BB) { 4750 if (auto *Load = dyn_cast<LoadInst>(&I)) { 4751 evaluatePtrUse(Load, Load->getPointerOperand()); 4752 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 4753 evaluatePtrUse(Store, Store->getPointerOperand()); 4754 evaluatePtrUse(Store, Store->getValueOperand()); 4755 } 4756 } 4757 for (auto *I : ScalarPtrs) 4758 if (!PossibleNonScalarPtrs.count(I)) { 4759 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 4760 Worklist.insert(I); 4761 } 4762 4763 // Insert the forced scalars. 4764 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4765 // induction variable when the PHI user is scalarized. 4766 auto ForcedScalar = ForcedScalars.find(VF); 4767 if (ForcedScalar != ForcedScalars.end()) 4768 for (auto *I : ForcedScalar->second) 4769 Worklist.insert(I); 4770 4771 // Expand the worklist by looking through any bitcasts and getelementptr 4772 // instructions we've already identified as scalar. This is similar to the 4773 // expansion step in collectLoopUniforms(); however, here we're only 4774 // expanding to include additional bitcasts and getelementptr instructions. 4775 unsigned Idx = 0; 4776 while (Idx != Worklist.size()) { 4777 Instruction *Dst = Worklist[Idx++]; 4778 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4779 continue; 4780 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4781 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4782 auto *J = cast<Instruction>(U); 4783 return !TheLoop->contains(J) || Worklist.count(J) || 4784 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4785 isScalarUse(J, Src)); 4786 })) { 4787 Worklist.insert(Src); 4788 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4789 } 4790 } 4791 4792 // An induction variable will remain scalar if all users of the induction 4793 // variable and induction variable update remain scalar. 4794 for (auto &Induction : Legal->getInductionVars()) { 4795 auto *Ind = Induction.first; 4796 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4797 4798 // If tail-folding is applied, the primary induction variable will be used 4799 // to feed a vector compare. 4800 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 4801 continue; 4802 4803 // Returns true if \p Indvar is a pointer induction that is used directly by 4804 // load/store instruction \p I. 4805 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar, 4806 Instruction *I) { 4807 return Induction.second.getKind() == 4808 InductionDescriptor::IK_PtrInduction && 4809 (isa<LoadInst>(I) || isa<StoreInst>(I)) && 4810 Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar); 4811 }; 4812 4813 // Determine if all users of the induction variable are scalar after 4814 // vectorization. 
4815 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4816 auto *I = cast<Instruction>(U); 4817 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4818 IsDirectLoadStoreFromPtrIndvar(Ind, I); 4819 }); 4820 if (!ScalarInd) 4821 continue; 4822 4823 // Determine if all users of the induction variable update instruction are 4824 // scalar after vectorization. 4825 auto ScalarIndUpdate = 4826 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4827 auto *I = cast<Instruction>(U); 4828 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4829 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I); 4830 }); 4831 if (!ScalarIndUpdate) 4832 continue; 4833 4834 // The induction variable and its update instruction will remain scalar. 4835 Worklist.insert(Ind); 4836 Worklist.insert(IndUpdate); 4837 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4838 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4839 << "\n"); 4840 } 4841 4842 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4843 } 4844 4845 bool LoopVectorizationCostModel::isScalarWithPredication( 4846 Instruction *I, ElementCount VF) const { 4847 if (!blockNeedsPredicationForAnyReason(I->getParent())) 4848 return false; 4849 switch(I->getOpcode()) { 4850 default: 4851 break; 4852 case Instruction::Load: 4853 case Instruction::Store: { 4854 if (!Legal->isMaskRequired(I)) 4855 return false; 4856 auto *Ptr = getLoadStorePointerOperand(I); 4857 auto *Ty = getLoadStoreType(I); 4858 Type *VTy = Ty; 4859 if (VF.isVector()) 4860 VTy = VectorType::get(Ty, VF); 4861 const Align Alignment = getLoadStoreAlignment(I); 4862 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 4863 TTI.isLegalMaskedGather(VTy, Alignment)) 4864 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 4865 TTI.isLegalMaskedScatter(VTy, Alignment)); 4866 } 4867 case Instruction::UDiv: 4868 case Instruction::SDiv: 4869 case Instruction::SRem: 4870 case Instruction::URem: 4871 return mayDivideByZero(*I); 4872 } 4873 return false; 4874 } 4875 4876 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 4877 Instruction *I, ElementCount VF) { 4878 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4879 assert(getWideningDecision(I, VF) == CM_Unknown && 4880 "Decision should not be set yet."); 4881 auto *Group = getInterleavedAccessGroup(I); 4882 assert(Group && "Must have a group."); 4883 4884 // If the instruction's allocated size doesn't equal it's type size, it 4885 // requires padding and will be scalarized. 4886 auto &DL = I->getModule()->getDataLayout(); 4887 auto *ScalarTy = getLoadStoreType(I); 4888 if (hasIrregularType(ScalarTy, DL)) 4889 return false; 4890 4891 // Check if masking is required. 4892 // A Group may need masking for one of two reasons: it resides in a block that 4893 // needs predication, or it was decided to use masking to deal with gaps 4894 // (either a gap at the end of a load-access that may result in a speculative 4895 // load, or any gaps in a store-access). 
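// A concrete (illustrative) example of a store group with a gap:
//   struct S { int a, b, c; };
//   for (...) { p[i].a = ...; p[i].b = ...; }   // p[i].c is never written
// The group has 2 members but an interleave factor of 3, so masking is needed
// to avoid clobbering the untouched member.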
4896 bool PredicatedAccessRequiresMasking = 4897 blockNeedsPredicationForAnyReason(I->getParent()) && 4898 Legal->isMaskRequired(I); 4899 bool LoadAccessWithGapsRequiresEpilogMasking = 4900 isa<LoadInst>(I) && Group->requiresScalarEpilogue() && 4901 !isScalarEpilogueAllowed(); 4902 bool StoreAccessWithGapsRequiresMasking = 4903 isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()); 4904 if (!PredicatedAccessRequiresMasking && 4905 !LoadAccessWithGapsRequiresEpilogMasking && 4906 !StoreAccessWithGapsRequiresMasking) 4907 return true; 4908 4909 // If masked interleaving is required, we expect that the user/target had 4910 // enabled it, because otherwise it either wouldn't have been created or 4911 // it should have been invalidated by the CostModel. 4912 assert(useMaskedInterleavedAccesses(TTI) && 4913 "Masked interleave-groups for predicated accesses are not enabled."); 4914 4915 if (Group->isReverse()) 4916 return false; 4917 4918 auto *Ty = getLoadStoreType(I); 4919 const Align Alignment = getLoadStoreAlignment(I); 4920 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 4921 : TTI.isLegalMaskedStore(Ty, Alignment); 4922 } 4923 4924 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 4925 Instruction *I, ElementCount VF) { 4926 // Get and ensure we have a valid memory instruction. 4927 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction"); 4928 4929 auto *Ptr = getLoadStorePointerOperand(I); 4930 auto *ScalarTy = getLoadStoreType(I); 4931 4932 // In order to be widened, the pointer should be consecutive, first of all. 4933 if (!Legal->isConsecutivePtr(ScalarTy, Ptr)) 4934 return false; 4935 4936 // If the instruction is a store located in a predicated block, it will be 4937 // scalarized. 4938 if (isScalarWithPredication(I, VF)) 4939 return false; 4940 4941 // If the instruction's allocated size doesn't equal it's type size, it 4942 // requires padding and will be scalarized. 4943 auto &DL = I->getModule()->getDataLayout(); 4944 if (hasIrregularType(ScalarTy, DL)) 4945 return false; 4946 4947 return true; 4948 } 4949 4950 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 4951 // We should not collect Uniforms more than once per VF. Right now, 4952 // this function is called from collectUniformsAndScalars(), which 4953 // already does this check. Collecting Uniforms for VF=1 does not make any 4954 // sense. 4955 4956 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 4957 "This function should not be visited twice for the same VF"); 4958 4959 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 4960 // not analyze again. Uniforms.count(VF) will return 1. 4961 Uniforms[VF].clear(); 4962 4963 // We now know that the loop is vectorizable! 4964 // Collect instructions inside the loop that will remain uniform after 4965 // vectorization. 4966 4967 // Global values, params and instructions outside of current loop are out of 4968 // scope. 4969 auto isOutOfScope = [&](Value *V) -> bool { 4970 Instruction *I = dyn_cast<Instruction>(V); 4971 return (!I || !TheLoop->contains(I)); 4972 }; 4973 4974 // Worklist containing uniform instructions demanding lane 0. 4975 SetVector<Instruction *> Worklist; 4976 BasicBlock *Latch = TheLoop->getLoopLatch(); 4977 4978 // Add uniform instructions demanding lane 0 to the worklist. 
Instructions 4979 // that are scalar with predication must not be considered uniform after 4980 // vectorization, because that would create an erroneous replicating region 4981 // where only a single instance out of VF should be formed. 4982 // TODO: optimize such seldom cases if found important, see PR40816. 4983 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 4984 if (isOutOfScope(I)) { 4985 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 4986 << *I << "\n"); 4987 return; 4988 } 4989 if (isScalarWithPredication(I, VF)) { 4990 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 4991 << *I << "\n"); 4992 return; 4993 } 4994 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 4995 Worklist.insert(I); 4996 }; 4997 4998 // Start with the conditional branch. If the branch condition is an 4999 // instruction contained in the loop that is only used by the branch, it is 5000 // uniform. 5001 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 5002 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 5003 addToWorklistIfAllowed(Cmp); 5004 5005 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 5006 InstWidening WideningDecision = getWideningDecision(I, VF); 5007 assert(WideningDecision != CM_Unknown && 5008 "Widening decision should be ready at this moment"); 5009 5010 // A uniform memory op is itself uniform. We exclude uniform stores 5011 // here as they demand the last lane, not the first one. 5012 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 5013 assert(WideningDecision == CM_Scalarize); 5014 return true; 5015 } 5016 5017 return (WideningDecision == CM_Widen || 5018 WideningDecision == CM_Widen_Reverse || 5019 WideningDecision == CM_Interleave); 5020 }; 5021 5022 5023 // Returns true if Ptr is the pointer operand of a memory access instruction 5024 // I, and I is known to not require scalarization. 5025 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 5026 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 5027 }; 5028 5029 // Holds a list of values which are known to have at least one uniform use. 5030 // Note that there may be other uses which aren't uniform. A "uniform use" 5031 // here is something which only demands lane 0 of the unrolled iterations; 5032 // it does not imply that all lanes produce the same value (e.g. this is not 5033 // the usual meaning of uniform) 5034 SetVector<Value *> HasUniformUse; 5035 5036 // Scan the loop for instructions which are either a) known to have only 5037 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 5038 for (auto *BB : TheLoop->blocks()) 5039 for (auto &I : *BB) { 5040 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) { 5041 switch (II->getIntrinsicID()) { 5042 case Intrinsic::sideeffect: 5043 case Intrinsic::experimental_noalias_scope_decl: 5044 case Intrinsic::assume: 5045 case Intrinsic::lifetime_start: 5046 case Intrinsic::lifetime_end: 5047 if (TheLoop->hasLoopInvariantOperands(&I)) 5048 addToWorklistIfAllowed(&I); 5049 break; 5050 default: 5051 break; 5052 } 5053 } 5054 5055 // ExtractValue instructions must be uniform, because the operands are 5056 // known to be loop-invariant. 
5057 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) { 5058 assert(isOutOfScope(EVI->getAggregateOperand()) && 5059 "Expected aggregate value to be loop invariant"); 5060 addToWorklistIfAllowed(EVI); 5061 continue; 5062 } 5063 5064 // If there's no pointer operand, there's nothing to do. 5065 auto *Ptr = getLoadStorePointerOperand(&I); 5066 if (!Ptr) 5067 continue; 5068 5069 // A uniform memory op is itself uniform. We exclude uniform stores 5070 // here as they demand the last lane, not the first one. 5071 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 5072 addToWorklistIfAllowed(&I); 5073 5074 if (isUniformDecision(&I, VF)) { 5075 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 5076 HasUniformUse.insert(Ptr); 5077 } 5078 } 5079 5080 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 5081 // demanding) users. Since loops are assumed to be in LCSSA form, this 5082 // disallows uses outside the loop as well. 5083 for (auto *V : HasUniformUse) { 5084 if (isOutOfScope(V)) 5085 continue; 5086 auto *I = cast<Instruction>(V); 5087 auto UsersAreMemAccesses = 5088 llvm::all_of(I->users(), [&](User *U) -> bool { 5089 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 5090 }); 5091 if (UsersAreMemAccesses) 5092 addToWorklistIfAllowed(I); 5093 } 5094 5095 // Expand Worklist in topological order: whenever a new instruction 5096 // is added , its users should be already inside Worklist. It ensures 5097 // a uniform instruction will only be used by uniform instructions. 5098 unsigned idx = 0; 5099 while (idx != Worklist.size()) { 5100 Instruction *I = Worklist[idx++]; 5101 5102 for (auto OV : I->operand_values()) { 5103 // isOutOfScope operands cannot be uniform instructions. 5104 if (isOutOfScope(OV)) 5105 continue; 5106 // First order recurrence Phi's should typically be considered 5107 // non-uniform. 5108 auto *OP = dyn_cast<PHINode>(OV); 5109 if (OP && Legal->isFirstOrderRecurrence(OP)) 5110 continue; 5111 // If all the users of the operand are uniform, then add the 5112 // operand into the uniform worklist. 5113 auto *OI = cast<Instruction>(OV); 5114 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 5115 auto *J = cast<Instruction>(U); 5116 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 5117 })) 5118 addToWorklistIfAllowed(OI); 5119 } 5120 } 5121 5122 // For an instruction to be added into Worklist above, all its users inside 5123 // the loop should also be in Worklist. However, this condition cannot be 5124 // true for phi nodes that form a cyclic dependence. We must process phi 5125 // nodes separately. An induction variable will remain uniform if all users 5126 // of the induction variable and induction variable update remain uniform. 5127 // The code below handles both pointer and non-pointer induction variables. 5128 for (auto &Induction : Legal->getInductionVars()) { 5129 auto *Ind = Induction.first; 5130 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5131 5132 // Determine if all users of the induction variable are uniform after 5133 // vectorization. 5134 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5135 auto *I = cast<Instruction>(U); 5136 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 5137 isVectorizedMemAccessUse(I, Ind); 5138 }); 5139 if (!UniformInd) 5140 continue; 5141 5142 // Determine if all users of the induction variable update instruction are 5143 // uniform after vectorization. 
5144 auto UniformIndUpdate = 5145 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5146 auto *I = cast<Instruction>(U); 5147 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 5148 isVectorizedMemAccessUse(I, IndUpdate); 5149 }); 5150 if (!UniformIndUpdate) 5151 continue; 5152 5153 // The induction variable and its update instruction will remain uniform. 5154 addToWorklistIfAllowed(Ind); 5155 addToWorklistIfAllowed(IndUpdate); 5156 } 5157 5158 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 5159 } 5160 5161 bool LoopVectorizationCostModel::runtimeChecksRequired() { 5162 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 5163 5164 if (Legal->getRuntimePointerChecking()->Need) { 5165 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 5166 "runtime pointer checks needed. Enable vectorization of this " 5167 "loop with '#pragma clang loop vectorize(enable)' when " 5168 "compiling with -Os/-Oz", 5169 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5170 return true; 5171 } 5172 5173 if (!PSE.getUnionPredicate().getPredicates().empty()) { 5174 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 5175 "runtime SCEV checks needed. Enable vectorization of this " 5176 "loop with '#pragma clang loop vectorize(enable)' when " 5177 "compiling with -Os/-Oz", 5178 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5179 return true; 5180 } 5181 5182 // FIXME: Avoid specializing for stride==1 instead of bailing out. 5183 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 5184 reportVectorizationFailure("Runtime stride check for small trip count", 5185 "runtime stride == 1 checks needed. Enable vectorization of " 5186 "this loop without such check by compiling with -Os/-Oz", 5187 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5188 return true; 5189 } 5190 5191 return false; 5192 } 5193 5194 ElementCount 5195 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 5196 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) 5197 return ElementCount::getScalable(0); 5198 5199 if (Hints->isScalableVectorizationDisabled()) { 5200 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 5201 "ScalableVectorizationDisabled", ORE, TheLoop); 5202 return ElementCount::getScalable(0); 5203 } 5204 5205 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n"); 5206 5207 auto MaxScalableVF = ElementCount::getScalable( 5208 std::numeric_limits<ElementCount::ScalarTy>::max()); 5209 5210 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 5211 // FIXME: While for scalable vectors this is currently sufficient, this should 5212 // be replaced by a more detailed mechanism that filters out specific VFs, 5213 // instead of invalidating vectorization for a whole set of VFs based on the 5214 // MaxVF. 5215 5216 // Disable scalable vectorization if the loop contains unsupported reductions. 5217 if (!canVectorizeReductions(MaxScalableVF)) { 5218 reportVectorizationInfo( 5219 "Scalable vectorization not supported for the reduction " 5220 "operations found in this loop.", 5221 "ScalableVFUnfeasible", ORE, TheLoop); 5222 return ElementCount::getScalable(0); 5223 } 5224 5225 // Disable scalable vectorization if the loop contains any instructions 5226 // with element types not supported for scalable vectors. 
5227 if (any_of(ElementTypesInLoop, [&](Type *Ty) { 5228 return !Ty->isVoidTy() && 5229 !this->TTI.isElementTypeLegalForScalableVector(Ty); 5230 })) { 5231 reportVectorizationInfo("Scalable vectorization is not supported " 5232 "for all element types found in this loop.", 5233 "ScalableVFUnfeasible", ORE, TheLoop); 5234 return ElementCount::getScalable(0); 5235 } 5236 5237 if (Legal->isSafeForAnyVectorWidth()) 5238 return MaxScalableVF; 5239 5240 // Limit MaxScalableVF by the maximum safe dependence distance. 5241 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 5242 if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) 5243 MaxVScale = 5244 TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax(); 5245 MaxScalableVF = ElementCount::getScalable( 5246 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); 5247 if (!MaxScalableVF) 5248 reportVectorizationInfo( 5249 "Max legal vector width too small, scalable vectorization " 5250 "unfeasible.", 5251 "ScalableVFUnfeasible", ORE, TheLoop); 5252 5253 return MaxScalableVF; 5254 } 5255 5256 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF( 5257 unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) { 5258 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5259 unsigned SmallestType, WidestType; 5260 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5261 5262 // Get the maximum safe dependence distance in bits computed by LAA. 5263 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 5264 // the memory accesses that is most restrictive (involved in the smallest 5265 // dependence distance). 5266 unsigned MaxSafeElements = 5267 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 5268 5269 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 5270 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 5271 5272 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 5273 << ".\n"); 5274 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 5275 << ".\n"); 5276 5277 // First analyze the UserVF, fall back if the UserVF should be ignored. 5278 if (UserVF) { 5279 auto MaxSafeUserVF = 5280 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; 5281 5282 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { 5283 // If `VF=vscale x N` is safe, then so is `VF=N` 5284 if (UserVF.isScalable()) 5285 return FixedScalableVFPair( 5286 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); 5287 else 5288 return UserVF; 5289 } 5290 5291 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 5292 5293 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 5294 // is better to ignore the hint and let the compiler choose a suitable VF. 
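// For instance (illustrative numbers): a fixed UserVF of 16 with a maximum
// safe fixed VF of 8 is clamped to 8 below, whereas a scalable UserVF such as
// vscale x 16 that exceeds the safe bound is dropped entirely and the
// compiler picks its own factor.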
5295 if (!UserVF.isScalable()) { 5296 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5297 << " is unsafe, clamping to max safe VF=" 5298 << MaxSafeFixedVF << ".\n"); 5299 ORE->emit([&]() { 5300 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5301 TheLoop->getStartLoc(), 5302 TheLoop->getHeader()) 5303 << "User-specified vectorization factor " 5304 << ore::NV("UserVectorizationFactor", UserVF) 5305 << " is unsafe, clamping to maximum safe vectorization factor " 5306 << ore::NV("VectorizationFactor", MaxSafeFixedVF); 5307 }); 5308 return MaxSafeFixedVF; 5309 } 5310 5311 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) { 5312 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5313 << " is ignored because scalable vectors are not " 5314 "available.\n"); 5315 ORE->emit([&]() { 5316 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5317 TheLoop->getStartLoc(), 5318 TheLoop->getHeader()) 5319 << "User-specified vectorization factor " 5320 << ore::NV("UserVectorizationFactor", UserVF) 5321 << " is ignored because the target does not support scalable " 5322 "vectors. The compiler will pick a more suitable value."; 5323 }); 5324 } else { 5325 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5326 << " is unsafe. Ignoring scalable UserVF.\n"); 5327 ORE->emit([&]() { 5328 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5329 TheLoop->getStartLoc(), 5330 TheLoop->getHeader()) 5331 << "User-specified vectorization factor " 5332 << ore::NV("UserVectorizationFactor", UserVF) 5333 << " is unsafe. Ignoring the hint to let the compiler pick a " 5334 "more suitable value."; 5335 }); 5336 } 5337 } 5338 5339 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 5340 << " / " << WidestType << " bits.\n"); 5341 5342 FixedScalableVFPair Result(ElementCount::getFixed(1), 5343 ElementCount::getScalable(0)); 5344 if (auto MaxVF = 5345 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType, 5346 MaxSafeFixedVF, FoldTailByMasking)) 5347 Result.FixedVF = MaxVF; 5348 5349 if (auto MaxVF = 5350 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType, 5351 MaxSafeScalableVF, FoldTailByMasking)) 5352 if (MaxVF.isScalable()) { 5353 Result.ScalableVF = MaxVF; 5354 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF 5355 << "\n"); 5356 } 5357 5358 return Result; 5359 } 5360 5361 FixedScalableVFPair 5362 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { 5363 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 5364 // TODO: It may by useful to do since it's still likely to be dynamically 5365 // uniform if the target can skip. 5366 reportVectorizationFailure( 5367 "Not inserting runtime ptr check for divergent target", 5368 "runtime pointer checks needed. 
Not enabled for divergent target", 5369 "CantVersionLoopWithDivergentTarget", ORE, TheLoop); 5370 return FixedScalableVFPair::getNone(); 5371 } 5372 5373 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5374 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 5375 if (TC == 1) { 5376 reportVectorizationFailure("Single iteration (non) loop", 5377 "loop trip count is one, irrelevant for vectorization", 5378 "SingleIterationLoop", ORE, TheLoop); 5379 return FixedScalableVFPair::getNone(); 5380 } 5381 5382 switch (ScalarEpilogueStatus) { 5383 case CM_ScalarEpilogueAllowed: 5384 return computeFeasibleMaxVF(TC, UserVF, false); 5385 case CM_ScalarEpilogueNotAllowedUsePredicate: 5386 LLVM_FALLTHROUGH; 5387 case CM_ScalarEpilogueNotNeededUsePredicate: 5388 LLVM_DEBUG( 5389 dbgs() << "LV: vector predicate hint/switch found.\n" 5390 << "LV: Not allowing scalar epilogue, creating predicated " 5391 << "vector loop.\n"); 5392 break; 5393 case CM_ScalarEpilogueNotAllowedLowTripLoop: 5394 // fallthrough as a special case of OptForSize 5395 case CM_ScalarEpilogueNotAllowedOptSize: 5396 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize) 5397 LLVM_DEBUG( 5398 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 5399 else 5400 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip " 5401 << "count.\n"); 5402 5403 // Bail if runtime checks are required, which are not good when optimising 5404 // for size. 5405 if (runtimeChecksRequired()) 5406 return FixedScalableVFPair::getNone(); 5407 5408 break; 5409 } 5410 5411 // The only loops we can vectorize without a scalar epilogue are loops with 5412 // a bottom-test and a single exiting block. We'd have to handle the fact 5413 // that not every instruction executes on the last iteration. This will 5414 // require a lane mask which varies through the vector loop body. (TODO) 5415 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 5416 // If there was a tail-folding hint/switch, but we can't fold the tail by 5417 // masking, fallback to a vectorization with a scalar epilogue. 5418 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5419 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5420 "scalar epilogue instead.\n"); 5421 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5422 return computeFeasibleMaxVF(TC, UserVF, false); 5423 } 5424 return FixedScalableVFPair::getNone(); 5425 } 5426 5427 // Now try the tail folding 5428 5429 // Invalidate interleave groups that require an epilogue if we can't mask 5430 // the interleave-group. 5431 if (!useMaskedInterleavedAccesses(TTI)) { 5432 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() && 5433 "No decisions should have been taken at this point"); 5434 // Note: There is no need to invalidate any cost modeling decisions here, as 5435 // none were taken so far. 5436 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 5437 } 5438 5439 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true); 5440 // Avoid tail folding if the trip count is known to be a multiple of any VF 5441 // we chose. 5442 // FIXME: The condition below pessimises the case for fixed-width vectors, 5443 // when scalable VFs are also candidates for vectorization.
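// Illustrative sketch (not part of the pass): the divisibility test performed
// below with SCEV, shown on plain integers. If the exit count is an exact
// multiple of MaxVF * IC there is no tail, so neither tail folding nor a
// scalar epilogue is needed. The lambda and its parameters are hypothetical.
auto SketchNoTailRemains = [](uint64_t ExitCount, uint64_t MaxVF, uint64_t IC) {
  return ExitCount % (MaxVF * IC) == 0;
};
// E.g. ExitCount = 128, MaxVF = 8, IC = 2: 128 % 16 == 0, so the loop can be
// vectorized without a tail. The real check operates on SCEV expressions and
// uses applyLoopGuards so trip-count facts implied by dominating guards are
// taken into account, and only folds in the interleave count when the user
// specified one.
(void)SketchNoTailRemains; // Exposition only; intentionally unused.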
5444 if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) { 5445 ElementCount MaxFixedVF = MaxFactors.FixedVF; 5446 assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) && 5447 "MaxFixedVF must be a power of 2"); 5448 unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC 5449 : MaxFixedVF.getFixedValue(); 5450 ScalarEvolution *SE = PSE.getSE(); 5451 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5452 const SCEV *ExitCount = SE->getAddExpr( 5453 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5454 const SCEV *Rem = SE->getURemExpr( 5455 SE->applyLoopGuards(ExitCount, TheLoop), 5456 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5457 if (Rem->isZero()) { 5458 // Accept MaxFixedVF if we do not have a tail. 5459 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5460 return MaxFactors; 5461 } 5462 } 5463 5464 // For scalable vectors don't use tail folding for low trip counts or 5465 // optimizing for code size. We only permit this if the user has explicitly 5466 // requested it. 5467 if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate && 5468 ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate && 5469 MaxFactors.ScalableVF.isVector()) 5470 MaxFactors.ScalableVF = ElementCount::getScalable(0); 5471 5472 // If we don't know the precise trip count, or if the trip count that we 5473 // found modulo the vectorization factor is not zero, try to fold the tail 5474 // by masking. 5475 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5476 if (Legal->prepareToFoldTailByMasking()) { 5477 FoldTailByMasking = true; 5478 return MaxFactors; 5479 } 5480 5481 // If there was a tail-folding hint/switch, but we can't fold the tail by 5482 // masking, fallback to a vectorization with a scalar epilogue. 5483 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5484 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5485 "scalar epilogue instead.\n"); 5486 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5487 return MaxFactors; 5488 } 5489 5490 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5491 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5492 return FixedScalableVFPair::getNone(); 5493 } 5494 5495 if (TC == 0) { 5496 reportVectorizationFailure( 5497 "Unable to calculate the loop count due to complex control flow", 5498 "unable to calculate the loop count due to complex control flow", 5499 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5500 return FixedScalableVFPair::getNone(); 5501 } 5502 5503 reportVectorizationFailure( 5504 "Cannot optimize for size and vectorize at the same time.", 5505 "cannot optimize for size and vectorize at the same time. " 5506 "Enable vectorization of this loop with '#pragma clang loop " 5507 "vectorize(enable)' when compiling with -Os/-Oz", 5508 "NoTailLoopWithOptForSize", ORE, TheLoop); 5509 return FixedScalableVFPair::getNone(); 5510 } 5511 5512 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( 5513 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, 5514 const ElementCount &MaxSafeVF, bool FoldTailByMasking) { 5515 bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); 5516 TypeSize WidestRegister = TTI.getRegisterBitWidth( 5517 ComputeScalableMaxVF ? 
TargetTransformInfo::RGK_ScalableVector 5518 : TargetTransformInfo::RGK_FixedWidthVector); 5519 5520 // Convenience function to return the minimum of two ElementCounts. 5521 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) { 5522 assert((LHS.isScalable() == RHS.isScalable()) && 5523 "Scalable flags must match"); 5524 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS; 5525 }; 5526 5527 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 5528 // Note that both WidestRegister and WidestType may not be a powers of 2. 5529 auto MaxVectorElementCount = ElementCount::get( 5530 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), 5531 ComputeScalableMaxVF); 5532 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); 5533 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5534 << (MaxVectorElementCount * WidestType) << " bits.\n"); 5535 5536 if (!MaxVectorElementCount) { 5537 LLVM_DEBUG(dbgs() << "LV: The target has no " 5538 << (ComputeScalableMaxVF ? "scalable" : "fixed") 5539 << " vector registers.\n"); 5540 return ElementCount::getFixed(1); 5541 } 5542 5543 const auto TripCountEC = ElementCount::getFixed(ConstTripCount); 5544 if (ConstTripCount && 5545 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && 5546 (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) { 5547 // If loop trip count (TC) is known at compile time there is no point in 5548 // choosing VF greater than TC (as done in the loop below). Select maximum 5549 // power of two which doesn't exceed TC. 5550 // If MaxVectorElementCount is scalable, we only fall back on a fixed VF 5551 // when the TC is less than or equal to the known number of lanes. 5552 auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount); 5553 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not " 5554 "exceeding the constant trip count: " 5555 << ClampedConstTripCount << "\n"); 5556 return ElementCount::getFixed(ClampedConstTripCount); 5557 } 5558 5559 ElementCount MaxVF = MaxVectorElementCount; 5560 if (TTI.shouldMaximizeVectorBandwidth() || 5561 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 5562 auto MaxVectorElementCountMaxBW = ElementCount::get( 5563 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), 5564 ComputeScalableMaxVF); 5565 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); 5566 5567 // Collect all viable vectorization factors larger than the default MaxVF 5568 // (i.e. MaxVectorElementCount). 5569 SmallVector<ElementCount, 8> VFs; 5570 for (ElementCount VS = MaxVectorElementCount * 2; 5571 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) 5572 VFs.push_back(VS); 5573 5574 // For each VF calculate its register usage. 5575 auto RUs = calculateRegisterUsage(VFs); 5576 5577 // Select the largest VF which doesn't require more registers than existing 5578 // ones. 
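// Illustrative sketch (not part of the pass): the fit test applied in the loop
// below. Candidates are visited from the largest VF downwards, and the first
// one whose per-register-class demand stays within the target's budget is
// kept. The lambda is hypothetical; the real data comes from
// calculateRegisterUsage.
auto SketchFitsRegisterBudget =
    [&](const SmallMapVector<unsigned, unsigned, 4> &MaxLocalUsers) {
      for (const auto &P : MaxLocalUsers)
        if (P.second > TTI.getNumberOfRegisters(P.first))
          return false; // Likely to spill in register class P.first.
      return true;
    };
(void)SketchFitsRegisterBudget; // Exposition only; intentionally unused.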
5579 for (int i = RUs.size() - 1; i >= 0; --i) { 5580 bool Selected = true; 5581 for (auto &pair : RUs[i].MaxLocalUsers) { 5582 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5583 if (pair.second > TargetNumRegisters) 5584 Selected = false; 5585 } 5586 if (Selected) { 5587 MaxVF = VFs[i]; 5588 break; 5589 } 5590 } 5591 if (ElementCount MinVF = 5592 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { 5593 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 5594 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5595 << ") with target's minimum: " << MinVF << '\n'); 5596 MaxVF = MinVF; 5597 } 5598 } 5599 } 5600 return MaxVF; 5601 } 5602 5603 bool LoopVectorizationCostModel::isMoreProfitable( 5604 const VectorizationFactor &A, const VectorizationFactor &B) const { 5605 InstructionCost CostA = A.Cost; 5606 InstructionCost CostB = B.Cost; 5607 5608 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 5609 5610 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 5611 MaxTripCount) { 5612 // If we are folding the tail and the trip count is a known (possibly small) 5613 // constant, the trip count will be rounded up to an integer number of 5614 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 5615 // which we compare directly. When not folding the tail, the total cost will 5616 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 5617 // approximated with the per-lane cost below instead of using the tripcount 5618 // as here. 5619 auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 5620 auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 5621 return RTCostA < RTCostB; 5622 } 5623 5624 // Improve estimate for the vector width if it is scalable. 5625 unsigned EstimatedWidthA = A.Width.getKnownMinValue(); 5626 unsigned EstimatedWidthB = B.Width.getKnownMinValue(); 5627 if (Optional<unsigned> VScale = TTI.getVScaleForTuning()) { 5628 if (A.Width.isScalable()) 5629 EstimatedWidthA *= VScale.getValue(); 5630 if (B.Width.isScalable()) 5631 EstimatedWidthB *= VScale.getValue(); 5632 } 5633 5634 // Assume vscale may be larger than 1 (or the value being tuned for), 5635 // so that scalable vectorization is slightly favorable over fixed-width 5636 // vectorization. 5637 if (A.Width.isScalable() && !B.Width.isScalable()) 5638 return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA); 5639 5640 // To avoid the need for FP division: 5641 // (CostA / A.Width) < (CostB / B.Width) 5642 // <=> (CostA * B.Width) < (CostB * A.Width) 5643 return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA); 5644 } 5645 5646 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 5647 const ElementCountSet &VFCandidates) { 5648 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 5649 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 5650 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 5651 assert(VFCandidates.count(ElementCount::getFixed(1)) && 5652 "Expected Scalar VF to be a candidate"); 5653 5654 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); 5655 VectorizationFactor ChosenFactor = ScalarCost; 5656 5657 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5658 if (ForceVectorization && VFCandidates.size() > 1) { 5659 // Ignore scalar width, because the user explicitly wants vectorization. 
5660 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5661 // evaluation. 5662 ChosenFactor.Cost = InstructionCost::getMax(); 5663 } 5664 5665 SmallVector<InstructionVFPair> InvalidCosts; 5666 for (const auto &i : VFCandidates) { 5667 // The cost for scalar VF=1 is already calculated, so ignore it. 5668 if (i.isScalar()) 5669 continue; 5670 5671 VectorizationCostTy C = expectedCost(i, &InvalidCosts); 5672 VectorizationFactor Candidate(i, C.first); 5673 5674 #ifndef NDEBUG 5675 unsigned AssumedMinimumVscale = 1; 5676 if (Optional<unsigned> VScale = TTI.getVScaleForTuning()) 5677 AssumedMinimumVscale = VScale.getValue(); 5678 unsigned Width = 5679 Candidate.Width.isScalable() 5680 ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale 5681 : Candidate.Width.getFixedValue(); 5682 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 5683 << " costs: " << (Candidate.Cost / Width)); 5684 if (i.isScalable()) 5685 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of " 5686 << AssumedMinimumVscale << ")"); 5687 LLVM_DEBUG(dbgs() << ".\n"); 5688 #endif 5689 5690 if (!C.second && !ForceVectorization) { 5691 LLVM_DEBUG( 5692 dbgs() << "LV: Not considering vector loop of width " << i 5693 << " because it will not generate any vector instructions.\n"); 5694 continue; 5695 } 5696 5697 // If profitable add it to ProfitableVF list. 5698 if (isMoreProfitable(Candidate, ScalarCost)) 5699 ProfitableVFs.push_back(Candidate); 5700 5701 if (isMoreProfitable(Candidate, ChosenFactor)) 5702 ChosenFactor = Candidate; 5703 } 5704 5705 // Emit a report of VFs with invalid costs in the loop. 5706 if (!InvalidCosts.empty()) { 5707 // Group the remarks per instruction, keeping the instruction order from 5708 // InvalidCosts. 5709 std::map<Instruction *, unsigned> Numbering; 5710 unsigned I = 0; 5711 for (auto &Pair : InvalidCosts) 5712 if (!Numbering.count(Pair.first)) 5713 Numbering[Pair.first] = I++; 5714 5715 // Sort the list, first on instruction(number) then on VF. 5716 llvm::sort(InvalidCosts, 5717 [&Numbering](InstructionVFPair &A, InstructionVFPair &B) { 5718 if (Numbering[A.first] != Numbering[B.first]) 5719 return Numbering[A.first] < Numbering[B.first]; 5720 ElementCountComparator ECC; 5721 return ECC(A.second, B.second); 5722 }); 5723 5724 // For a list of ordered instruction-vf pairs: 5725 // [(load, vf1), (load, vf2), (store, vf1)] 5726 // Group the instructions together to emit separate remarks for: 5727 // load (vf1, vf2) 5728 // store (vf1) 5729 auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts); 5730 auto Subset = ArrayRef<InstructionVFPair>(); 5731 do { 5732 if (Subset.empty()) 5733 Subset = Tail.take_front(1); 5734 5735 Instruction *I = Subset.front().first; 5736 5737 // If the next instruction is different, or if there are no other pairs, 5738 // emit a remark for the collated subset. e.g. 5739 // [(load, vf1), (load, vf2))] 5740 // to emit: 5741 // remark: invalid costs for 'load' at VF=(vf, vf2) 5742 if (Subset == Tail || Tail[Subset.size()].first != I) { 5743 std::string OutString; 5744 raw_string_ostream OS(OutString); 5745 assert(!Subset.empty() && "Unexpected empty range"); 5746 OS << "Instruction with invalid costs prevented vectorization at VF=("; 5747 for (auto &Pair : Subset) 5748 OS << (Pair.second == Subset.front().second ? 
"" : ", ") 5749 << Pair.second; 5750 OS << "):"; 5751 if (auto *CI = dyn_cast<CallInst>(I)) 5752 OS << " call to " << CI->getCalledFunction()->getName(); 5753 else 5754 OS << " " << I->getOpcodeName(); 5755 OS.flush(); 5756 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); 5757 Tail = Tail.drop_front(Subset.size()); 5758 Subset = {}; 5759 } else 5760 // Grow the subset by one element 5761 Subset = Tail.take_front(Subset.size() + 1); 5762 } while (!Tail.empty()); 5763 } 5764 5765 if (!EnableCondStoresVectorization && NumPredStores) { 5766 reportVectorizationFailure("There are conditional stores.", 5767 "store that is conditionally executed prevents vectorization", 5768 "ConditionalStore", ORE, TheLoop); 5769 ChosenFactor = ScalarCost; 5770 } 5771 5772 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 5773 ChosenFactor.Cost >= ScalarCost.Cost) dbgs() 5774 << "LV: Vectorization seems to be not beneficial, " 5775 << "but was forced by a user.\n"); 5776 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 5777 return ChosenFactor; 5778 } 5779 5780 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 5781 const Loop &L, ElementCount VF) const { 5782 // Cross iteration phis such as reductions need special handling and are 5783 // currently unsupported. 5784 if (any_of(L.getHeader()->phis(), 5785 [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); })) 5786 return false; 5787 5788 // Phis with uses outside of the loop require special handling and are 5789 // currently unsupported. 5790 for (auto &Entry : Legal->getInductionVars()) { 5791 // Look for uses of the value of the induction at the last iteration. 5792 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 5793 for (User *U : PostInc->users()) 5794 if (!L.contains(cast<Instruction>(U))) 5795 return false; 5796 // Look for uses of penultimate value of the induction. 5797 for (User *U : Entry.first->users()) 5798 if (!L.contains(cast<Instruction>(U))) 5799 return false; 5800 } 5801 5802 // Induction variables that are widened require special handling that is 5803 // currently not supported. 5804 if (any_of(Legal->getInductionVars(), [&](auto &Entry) { 5805 return !(this->isScalarAfterVectorization(Entry.first, VF) || 5806 this->isProfitableToScalarize(Entry.first, VF)); 5807 })) 5808 return false; 5809 5810 // Epilogue vectorization code has not been auditted to ensure it handles 5811 // non-latch exits properly. It may be fine, but it needs auditted and 5812 // tested. 5813 if (L.getExitingBlock() != L.getLoopLatch()) 5814 return false; 5815 5816 return true; 5817 } 5818 5819 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( 5820 const ElementCount VF) const { 5821 // FIXME: We need a much better cost-model to take different parameters such 5822 // as register pressure, code size increase and cost of extra branches into 5823 // account. For now we apply a very crude heuristic and only consider loops 5824 // with vectorization factors larger than a certain value. 5825 // We also consider epilogue vectorization unprofitable for targets that don't 5826 // consider interleaving beneficial (eg. MVE). 
5827 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1) 5828 return false; 5829 if (VF.getFixedValue() >= EpilogueVectorizationMinVF) 5830 return true; 5831 return false; 5832 } 5833 5834 VectorizationFactor 5835 LoopVectorizationCostModel::selectEpilogueVectorizationFactor( 5836 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) { 5837 VectorizationFactor Result = VectorizationFactor::Disabled(); 5838 if (!EnableEpilogueVectorization) { 5839 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";); 5840 return Result; 5841 } 5842 5843 if (!isScalarEpilogueAllowed()) { 5844 LLVM_DEBUG( 5845 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is " 5846 "allowed.\n";); 5847 return Result; 5848 } 5849 5850 // Not really a cost consideration, but check for unsupported cases here to 5851 // simplify the logic. 5852 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { 5853 LLVM_DEBUG( 5854 dbgs() << "LEV: Unable to vectorize epilogue because the loop is " 5855 "not a supported candidate.\n";); 5856 return Result; 5857 } 5858 5859 if (EpilogueVectorizationForceVF > 1) { 5860 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";); 5861 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF); 5862 if (LVP.hasPlanWithVF(ForcedEC)) 5863 return {ForcedEC, 0}; 5864 else { 5865 LLVM_DEBUG( 5866 dbgs() 5867 << "LEV: Epilogue vectorization forced factor is not viable.\n";); 5868 return Result; 5869 } 5870 } 5871 5872 if (TheLoop->getHeader()->getParent()->hasOptSize() || 5873 TheLoop->getHeader()->getParent()->hasMinSize()) { 5874 LLVM_DEBUG( 5875 dbgs() 5876 << "LEV: Epilogue vectorization skipped due to opt for size.\n";); 5877 return Result; 5878 } 5879 5880 auto FixedMainLoopVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue()); 5881 if (MainLoopVF.isScalable()) 5882 LLVM_DEBUG( 5883 dbgs() << "LEV: Epilogue vectorization using scalable vectors not " 5884 "yet supported. Converting to fixed-width (VF=" 5885 << FixedMainLoopVF << ") instead\n"); 5886 5887 if (!isEpilogueVectorizationProfitable(FixedMainLoopVF)) { 5888 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for " 5889 "this loop\n"); 5890 return Result; 5891 } 5892 5893 for (auto &NextVF : ProfitableVFs) 5894 if (ElementCount::isKnownLT(NextVF.Width, FixedMainLoopVF) && 5895 (Result.Width.getFixedValue() == 1 || 5896 isMoreProfitable(NextVF, Result)) && 5897 LVP.hasPlanWithVF(NextVF.Width)) 5898 Result = NextVF; 5899 5900 if (Result != VectorizationFactor::Disabled()) 5901 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 5902 << Result.Width.getFixedValue() << "\n";); 5903 return Result; 5904 } 5905 5906 std::pair<unsigned, unsigned> 5907 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5908 unsigned MinWidth = -1U; 5909 unsigned MaxWidth = 8; 5910 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5911 // For in-loop reductions, no element types are added to ElementTypesInLoop 5912 // if there are no loads/stores in the loop. In this case, check through the 5913 // reduction variables to determine the maximum width. 5914 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) { 5915 // Reset MaxWidth so that we can find the smallest type used by recurrences 5916 // in the loop. 
5917 MaxWidth = -1U; 5918 for (auto &PhiDescriptorPair : Legal->getReductionVars()) { 5919 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second; 5920 // When finding the min width used by the recurrence we need to account 5921 // for casts on the input operands of the recurrence. 5922 MaxWidth = std::min<unsigned>( 5923 MaxWidth, std::min<unsigned>( 5924 RdxDesc.getMinWidthCastToRecurrenceTypeInBits(), 5925 RdxDesc.getRecurrenceType()->getScalarSizeInBits())); 5926 } 5927 } else { 5928 for (Type *T : ElementTypesInLoop) { 5929 MinWidth = std::min<unsigned>( 5930 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5931 MaxWidth = std::max<unsigned>( 5932 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5933 } 5934 } 5935 return {MinWidth, MaxWidth}; 5936 } 5937 5938 void LoopVectorizationCostModel::collectElementTypesForWidening() { 5939 ElementTypesInLoop.clear(); 5940 // For each block. 5941 for (BasicBlock *BB : TheLoop->blocks()) { 5942 // For each instruction in the loop. 5943 for (Instruction &I : BB->instructionsWithoutDebug()) { 5944 Type *T = I.getType(); 5945 5946 // Skip ignored values. 5947 if (ValuesToIgnore.count(&I)) 5948 continue; 5949 5950 // Only examine Loads, Stores and PHINodes. 5951 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5952 continue; 5953 5954 // Examine PHI nodes that are reduction variables. Update the type to 5955 // account for the recurrence type. 5956 if (auto *PN = dyn_cast<PHINode>(&I)) { 5957 if (!Legal->isReductionVariable(PN)) 5958 continue; 5959 const RecurrenceDescriptor &RdxDesc = 5960 Legal->getReductionVars().find(PN)->second; 5961 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 5962 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 5963 RdxDesc.getRecurrenceType(), 5964 TargetTransformInfo::ReductionFlags())) 5965 continue; 5966 T = RdxDesc.getRecurrenceType(); 5967 } 5968 5969 // Examine the stored values. 5970 if (auto *ST = dyn_cast<StoreInst>(&I)) 5971 T = ST->getValueOperand()->getType(); 5972 5973 assert(T->isSized() && 5974 "Expected the load/store/recurrence type to be sized"); 5975 5976 ElementTypesInLoop.insert(T); 5977 } 5978 } 5979 } 5980 5981 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 5982 unsigned LoopCost) { 5983 // -- The interleave heuristics -- 5984 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5985 // There are many micro-architectural considerations that we can't predict 5986 // at this level. For example, frontend pressure (on decode or fetch) due to 5987 // code size, or the number and capabilities of the execution ports. 5988 // 5989 // We use the following heuristics to select the interleave count: 5990 // 1. If the code has reductions, then we interleave to break the cross 5991 // iteration dependency. 5992 // 2. If the loop is really small, then we interleave to reduce the loop 5993 // overhead. 5994 // 3. We don't interleave if we think that we will spill registers to memory 5995 // due to the increased register pressure. 5996 5997 if (!isScalarEpilogueAllowed()) 5998 return 1; 5999 6000 // We used the distance for the interleave count. 6001 if (Legal->getMaxSafeDepDistBytes() != -1U) 6002 return 1; 6003 6004 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 6005 const bool HasReductions = !Legal->getReductionVars().empty(); 6006 // Do not interleave loops with a relatively small known or estimated trip 6007 // count. 
But we will interleave when InterleaveSmallLoopScalarReduction is 6008 // enabled, and the code has scalar reductions(HasReductions && VF = 1), 6009 // because with the above conditions interleaving can expose ILP and break 6010 // cross iteration dependences for reductions. 6011 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 6012 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 6013 return 1; 6014 6015 RegisterUsage R = calculateRegisterUsage({VF})[0]; 6016 // We divide by these constants so assume that we have at least one 6017 // instruction that uses at least one register. 6018 for (auto& pair : R.MaxLocalUsers) { 6019 pair.second = std::max(pair.second, 1U); 6020 } 6021 6022 // We calculate the interleave count using the following formula. 6023 // Subtract the number of loop invariants from the number of available 6024 // registers. These registers are used by all of the interleaved instances. 6025 // Next, divide the remaining registers by the number of registers that is 6026 // required by the loop, in order to estimate how many parallel instances 6027 // fit without causing spills. All of this is rounded down if necessary to be 6028 // a power of two. We want power of two interleave count to simplify any 6029 // addressing operations or alignment considerations. 6030 // We also want power of two interleave counts to ensure that the induction 6031 // variable of the vector loop wraps to zero, when tail is folded by masking; 6032 // this currently happens when OptForSize, in which case IC is set to 1 above. 6033 unsigned IC = UINT_MAX; 6034 6035 for (auto& pair : R.MaxLocalUsers) { 6036 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 6037 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 6038 << " registers of " 6039 << TTI.getRegisterClassName(pair.first) << " register class\n"); 6040 if (VF.isScalar()) { 6041 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 6042 TargetNumRegisters = ForceTargetNumScalarRegs; 6043 } else { 6044 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 6045 TargetNumRegisters = ForceTargetNumVectorRegs; 6046 } 6047 unsigned MaxLocalUsers = pair.second; 6048 unsigned LoopInvariantRegs = 0; 6049 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 6050 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 6051 6052 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 6053 // Don't count the induction variable as interleaved. 6054 if (EnableIndVarRegisterHeur) { 6055 TmpIC = 6056 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 6057 std::max(1U, (MaxLocalUsers - 1))); 6058 } 6059 6060 IC = std::min(IC, TmpIC); 6061 } 6062 6063 // Clamp the interleave ranges to reasonable counts. 6064 unsigned MaxInterleaveCount = 6065 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 6066 6067 // Check if the user has overridden the max. 6068 if (VF.isScalar()) { 6069 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 6070 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 6071 } else { 6072 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 6073 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 6074 } 6075 6076 // If trip count is known or estimated compile time constant, limit the 6077 // interleave count to be less than the trip count divided by VF, provided it 6078 // is at least 1. 6079 // 6080 // For scalable vectors we can't know if interleaving is beneficial. 
It may 6081 // not be beneficial for small loops if none of the lanes in the second vector 6082 // iterations is enabled. However, for larger loops, there is likely to be a 6083 // similar benefit as for fixed-width vectors. For now, we choose to leave 6084 // the InterleaveCount as if vscale is '1', although if some information about 6085 // the vector is known (e.g. min vector size), we can make a better decision. 6086 if (BestKnownTC) { 6087 MaxInterleaveCount = 6088 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 6089 // Make sure MaxInterleaveCount is greater than 0. 6090 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 6091 } 6092 6093 assert(MaxInterleaveCount > 0 && 6094 "Maximum interleave count must be greater than 0"); 6095 6096 // Clamp the calculated IC to be between the 1 and the max interleave count 6097 // that the target and trip count allows. 6098 if (IC > MaxInterleaveCount) 6099 IC = MaxInterleaveCount; 6100 else 6101 // Make sure IC is greater than 0. 6102 IC = std::max(1u, IC); 6103 6104 assert(IC > 0 && "Interleave count must be greater than 0."); 6105 6106 // If we did not calculate the cost for VF (because the user selected the VF) 6107 // then we calculate the cost of VF here. 6108 if (LoopCost == 0) { 6109 InstructionCost C = expectedCost(VF).first; 6110 assert(C.isValid() && "Expected to have chosen a VF with valid cost"); 6111 LoopCost = *C.getValue(); 6112 } 6113 6114 assert(LoopCost && "Non-zero loop cost expected"); 6115 6116 // Interleave if we vectorized this loop and there is a reduction that could 6117 // benefit from interleaving. 6118 if (VF.isVector() && HasReductions) { 6119 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6120 return IC; 6121 } 6122 6123 // Note that if we've already vectorized the loop we will have done the 6124 // runtime check and so interleaving won't require further checks. 6125 bool InterleavingRequiresRuntimePointerCheck = 6126 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 6127 6128 // We want to interleave small loops in order to reduce the loop overhead and 6129 // potentially expose ILP opportunities. 6130 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 6131 << "LV: IC is " << IC << '\n' 6132 << "LV: VF is " << VF << '\n'); 6133 const bool AggressivelyInterleaveReductions = 6134 TTI.enableAggressiveInterleaving(HasReductions); 6135 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 6136 // We assume that the cost overhead is 1 and we use the cost model 6137 // to estimate the cost of the loop and interleave until the cost of the 6138 // loop overhead is about 5% of the cost of the loop. 6139 unsigned SmallIC = 6140 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 6141 6142 // Interleave until store/load ports (estimated by max interleave count) are 6143 // saturated. 6144 unsigned NumStores = Legal->getNumStores(); 6145 unsigned NumLoads = Legal->getNumLoads(); 6146 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 6147 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 6148 6149 // There is little point in interleaving for reductions containing selects 6150 // and compares when VF=1 since it may just create more overhead than it's 6151 // worth for loops with small trip counts. This is because we still have to 6152 // do the final reduction after the loop. 
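// Illustrative sketch (not part of the pass): a worked example of the
// small-loop clamping computed just above, with hypothetical numbers. With
// IC = 8, a per-iteration LoopCost of 4 and SmallLoopCost of 20, the count is
// clamped to PowerOf2Floor(20 / 4) = 4, i.e. enough copies that the (assumed
// unit) loop overhead stays a small fraction of the body cost. With 2 stores
// and 1 load, StoresIC = 8 / 2 = 4 and LoadsIC = 8 / 1 = 8, modelling how
// quickly the store and load ports would saturate.
auto SketchSmallIC = [](unsigned IC, unsigned LoopCost, unsigned SmallLoopCost) {
  return std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
};
(void)SketchSmallIC; // Exposition only; intentionally unused.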
6153 bool HasSelectCmpReductions = 6154 HasReductions && 6155 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 6156 const RecurrenceDescriptor &RdxDesc = Reduction.second; 6157 return RecurrenceDescriptor::isSelectCmpRecurrenceKind( 6158 RdxDesc.getRecurrenceKind()); 6159 }); 6160 if (HasSelectCmpReductions) { 6161 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n"); 6162 return 1; 6163 } 6164 6165 // If we have a scalar reduction (vector reductions are already dealt with 6166 // by this point), we can increase the critical path length if the loop 6167 // we're interleaving is inside another loop. For tree-wise reductions 6168 // set the limit to 2, and for ordered reductions it's best to disable 6169 // interleaving entirely. 6170 if (HasReductions && TheLoop->getLoopDepth() > 1) { 6171 bool HasOrderedReductions = 6172 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 6173 const RecurrenceDescriptor &RdxDesc = Reduction.second; 6174 return RdxDesc.isOrdered(); 6175 }); 6176 if (HasOrderedReductions) { 6177 LLVM_DEBUG( 6178 dbgs() << "LV: Not interleaving scalar ordered reductions.\n"); 6179 return 1; 6180 } 6181 6182 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 6183 SmallIC = std::min(SmallIC, F); 6184 StoresIC = std::min(StoresIC, F); 6185 LoadsIC = std::min(LoadsIC, F); 6186 } 6187 6188 if (EnableLoadStoreRuntimeInterleave && 6189 std::max(StoresIC, LoadsIC) > SmallIC) { 6190 LLVM_DEBUG( 6191 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 6192 return std::max(StoresIC, LoadsIC); 6193 } 6194 6195 // If there are scalar reductions and TTI has enabled aggressive 6196 // interleaving for reductions, we will interleave to expose ILP. 6197 if (InterleaveSmallLoopScalarReduction && VF.isScalar() && 6198 AggressivelyInterleaveReductions) { 6199 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6200 // Interleave no less than SmallIC but not as aggressive as the normal IC 6201 // to satisfy the rare situation when resources are too limited. 6202 return std::max(IC / 2, SmallIC); 6203 } else { 6204 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 6205 return SmallIC; 6206 } 6207 } 6208 6209 // Interleave if this is a large loop (small loops are already dealt with by 6210 // this point) that could benefit from interleaving. 6211 if (AggressivelyInterleaveReductions) { 6212 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6213 return IC; 6214 } 6215 6216 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 6217 return 1; 6218 } 6219 6220 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 6221 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) { 6222 // This function calculates the register usage by measuring the highest number 6223 // of values that are alive at a single location. Obviously, this is a very 6224 // rough estimation. We scan the loop in a topological order in order and 6225 // assign a number to each instruction. We use RPO to ensure that defs are 6226 // met before their users. We assume that each instruction that has in-loop 6227 // users starts an interval. We record every time that an in-loop value is 6228 // used, so we have a list of the first and last occurrences of each 6229 // instruction. Next, we transpose this data structure into a multi map that 6230 // holds the list of intervals that *end* at a specific location. This multi 6231 // map allows us to perform a linear search. 
We scan the instructions linearly 6232 // and record each time that a new interval starts, by placing it in a set. 6233 // If we find this value in the multi-map then we remove it from the set. 6234 // The max register usage is the maximum size of the set. 6235 // We also search for instructions that are defined outside the loop, but are 6236 // used inside the loop. We need this number separately from the max-interval 6237 // usage number because when we unroll, loop-invariant values do not take 6238 // more register. 6239 LoopBlocksDFS DFS(TheLoop); 6240 DFS.perform(LI); 6241 6242 RegisterUsage RU; 6243 6244 // Each 'key' in the map opens a new interval. The values 6245 // of the map are the index of the 'last seen' usage of the 6246 // instruction that is the key. 6247 using IntervalMap = DenseMap<Instruction *, unsigned>; 6248 6249 // Maps instruction to its index. 6250 SmallVector<Instruction *, 64> IdxToInstr; 6251 // Marks the end of each interval. 6252 IntervalMap EndPoint; 6253 // Saves the list of instruction indices that are used in the loop. 6254 SmallPtrSet<Instruction *, 8> Ends; 6255 // Saves the list of values that are used in the loop but are 6256 // defined outside the loop, such as arguments and constants. 6257 SmallPtrSet<Value *, 8> LoopInvariants; 6258 6259 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6260 for (Instruction &I : BB->instructionsWithoutDebug()) { 6261 IdxToInstr.push_back(&I); 6262 6263 // Save the end location of each USE. 6264 for (Value *U : I.operands()) { 6265 auto *Instr = dyn_cast<Instruction>(U); 6266 6267 // Ignore non-instruction values such as arguments, constants, etc. 6268 if (!Instr) 6269 continue; 6270 6271 // If this instruction is outside the loop then record it and continue. 6272 if (!TheLoop->contains(Instr)) { 6273 LoopInvariants.insert(Instr); 6274 continue; 6275 } 6276 6277 // Overwrite previous end points. 6278 EndPoint[Instr] = IdxToInstr.size(); 6279 Ends.insert(Instr); 6280 } 6281 } 6282 } 6283 6284 // Saves the list of intervals that end with the index in 'key'. 6285 using InstrList = SmallVector<Instruction *, 2>; 6286 DenseMap<unsigned, InstrList> TransposeEnds; 6287 6288 // Transpose the EndPoints to a list of values that end at each index. 6289 for (auto &Interval : EndPoint) 6290 TransposeEnds[Interval.second].push_back(Interval.first); 6291 6292 SmallPtrSet<Instruction *, 8> OpenIntervals; 6293 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6294 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 6295 6296 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6297 6298 // A lambda that gets the register usage for the given type and VF. 6299 const auto &TTICapture = TTI; 6300 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned { 6301 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 6302 return 0; 6303 InstructionCost::CostType RegUsage = 6304 *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue(); 6305 assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() && 6306 "Nonsensical values for register usage."); 6307 return RegUsage; 6308 }; 6309 6310 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 6311 Instruction *I = IdxToInstr[i]; 6312 6313 // Remove all of the instructions that end at this location. 6314 InstrList &List = TransposeEnds[i]; 6315 for (Instruction *ToRemove : List) 6316 OpenIntervals.erase(ToRemove); 6317 6318 // Ignore instructions that are never used within the loop. 
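// Illustrative sketch (not part of the pass): the interval-based register
// estimate described at the top of this function, reduced to plain indices.
// Each value occupies [def index, last use index]; the estimate is the largest
// number of intervals open at any single point. Names are hypothetical.
auto SketchMaxLiveValues =
    [](ArrayRef<std::pair<unsigned, unsigned>> Intervals) {
      unsigned MaxLive = 0;
      // At each definition point, count how many earlier definitions are
      // still live (their last use has not been reached yet).
      for (const auto &Cur : Intervals) {
        unsigned Live = 1;
        for (const auto &Other : Intervals)
          if (Other.first < Cur.first && Other.second >= Cur.first)
            ++Live;
        MaxLive = std::max(MaxLive, Live);
      }
      return MaxLive;
    };
// E.g. intervals {0,3}, {1,2}, {2,4} give a maximum of 3 simultaneously live
// values at index 2. The real code avoids the quadratic rescan by transposing
// the end points into TransposeEnds and walking the instructions once.
(void)SketchMaxLiveValues; // Exposition only; intentionally unused.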
6319 if (!Ends.count(I)) 6320 continue; 6321 6322 // Skip ignored values. 6323 if (ValuesToIgnore.count(I)) 6324 continue; 6325 6326 // For each VF find the maximum usage of registers. 6327 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6328 // Count the number of live intervals. 6329 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6330 6331 if (VFs[j].isScalar()) { 6332 for (auto Inst : OpenIntervals) { 6333 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6334 if (RegUsage.find(ClassID) == RegUsage.end()) 6335 RegUsage[ClassID] = 1; 6336 else 6337 RegUsage[ClassID] += 1; 6338 } 6339 } else { 6340 collectUniformsAndScalars(VFs[j]); 6341 for (auto Inst : OpenIntervals) { 6342 // Skip ignored values for VF > 1. 6343 if (VecValuesToIgnore.count(Inst)) 6344 continue; 6345 if (isScalarAfterVectorization(Inst, VFs[j])) { 6346 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6347 if (RegUsage.find(ClassID) == RegUsage.end()) 6348 RegUsage[ClassID] = 1; 6349 else 6350 RegUsage[ClassID] += 1; 6351 } else { 6352 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6353 if (RegUsage.find(ClassID) == RegUsage.end()) 6354 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6355 else 6356 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6357 } 6358 } 6359 } 6360 6361 for (auto& pair : RegUsage) { 6362 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6363 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6364 else 6365 MaxUsages[j][pair.first] = pair.second; 6366 } 6367 } 6368 6369 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6370 << OpenIntervals.size() << '\n'); 6371 6372 // Add the current instruction to the list of open intervals. 6373 OpenIntervals.insert(I); 6374 } 6375 6376 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6377 SmallMapVector<unsigned, unsigned, 4> Invariant; 6378 6379 for (auto Inst : LoopInvariants) { 6380 unsigned Usage = 6381 VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]); 6382 unsigned ClassID = 6383 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6384 if (Invariant.find(ClassID) == Invariant.end()) 6385 Invariant[ClassID] = Usage; 6386 else 6387 Invariant[ClassID] += Usage; 6388 } 6389 6390 LLVM_DEBUG({ 6391 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6392 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6393 << " item\n"; 6394 for (const auto &pair : MaxUsages[i]) { 6395 dbgs() << "LV(REG): RegisterClass: " 6396 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6397 << " registers\n"; 6398 } 6399 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6400 << " item\n"; 6401 for (const auto &pair : Invariant) { 6402 dbgs() << "LV(REG): RegisterClass: " 6403 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6404 << " registers\n"; 6405 } 6406 }); 6407 6408 RU.LoopInvariantRegs = Invariant; 6409 RU.MaxLocalUsers = MaxUsages[i]; 6410 RUs[i] = RU; 6411 } 6412 6413 return RUs; 6414 } 6415 6416 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I, 6417 ElementCount VF) { 6418 // TODO: Cost model for emulated masked load/store is completely 6419 // broken. This hack guides the cost model to use an artificially 6420 // high enough value to practically disable vectorization with such 6421 // operations, except where previously deployed legality hack allowed 6422 // using very low cost values. 
This is to avoid regressions coming simply 6423 // from moving the "masked load/store" check from legality to the cost model. 6424 // Masked Load/Gather emulation was previously never allowed. 6425 // A limited amount of Masked Store/Scatter emulation was allowed. 6426 assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction"); 6427 return isa<LoadInst>(I) || 6428 (isa<StoreInst>(I) && 6429 NumPredStores > NumberOfStoresToPredicate); 6430 } 6431 6432 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6433 // If we aren't vectorizing the loop, or if we've already collected the 6434 // instructions to scalarize, there's nothing to do. Collection may already 6435 // have occurred if we have a user-selected VF and are now computing the 6436 // expected cost for interleaving. 6437 if (VF.isScalar() || VF.isZero() || 6438 InstsToScalarize.find(VF) != InstsToScalarize.end()) 6439 return; 6440 6441 // Initialize a mapping for VF in InstsToScalarize. If we find that it's 6442 // not profitable to scalarize any instructions, the presence of VF in the 6443 // map will indicate that we've analyzed it already. 6444 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6445 6446 // Find all the instructions that are scalar with predication in the loop and 6447 // determine if it would be better to not if-convert the blocks they are in. 6448 // If so, we also record the instructions to scalarize. 6449 for (BasicBlock *BB : TheLoop->blocks()) { 6450 if (!blockNeedsPredicationForAnyReason(BB)) 6451 continue; 6452 for (Instruction &I : *BB) 6453 if (isScalarWithPredication(&I, VF)) { 6454 ScalarCostsTy ScalarCosts; 6455 // Do not apply discount if scalable, because that would lead to 6456 // invalid scalarization costs. 6457 // Do not apply discount logic if hacked cost is needed 6458 // for emulated masked memrefs. 6459 if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) && 6460 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6461 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6462 // Remember that BB will remain after vectorization. 6463 PredicatedBBsAfterVectorization.insert(BB); 6464 } 6465 } 6466 } 6467 6468 int LoopVectorizationCostModel::computePredInstDiscount( 6469 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6470 assert(!isUniformAfterVectorization(PredInst, VF) && 6471 "Instruction marked uniform-after-vectorization will be predicated"); 6472 6473 // Initialize the discount to zero, meaning that the scalar version and the 6474 // vector version cost the same. 6475 InstructionCost Discount = 0; 6476 6477 // Holds instructions to analyze. The instructions we visit are mapped in 6478 // ScalarCosts. Those instructions are the ones that would be scalarized if 6479 // we find that the scalar version costs less. 6480 SmallVector<Instruction *, 8> Worklist; 6481 6482 // Returns true if the given instruction can be scalarized. 6483 auto canBeScalarized = [&](Instruction *I) -> bool { 6484 // We only attempt to scalarize instructions forming a single-use chain 6485 // from the original predicated block that would otherwise be vectorized. 6486 // Although not strictly necessary, we give up on instructions we know will 6487 // already be scalar to avoid traversing chains that are unlikely to be 6488 // beneficial.
6489 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6490 isScalarAfterVectorization(I, VF)) 6491 return false; 6492 6493 // If the instruction is scalar with predication, it will be analyzed 6494 // separately. We ignore it within the context of PredInst. 6495 if (isScalarWithPredication(I, VF)) 6496 return false; 6497 6498 // If any of the instruction's operands are uniform after vectorization, 6499 // the instruction cannot be scalarized. This prevents, for example, a 6500 // masked load from being scalarized. 6501 // 6502 // We assume we will only emit a value for lane zero of an instruction 6503 // marked uniform after vectorization, rather than VF identical values. 6504 // Thus, if we scalarize an instruction that uses a uniform, we would 6505 // create uses of values corresponding to the lanes we aren't emitting code 6506 // for. This behavior can be changed by allowing getScalarValue to clone 6507 // the lane zero values for uniforms rather than asserting. 6508 for (Use &U : I->operands()) 6509 if (auto *J = dyn_cast<Instruction>(U.get())) 6510 if (isUniformAfterVectorization(J, VF)) 6511 return false; 6512 6513 // Otherwise, we can scalarize the instruction. 6514 return true; 6515 }; 6516 6517 // Compute the expected cost discount from scalarizing the entire expression 6518 // feeding the predicated instruction. We currently only consider expressions 6519 // that are single-use instruction chains. 6520 Worklist.push_back(PredInst); 6521 while (!Worklist.empty()) { 6522 Instruction *I = Worklist.pop_back_val(); 6523 6524 // If we've already analyzed the instruction, there's nothing to do. 6525 if (ScalarCosts.find(I) != ScalarCosts.end()) 6526 continue; 6527 6528 // Compute the cost of the vector instruction. Note that this cost already 6529 // includes the scalarization overhead of the predicated instruction. 6530 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6531 6532 // Compute the cost of the scalarized instruction. This cost is the cost of 6533 // the instruction as if it wasn't if-converted and instead remained in the 6534 // predicated block. We will scale this cost by block probability after 6535 // computing the scalarization overhead. 6536 InstructionCost ScalarCost = 6537 VF.getFixedValue() * 6538 getInstructionCost(I, ElementCount::getFixed(1)).first; 6539 6540 // Compute the scalarization overhead of needed insertelement instructions 6541 // and phi nodes. 6542 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) { 6543 ScalarCost += TTI.getScalarizationOverhead( 6544 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6545 APInt::getAllOnes(VF.getFixedValue()), true, false); 6546 ScalarCost += 6547 VF.getFixedValue() * 6548 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6549 } 6550 6551 // Compute the scalarization overhead of needed extractelement 6552 // instructions. For each of the instruction's operands, if the operand can 6553 // be scalarized, add it to the worklist; otherwise, account for the 6554 // overhead. 
6555 for (Use &U : I->operands()) 6556 if (auto *J = dyn_cast<Instruction>(U.get())) { 6557 assert(VectorType::isValidElementType(J->getType()) && 6558 "Instruction has non-scalar type"); 6559 if (canBeScalarized(J)) 6560 Worklist.push_back(J); 6561 else if (needsExtract(J, VF)) { 6562 ScalarCost += TTI.getScalarizationOverhead( 6563 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6564 APInt::getAllOnes(VF.getFixedValue()), false, true); 6565 } 6566 } 6567 6568 // Scale the total scalar cost by block probability. 6569 ScalarCost /= getReciprocalPredBlockProb(); 6570 6571 // Compute the discount. A non-negative discount means the vector version 6572 // of the instruction costs more, and scalarizing would be beneficial. 6573 Discount += VectorCost - ScalarCost; 6574 ScalarCosts[I] = ScalarCost; 6575 } 6576 6577 return *Discount.getValue(); 6578 } 6579 6580 LoopVectorizationCostModel::VectorizationCostTy 6581 LoopVectorizationCostModel::expectedCost( 6582 ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { 6583 VectorizationCostTy Cost; 6584 6585 // For each block. 6586 for (BasicBlock *BB : TheLoop->blocks()) { 6587 VectorizationCostTy BlockCost; 6588 6589 // For each instruction in the old loop. 6590 for (Instruction &I : BB->instructionsWithoutDebug()) { 6591 // Skip ignored values. 6592 if (ValuesToIgnore.count(&I) || 6593 (VF.isVector() && VecValuesToIgnore.count(&I))) 6594 continue; 6595 6596 VectorizationCostTy C = getInstructionCost(&I, VF); 6597 6598 // Check if we should override the cost. 6599 if (C.first.isValid() && 6600 ForceTargetInstructionCost.getNumOccurrences() > 0) 6601 C.first = InstructionCost(ForceTargetInstructionCost); 6602 6603 // Keep a list of instructions with invalid costs. 6604 if (Invalid && !C.first.isValid()) 6605 Invalid->emplace_back(&I, VF); 6606 6607 BlockCost.first += C.first; 6608 BlockCost.second |= C.second; 6609 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6610 << " for VF " << VF << " For instruction: " << I 6611 << '\n'); 6612 } 6613 6614 // If we are vectorizing a predicated block, it will have been 6615 // if-converted. This means that the block's instructions (aside from 6616 // stores and instructions that may divide by zero) will now be 6617 // unconditionally executed. For the scalar case, we may not always execute 6618 // the predicated block, if it is an if-else block. Thus, scale the block's 6619 // cost by the probability of executing it. blockNeedsPredication from 6620 // Legal is used so as to not include all blocks in tail folded loops. 6621 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6622 BlockCost.first /= getReciprocalPredBlockProb(); 6623 6624 Cost.first += BlockCost.first; 6625 Cost.second |= BlockCost.second; 6626 } 6627 6628 return Cost; 6629 } 6630 6631 /// Gets Address Access SCEV after verifying that the access pattern 6632 /// is loop invariant except the induction variable dependence. 6633 /// 6634 /// This SCEV can be sent to the Target in order to estimate the address 6635 /// calculation cost. 6636 static const SCEV *getAddressAccessSCEV( 6637 Value *Ptr, 6638 LoopVectorizationLegality *Legal, 6639 PredicatedScalarEvolution &PSE, 6640 const Loop *TheLoop) { 6641 6642 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6643 if (!Gep) 6644 return nullptr; 6645 6646 // We are looking for a gep with all loop invariant indices except for one 6647 // which should be an induction variable. 
6648 auto SE = PSE.getSE(); 6649 unsigned NumOperands = Gep->getNumOperands(); 6650 for (unsigned i = 1; i < NumOperands; ++i) { 6651 Value *Opd = Gep->getOperand(i); 6652 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6653 !Legal->isInductionVariable(Opd)) 6654 return nullptr; 6655 } 6656 6657 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6658 return PSE.getSCEV(Ptr); 6659 } 6660 6661 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6662 return Legal->hasStride(I->getOperand(0)) || 6663 Legal->hasStride(I->getOperand(1)); 6664 } 6665 6666 InstructionCost 6667 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6668 ElementCount VF) { 6669 assert(VF.isVector() && 6670 "Scalarization cost of instruction implies vectorization."); 6671 if (VF.isScalable()) 6672 return InstructionCost::getInvalid(); 6673 6674 Type *ValTy = getLoadStoreType(I); 6675 auto SE = PSE.getSE(); 6676 6677 unsigned AS = getLoadStoreAddressSpace(I); 6678 Value *Ptr = getLoadStorePointerOperand(I); 6679 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6680 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost` 6681 // that it is being called from this specific place. 6682 6683 // Figure out whether the access is strided and get the stride value 6684 // if it's known in compile time 6685 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6686 6687 // Get the cost of the scalar memory instruction and address computation. 6688 InstructionCost Cost = 6689 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6690 6691 // Don't pass *I here, since it is scalar but will actually be part of a 6692 // vectorized loop where the user of it is a vectorized instruction. 6693 const Align Alignment = getLoadStoreAlignment(I); 6694 Cost += VF.getKnownMinValue() * 6695 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6696 AS, TTI::TCK_RecipThroughput); 6697 6698 // Get the overhead of the extractelement and insertelement instructions 6699 // we might create due to scalarization. 6700 Cost += getScalarizationOverhead(I, VF); 6701 6702 // If we have a predicated load/store, it will need extra i1 extracts and 6703 // conditional branches, but may not be executed for each vector lane. Scale 6704 // the cost by the probability of executing the predicated block. 6705 if (isPredicatedInst(I, VF)) { 6706 Cost /= getReciprocalPredBlockProb(); 6707 6708 // Add the cost of an i1 extract and a branch 6709 auto *Vec_i1Ty = 6710 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 6711 Cost += TTI.getScalarizationOverhead( 6712 Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()), 6713 /*Insert=*/false, /*Extract=*/true); 6714 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 6715 6716 if (useEmulatedMaskMemRefHack(I, VF)) 6717 // Artificially setting to a high enough value to practically disable 6718 // vectorization with such operations. 
6719 Cost = 3000000; 6720 } 6721 6722 return Cost; 6723 } 6724 6725 InstructionCost 6726 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6727 ElementCount VF) { 6728 Type *ValTy = getLoadStoreType(I); 6729 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6730 Value *Ptr = getLoadStorePointerOperand(I); 6731 unsigned AS = getLoadStoreAddressSpace(I); 6732 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr); 6733 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6734 6735 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6736 "Stride should be 1 or -1 for consecutive memory access"); 6737 const Align Alignment = getLoadStoreAlignment(I); 6738 InstructionCost Cost = 0; 6739 if (Legal->isMaskRequired(I)) 6740 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6741 CostKind); 6742 else 6743 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6744 CostKind, I); 6745 6746 bool Reverse = ConsecutiveStride < 0; 6747 if (Reverse) 6748 Cost += 6749 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6750 return Cost; 6751 } 6752 6753 InstructionCost 6754 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6755 ElementCount VF) { 6756 assert(Legal->isUniformMemOp(*I)); 6757 6758 Type *ValTy = getLoadStoreType(I); 6759 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6760 const Align Alignment = getLoadStoreAlignment(I); 6761 unsigned AS = getLoadStoreAddressSpace(I); 6762 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6763 if (isa<LoadInst>(I)) { 6764 return TTI.getAddressComputationCost(ValTy) + 6765 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6766 CostKind) + 6767 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6768 } 6769 StoreInst *SI = cast<StoreInst>(I); 6770 6771 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6772 return TTI.getAddressComputationCost(ValTy) + 6773 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6774 CostKind) + 6775 (isLoopInvariantStoreValue 6776 ? 0 6777 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 6778 VF.getKnownMinValue() - 1)); 6779 } 6780 6781 InstructionCost 6782 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 6783 ElementCount VF) { 6784 Type *ValTy = getLoadStoreType(I); 6785 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6786 const Align Alignment = getLoadStoreAlignment(I); 6787 const Value *Ptr = getLoadStorePointerOperand(I); 6788 6789 return TTI.getAddressComputationCost(VectorTy) + 6790 TTI.getGatherScatterOpCost( 6791 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 6792 TargetTransformInfo::TCK_RecipThroughput, I); 6793 } 6794 6795 InstructionCost 6796 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 6797 ElementCount VF) { 6798 // TODO: Once we have support for interleaving with scalable vectors 6799 // we can calculate the cost properly here. 
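// As a purely illustrative example, a factor-2 group can come from a source
// loop like "for (i) Sum += A[2*i] + A[2*i+1];": both loads are costed below
// as a single wide load covering both members plus the shuffles that split it.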
6800 if (VF.isScalable()) 6801 return InstructionCost::getInvalid(); 6802 6803 Type *ValTy = getLoadStoreType(I); 6804 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6805 unsigned AS = getLoadStoreAddressSpace(I); 6806 6807 auto Group = getInterleavedAccessGroup(I); 6808 assert(Group && "Fail to get an interleaved access group."); 6809 6810 unsigned InterleaveFactor = Group->getFactor(); 6811 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 6812 6813 // Holds the indices of existing members in the interleaved group. 6814 SmallVector<unsigned, 4> Indices; 6815 for (unsigned IF = 0; IF < InterleaveFactor; IF++) 6816 if (Group->getMember(IF)) 6817 Indices.push_back(IF); 6818 6819 // Calculate the cost of the whole interleaved group. 6820 bool UseMaskForGaps = 6821 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) || 6822 (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor())); 6823 InstructionCost Cost = TTI.getInterleavedMemoryOpCost( 6824 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 6825 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 6826 6827 if (Group->isReverse()) { 6828 // TODO: Add support for reversed masked interleaved access. 6829 assert(!Legal->isMaskRequired(I) && 6830 "Reverse masked interleaved access not supported."); 6831 Cost += 6832 Group->getNumMembers() * 6833 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6834 } 6835 return Cost; 6836 } 6837 6838 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost( 6839 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { 6840 using namespace llvm::PatternMatch; 6841 // Early exit for no inloop reductions 6842 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) 6843 return None; 6844 auto *VectorTy = cast<VectorType>(Ty); 6845 6846 // We are looking for a pattern of, and finding the minimal acceptable cost: 6847 // reduce(mul(ext(A), ext(B))) or 6848 // reduce(mul(A, B)) or 6849 // reduce(ext(A)) or 6850 // reduce(A). 6851 // The basic idea is that we walk down the tree to do that, finding the root 6852 // reduction instruction in InLoopReductionImmediateChains. From there we find 6853 // the pattern of mul/ext and test the cost of the entire pattern vs the cost 6854 // of the components. If the reduction cost is lower then we return it for the 6855 // reduction instruction and 0 for the other instructions in the pattern. If 6856 // it is not we return an invalid cost specifying the orignal cost method 6857 // should be used. 6858 Instruction *RetI = I; 6859 if (match(RetI, m_ZExtOrSExt(m_Value()))) { 6860 if (!RetI->hasOneUser()) 6861 return None; 6862 RetI = RetI->user_back(); 6863 } 6864 if (match(RetI, m_Mul(m_Value(), m_Value())) && 6865 RetI->user_back()->getOpcode() == Instruction::Add) { 6866 if (!RetI->hasOneUser()) 6867 return None; 6868 RetI = RetI->user_back(); 6869 } 6870 6871 // Test if the found instruction is a reduction, and if not return an invalid 6872 // cost specifying the parent to use the original cost modelling. 6873 if (!InLoopReductionImmediateChains.count(RetI)) 6874 return None; 6875 6876 // Find the reduction this chain is a part of and calculate the basic cost of 6877 // the reduction on its own. 
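// For example (the source is illustrative only), for
// "Sum += (int)(short)A[i] * (int)(short)B[i]" the walk above leaves RetI at
// the 'add' of the reduction chain, and the sext/mul operands feeding it are
// re-costed further down against the target's extended-add-reduction costs.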
6878 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 6879 Instruction *ReductionPhi = LastChain; 6880 while (!isa<PHINode>(ReductionPhi)) 6881 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 6882 6883 const RecurrenceDescriptor &RdxDesc = 6884 Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second; 6885 6886 InstructionCost BaseCost = TTI.getArithmeticReductionCost( 6887 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind); 6888 6889 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a 6890 // normal fmul instruction to the cost of the fadd reduction. 6891 if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd) 6892 BaseCost += 6893 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind); 6894 6895 // If we're using ordered reductions then we can just return the base cost 6896 // here, since getArithmeticReductionCost calculates the full ordered 6897 // reduction cost when FP reassociation is not allowed. 6898 if (useOrderedReductions(RdxDesc)) 6899 return BaseCost; 6900 6901 // Get the operand that was not the reduction chain and match it to one of the 6902 // patterns, returning the better cost if it is found. 6903 Instruction *RedOp = RetI->getOperand(1) == LastChain 6904 ? dyn_cast<Instruction>(RetI->getOperand(0)) 6905 : dyn_cast<Instruction>(RetI->getOperand(1)); 6906 6907 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 6908 6909 Instruction *Op0, *Op1; 6910 if (RedOp && 6911 match(RedOp, 6912 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) && 6913 match(Op0, m_ZExtOrSExt(m_Value())) && 6914 Op0->getOpcode() == Op1->getOpcode() && 6915 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 6916 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) && 6917 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) { 6918 6919 // Matched reduce(ext(mul(ext(A), ext(B))) 6920 // Note that the extend opcodes need to all match, or if A==B they will have 6921 // been converted to zext(mul(sext(A), sext(A))) as it is known positive, 6922 // which is equally fine. 6923 bool IsUnsigned = isa<ZExtInst>(Op0); 6924 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 6925 auto *MulType = VectorType::get(Op0->getType(), VectorTy); 6926 6927 InstructionCost ExtCost = 6928 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType, 6929 TTI::CastContextHint::None, CostKind, Op0); 6930 InstructionCost MulCost = 6931 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind); 6932 InstructionCost Ext2Cost = 6933 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType, 6934 TTI::CastContextHint::None, CostKind, RedOp); 6935 6936 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6937 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6938 CostKind); 6939 6940 if (RedCost.isValid() && 6941 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost) 6942 return I == RetI ? 
RedCost : 0; 6943 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && 6944 !TheLoop->isLoopInvariant(RedOp)) { 6945 // Matched reduce(ext(A)) 6946 bool IsUnsigned = isa<ZExtInst>(RedOp); 6947 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 6948 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6949 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6950 CostKind); 6951 6952 InstructionCost ExtCost = 6953 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 6954 TTI::CastContextHint::None, CostKind, RedOp); 6955 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 6956 return I == RetI ? RedCost : 0; 6957 } else if (RedOp && 6958 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { 6959 if (match(Op0, m_ZExtOrSExt(m_Value())) && 6960 Op0->getOpcode() == Op1->getOpcode() && 6961 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 6962 bool IsUnsigned = isa<ZExtInst>(Op0); 6963 Type *Op0Ty = Op0->getOperand(0)->getType(); 6964 Type *Op1Ty = Op1->getOperand(0)->getType(); 6965 Type *LargestOpTy = 6966 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty 6967 : Op0Ty; 6968 auto *ExtType = VectorType::get(LargestOpTy, VectorTy); 6969 6970 // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of 6971 // different sizes. We take the largest type as the ext to reduce, and add 6972 // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))). 6973 InstructionCost ExtCost0 = TTI.getCastInstrCost( 6974 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy), 6975 TTI::CastContextHint::None, CostKind, Op0); 6976 InstructionCost ExtCost1 = TTI.getCastInstrCost( 6977 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy), 6978 TTI::CastContextHint::None, CostKind, Op1); 6979 InstructionCost MulCost = 6980 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6981 6982 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6983 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6984 CostKind); 6985 InstructionCost ExtraExtCost = 0; 6986 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) { 6987 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1; 6988 ExtraExtCost = TTI.getCastInstrCost( 6989 ExtraExtOp->getOpcode(), ExtType, 6990 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy), 6991 TTI::CastContextHint::None, CostKind, ExtraExtOp); 6992 } 6993 6994 if (RedCost.isValid() && 6995 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost)) 6996 return I == RetI ? RedCost : 0; 6997 } else if (!match(I, m_ZExtOrSExt(m_Value()))) { 6998 // Matched reduce(mul()) 6999 InstructionCost MulCost = 7000 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7001 7002 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7003 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 7004 CostKind); 7005 7006 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 7007 return I == RetI ? RedCost : 0; 7008 } 7009 } 7010 7011 return I == RetI ? Optional<InstructionCost>(BaseCost) : None; 7012 } 7013 7014 InstructionCost 7015 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 7016 ElementCount VF) { 7017 // Calculate scalar cost only. Vectorization cost should be ready at this 7018 // moment. 
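// (For vector VFs the cost was already computed and cached when the widening
// decision was taken in setCostBasedWideningDecision, so getWideningCost
// simply looks it up.)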
7019 if (VF.isScalar()) { 7020 Type *ValTy = getLoadStoreType(I); 7021 const Align Alignment = getLoadStoreAlignment(I); 7022 unsigned AS = getLoadStoreAddressSpace(I); 7023 7024 return TTI.getAddressComputationCost(ValTy) + 7025 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 7026 TTI::TCK_RecipThroughput, I); 7027 } 7028 return getWideningCost(I, VF); 7029 } 7030 7031 LoopVectorizationCostModel::VectorizationCostTy 7032 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 7033 ElementCount VF) { 7034 // If we know that this instruction will remain uniform, check the cost of 7035 // the scalar version. 7036 if (isUniformAfterVectorization(I, VF)) 7037 VF = ElementCount::getFixed(1); 7038 7039 if (VF.isVector() && isProfitableToScalarize(I, VF)) 7040 return VectorizationCostTy(InstsToScalarize[VF][I], false); 7041 7042 // Forced scalars do not have any scalarization overhead. 7043 auto ForcedScalar = ForcedScalars.find(VF); 7044 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 7045 auto InstSet = ForcedScalar->second; 7046 if (InstSet.count(I)) 7047 return VectorizationCostTy( 7048 (getInstructionCost(I, ElementCount::getFixed(1)).first * 7049 VF.getKnownMinValue()), 7050 false); 7051 } 7052 7053 Type *VectorTy; 7054 InstructionCost C = getInstructionCost(I, VF, VectorTy); 7055 7056 bool TypeNotScalarized = false; 7057 if (VF.isVector() && VectorTy->isVectorTy()) { 7058 unsigned NumParts = TTI.getNumberOfParts(VectorTy); 7059 if (NumParts) 7060 TypeNotScalarized = NumParts < VF.getKnownMinValue(); 7061 else 7062 C = InstructionCost::getInvalid(); 7063 } 7064 return VectorizationCostTy(C, TypeNotScalarized); 7065 } 7066 7067 InstructionCost 7068 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 7069 ElementCount VF) const { 7070 7071 // There is no mechanism yet to create a scalable scalarization loop, 7072 // so this is currently Invalid. 7073 if (VF.isScalable()) 7074 return InstructionCost::getInvalid(); 7075 7076 if (VF.isScalar()) 7077 return 0; 7078 7079 InstructionCost Cost = 0; 7080 Type *RetTy = ToVectorTy(I->getType(), VF); 7081 if (!RetTy->isVoidTy() && 7082 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 7083 Cost += TTI.getScalarizationOverhead( 7084 cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true, 7085 false); 7086 7087 // Some targets keep addresses scalar. 7088 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 7089 return Cost; 7090 7091 // Some targets support efficient element stores. 7092 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 7093 return Cost; 7094 7095 // Collect operands to consider. 7096 CallInst *CI = dyn_cast<CallInst>(I); 7097 Instruction::op_range Ops = CI ? CI->args() : I->operands(); 7098 7099 // Skip operands that do not require extraction/scalarization and do not incur 7100 // any overhead. 7101 SmallVector<Type *> Tys; 7102 for (auto *V : filterExtractingOperands(Ops, VF)) 7103 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 7104 return Cost + TTI.getOperandsScalarizationOverhead( 7105 filterExtractingOperands(Ops, VF), Tys); 7106 } 7107 7108 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 7109 if (VF.isScalar()) 7110 return; 7111 NumPredStores = 0; 7112 for (BasicBlock *BB : TheLoop->blocks()) { 7113 // For each instruction in the old loop. 
7114 for (Instruction &I : *BB) { 7115 Value *Ptr = getLoadStorePointerOperand(&I); 7116 if (!Ptr) 7117 continue; 7118 7119 // TODO: We should generate better code and update the cost model for 7120 // predicated uniform stores. Today they are treated as any other 7121 // predicated store (see added test cases in 7122 // invariant-store-vectorization.ll). 7123 if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF)) 7124 NumPredStores++; 7125 7126 if (Legal->isUniformMemOp(I)) { 7127 // TODO: Avoid replicating loads and stores instead of 7128 // relying on instcombine to remove them. 7129 // Load: Scalar load + broadcast 7130 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 7131 InstructionCost Cost; 7132 if (isa<StoreInst>(&I) && VF.isScalable() && 7133 isLegalGatherOrScatter(&I, VF)) { 7134 Cost = getGatherScatterCost(&I, VF); 7135 setWideningDecision(&I, VF, CM_GatherScatter, Cost); 7136 } else { 7137 assert((isa<LoadInst>(&I) || !VF.isScalable()) && 7138 "Cannot yet scalarize uniform stores"); 7139 Cost = getUniformMemOpCost(&I, VF); 7140 setWideningDecision(&I, VF, CM_Scalarize, Cost); 7141 } 7142 continue; 7143 } 7144 7145 // We assume that widening is the best solution when possible. 7146 if (memoryInstructionCanBeWidened(&I, VF)) { 7147 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 7148 int ConsecutiveStride = Legal->isConsecutivePtr( 7149 getLoadStoreType(&I), getLoadStorePointerOperand(&I)); 7150 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7151 "Expected consecutive stride."); 7152 InstWidening Decision = 7153 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 7154 setWideningDecision(&I, VF, Decision, Cost); 7155 continue; 7156 } 7157 7158 // Choose between Interleaving, Gather/Scatter or Scalarization. 7159 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 7160 unsigned NumAccesses = 1; 7161 if (isAccessInterleaved(&I)) { 7162 auto Group = getInterleavedAccessGroup(&I); 7163 assert(Group && "Fail to get an interleaved access group."); 7164 7165 // Make one decision for the whole group. 7166 if (getWideningDecision(&I, VF) != CM_Unknown) 7167 continue; 7168 7169 NumAccesses = Group->getNumMembers(); 7170 if (interleavedAccessCanBeWidened(&I, VF)) 7171 InterleaveCost = getInterleaveGroupCost(&I, VF); 7172 } 7173 7174 InstructionCost GatherScatterCost = 7175 isLegalGatherOrScatter(&I, VF) 7176 ? getGatherScatterCost(&I, VF) * NumAccesses 7177 : InstructionCost::getInvalid(); 7178 7179 InstructionCost ScalarizationCost = 7180 getMemInstScalarizationCost(&I, VF) * NumAccesses; 7181 7182 // Choose better solution for the current VF, 7183 // write down this decision and use it during vectorization. 7184 InstructionCost Cost; 7185 InstWidening Decision; 7186 if (InterleaveCost <= GatherScatterCost && 7187 InterleaveCost < ScalarizationCost) { 7188 Decision = CM_Interleave; 7189 Cost = InterleaveCost; 7190 } else if (GatherScatterCost < ScalarizationCost) { 7191 Decision = CM_GatherScatter; 7192 Cost = GatherScatterCost; 7193 } else { 7194 Decision = CM_Scalarize; 7195 Cost = ScalarizationCost; 7196 } 7197 // If the instructions belongs to an interleave group, the whole group 7198 // receives the same decision. The whole group receives the cost, but 7199 // the cost will actually be assigned to one instruction. 
7200 if (auto Group = getInterleavedAccessGroup(&I)) 7201 setWideningDecision(Group, VF, Decision, Cost); 7202 else 7203 setWideningDecision(&I, VF, Decision, Cost); 7204 } 7205 } 7206 7207 // Make sure that any load of address and any other address computation 7208 // remains scalar unless there is gather/scatter support. This avoids 7209 // inevitable extracts into address registers, and also has the benefit of 7210 // activating LSR more, since that pass can't optimize vectorized 7211 // addresses. 7212 if (TTI.prefersVectorizedAddressing()) 7213 return; 7214 7215 // Start with all scalar pointer uses. 7216 SmallPtrSet<Instruction *, 8> AddrDefs; 7217 for (BasicBlock *BB : TheLoop->blocks()) 7218 for (Instruction &I : *BB) { 7219 Instruction *PtrDef = 7220 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 7221 if (PtrDef && TheLoop->contains(PtrDef) && 7222 getWideningDecision(&I, VF) != CM_GatherScatter) 7223 AddrDefs.insert(PtrDef); 7224 } 7225 7226 // Add all instructions used to generate the addresses. 7227 SmallVector<Instruction *, 4> Worklist; 7228 append_range(Worklist, AddrDefs); 7229 while (!Worklist.empty()) { 7230 Instruction *I = Worklist.pop_back_val(); 7231 for (auto &Op : I->operands()) 7232 if (auto *InstOp = dyn_cast<Instruction>(Op)) 7233 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 7234 AddrDefs.insert(InstOp).second) 7235 Worklist.push_back(InstOp); 7236 } 7237 7238 for (auto *I : AddrDefs) { 7239 if (isa<LoadInst>(I)) { 7240 // Setting the desired widening decision should ideally be handled in 7241 // by cost functions, but since this involves the task of finding out 7242 // if the loaded register is involved in an address computation, it is 7243 // instead changed here when we know this is the case. 7244 InstWidening Decision = getWideningDecision(I, VF); 7245 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 7246 // Scalarize a widened load of address. 7247 setWideningDecision( 7248 I, VF, CM_Scalarize, 7249 (VF.getKnownMinValue() * 7250 getMemoryInstructionCost(I, ElementCount::getFixed(1)))); 7251 else if (auto Group = getInterleavedAccessGroup(I)) { 7252 // Scalarize an interleave group of address loads. 7253 for (unsigned I = 0; I < Group->getFactor(); ++I) { 7254 if (Instruction *Member = Group->getMember(I)) 7255 setWideningDecision( 7256 Member, VF, CM_Scalarize, 7257 (VF.getKnownMinValue() * 7258 getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); 7259 } 7260 } 7261 } else 7262 // Make sure I gets scalarized and a cost estimate without 7263 // scalarization overhead. 
7264 ForcedScalars[VF].insert(I); 7265 } 7266 } 7267 7268 InstructionCost 7269 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF, 7270 Type *&VectorTy) { 7271 Type *RetTy = I->getType(); 7272 if (canTruncateToMinimalBitwidth(I, VF)) 7273 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 7274 auto SE = PSE.getSE(); 7275 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7276 7277 auto hasSingleCopyAfterVectorization = [this](Instruction *I, 7278 ElementCount VF) -> bool { 7279 if (VF.isScalar()) 7280 return true; 7281 7282 auto Scalarized = InstsToScalarize.find(VF); 7283 assert(Scalarized != InstsToScalarize.end() && 7284 "VF not yet analyzed for scalarization profitability"); 7285 return !Scalarized->second.count(I) && 7286 llvm::all_of(I->users(), [&](User *U) { 7287 auto *UI = cast<Instruction>(U); 7288 return !Scalarized->second.count(UI); 7289 }); 7290 }; 7291 (void) hasSingleCopyAfterVectorization; 7292 7293 if (isScalarAfterVectorization(I, VF)) { 7294 // With the exception of GEPs and PHIs, after scalarization there should 7295 // only be one copy of the instruction generated in the loop. This is 7296 // because the VF is either 1, or any instructions that need scalarizing 7297 // have already been dealt with by the the time we get here. As a result, 7298 // it means we don't have to multiply the instruction cost by VF. 7299 assert(I->getOpcode() == Instruction::GetElementPtr || 7300 I->getOpcode() == Instruction::PHI || 7301 (I->getOpcode() == Instruction::BitCast && 7302 I->getType()->isPointerTy()) || 7303 hasSingleCopyAfterVectorization(I, VF)); 7304 VectorTy = RetTy; 7305 } else 7306 VectorTy = ToVectorTy(RetTy, VF); 7307 7308 // TODO: We need to estimate the cost of intrinsic calls. 7309 switch (I->getOpcode()) { 7310 case Instruction::GetElementPtr: 7311 // We mark this instruction as zero-cost because the cost of GEPs in 7312 // vectorized code depends on whether the corresponding memory instruction 7313 // is scalarized or not. Therefore, we handle GEPs with the memory 7314 // instruction cost. 7315 return 0; 7316 case Instruction::Br: { 7317 // In cases of scalarized and predicated instructions, there will be VF 7318 // predicated blocks in the vectorized loop. Each branch around these 7319 // blocks requires also an extract of its vector compare i1 element. 7320 bool ScalarPredicatedBB = false; 7321 BranchInst *BI = cast<BranchInst>(I); 7322 if (VF.isVector() && BI->isConditional() && 7323 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7324 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7325 ScalarPredicatedBB = true; 7326 7327 if (ScalarPredicatedBB) { 7328 // Not possible to scalarize scalable vector with predicated instructions. 7329 if (VF.isScalable()) 7330 return InstructionCost::getInvalid(); 7331 // Return cost for branches around scalarized and predicated blocks. 7332 auto *Vec_i1Ty = 7333 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7334 return ( 7335 TTI.getScalarizationOverhead( 7336 Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) + 7337 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue())); 7338 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 7339 // The back-edge branch will remain, as will all scalar branches. 7340 return TTI.getCFInstrCost(Instruction::Br, CostKind); 7341 else 7342 // This branch will be eliminated by if-conversion. 
7343 return 0; 7344 // Note: We currently assume zero cost for an unconditional branch inside 7345 // a predicated block since it will become a fall-through, although we 7346 // may decide in the future to call TTI for all branches. 7347 } 7348 case Instruction::PHI: { 7349 auto *Phi = cast<PHINode>(I); 7350 7351 // First-order recurrences are replaced by vector shuffles inside the loop. 7352 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7353 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7354 return TTI.getShuffleCost( 7355 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7356 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7357 7358 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7359 // converted into select instructions. We require N - 1 selects per phi 7360 // node, where N is the number of incoming values. 7361 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7362 return (Phi->getNumIncomingValues() - 1) * 7363 TTI.getCmpSelInstrCost( 7364 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7365 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7366 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7367 7368 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7369 } 7370 case Instruction::UDiv: 7371 case Instruction::SDiv: 7372 case Instruction::URem: 7373 case Instruction::SRem: 7374 // If we have a predicated instruction, it may not be executed for each 7375 // vector lane. Get the scalarization cost and scale this amount by the 7376 // probability of executing the predicated block. If the instruction is not 7377 // predicated, we fall through to the next case. 7378 if (VF.isVector() && isScalarWithPredication(I, VF)) { 7379 InstructionCost Cost = 0; 7380 7381 // These instructions have a non-void type, so account for the phi nodes 7382 // that we will create. This cost is likely to be zero. The phi node 7383 // cost, if any, should be scaled by the block probability because it 7384 // models a copy at the end of each predicated block. 7385 Cost += VF.getKnownMinValue() * 7386 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7387 7388 // The cost of the non-predicated instruction. 7389 Cost += VF.getKnownMinValue() * 7390 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7391 7392 // The cost of insertelement and extractelement instructions needed for 7393 // scalarization. 7394 Cost += getScalarizationOverhead(I, VF); 7395 7396 // Scale the cost by the probability of executing the predicated blocks. 7397 // This assumes the predicated block for each vector lane is equally 7398 // likely. 7399 return Cost / getReciprocalPredBlockProb(); 7400 } 7401 LLVM_FALLTHROUGH; 7402 case Instruction::Add: 7403 case Instruction::FAdd: 7404 case Instruction::Sub: 7405 case Instruction::FSub: 7406 case Instruction::Mul: 7407 case Instruction::FMul: 7408 case Instruction::FDiv: 7409 case Instruction::FRem: 7410 case Instruction::Shl: 7411 case Instruction::LShr: 7412 case Instruction::AShr: 7413 case Instruction::And: 7414 case Instruction::Or: 7415 case Instruction::Xor: { 7416 // Since we will replace the stride by 1 the multiplication should go away. 
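// (This relies on symbolic-stride versioning: e.g. for an index of the form
// "i * Stride", the vector loop is guarded by a runtime "Stride == 1" check,
// so the multiply is expected to fold away and is given no cost here.)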
7417 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7418 return 0; 7419 7420 // Detect reduction patterns 7421 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7422 return *RedCost; 7423 7424 // Certain instructions can be cheaper to vectorize if they have a constant 7425 second vector operand. One example of this is shifts on x86. 7426 Value *Op2 = I->getOperand(1); 7427 TargetTransformInfo::OperandValueProperties Op2VP; 7428 TargetTransformInfo::OperandValueKind Op2VK = 7429 TTI.getOperandInfo(Op2, Op2VP); 7430 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7431 Op2VK = TargetTransformInfo::OK_UniformValue; 7432 7433 SmallVector<const Value *, 4> Operands(I->operand_values()); 7434 return TTI.getArithmeticInstrCost( 7435 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7436 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7437 } 7438 case Instruction::FNeg: { 7439 return TTI.getArithmeticInstrCost( 7440 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7441 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, 7442 TargetTransformInfo::OP_None, I->getOperand(0), I); 7443 } 7444 case Instruction::Select: { 7445 SelectInst *SI = cast<SelectInst>(I); 7446 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7447 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7448 7449 const Value *Op0, *Op1; 7450 using namespace llvm::PatternMatch; 7451 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7452 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7453 // select x, y, false --> x & y 7454 // select x, true, y --> x | y 7455 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7456 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7457 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7458 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7459 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7460 Op1->getType()->getScalarSizeInBits() == 1); 7461 7462 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7463 return TTI.getArithmeticInstrCost( 7464 match(I, m_LogicalOr()) ?
Instruction::Or : Instruction::And, VectorTy, 7465 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7466 } 7467 7468 Type *CondTy = SI->getCondition()->getType(); 7469 if (!ScalarCond) 7470 CondTy = VectorType::get(CondTy, VF); 7471 7472 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; 7473 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition())) 7474 Pred = Cmp->getPredicate(); 7475 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred, 7476 CostKind, I); 7477 } 7478 case Instruction::ICmp: 7479 case Instruction::FCmp: { 7480 Type *ValTy = I->getOperand(0)->getType(); 7481 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7482 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7483 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7484 VectorTy = ToVectorTy(ValTy, VF); 7485 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7486 cast<CmpInst>(I)->getPredicate(), CostKind, 7487 I); 7488 } 7489 case Instruction::Store: 7490 case Instruction::Load: { 7491 ElementCount Width = VF; 7492 if (Width.isVector()) { 7493 InstWidening Decision = getWideningDecision(I, Width); 7494 assert(Decision != CM_Unknown && 7495 "CM decision should be taken at this point"); 7496 if (Decision == CM_Scalarize) 7497 Width = ElementCount::getFixed(1); 7498 } 7499 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7500 return getMemoryInstructionCost(I, VF); 7501 } 7502 case Instruction::BitCast: 7503 if (I->getType()->isPointerTy()) 7504 return 0; 7505 LLVM_FALLTHROUGH; 7506 case Instruction::ZExt: 7507 case Instruction::SExt: 7508 case Instruction::FPToUI: 7509 case Instruction::FPToSI: 7510 case Instruction::FPExt: 7511 case Instruction::PtrToInt: 7512 case Instruction::IntToPtr: 7513 case Instruction::SIToFP: 7514 case Instruction::UIToFP: 7515 case Instruction::Trunc: 7516 case Instruction::FPTrunc: { 7517 // Computes the CastContextHint from a Load/Store instruction. 7518 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7519 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7520 "Expected a load or a store!"); 7521 7522 if (VF.isScalar() || !TheLoop->contains(I)) 7523 return TTI::CastContextHint::Normal; 7524 7525 switch (getWideningDecision(I, VF)) { 7526 case LoopVectorizationCostModel::CM_GatherScatter: 7527 return TTI::CastContextHint::GatherScatter; 7528 case LoopVectorizationCostModel::CM_Interleave: 7529 return TTI::CastContextHint::Interleave; 7530 case LoopVectorizationCostModel::CM_Scalarize: 7531 case LoopVectorizationCostModel::CM_Widen: 7532 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7533 : TTI::CastContextHint::Normal; 7534 case LoopVectorizationCostModel::CM_Widen_Reverse: 7535 return TTI::CastContextHint::Reversed; 7536 case LoopVectorizationCostModel::CM_Unknown: 7537 llvm_unreachable("Instr did not go through cost modelling?"); 7538 } 7539 7540 llvm_unreachable("Unhandled case!"); 7541 }; 7542 7543 unsigned Opcode = I->getOpcode(); 7544 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7545 // For Trunc, the context is the only user, which must be a StoreInst. 7546 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7547 if (I->hasOneUse()) 7548 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7549 CCH = ComputeCCH(Store); 7550 } 7551 // For Z/Sext, the context is the operand, which must be a LoadInst. 
7552 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7553 Opcode == Instruction::FPExt) { 7554 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7555 CCH = ComputeCCH(Load); 7556 } 7557 7558 // We optimize the truncation of induction variables having constant 7559 // integer steps. The cost of these truncations is the same as the scalar 7560 // operation. 7561 if (isOptimizableIVTruncate(I, VF)) { 7562 auto *Trunc = cast<TruncInst>(I); 7563 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7564 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7565 } 7566 7567 // Detect reduction patterns 7568 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7569 return *RedCost; 7570 7571 Type *SrcScalarTy = I->getOperand(0)->getType(); 7572 Type *SrcVecTy = 7573 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7574 if (canTruncateToMinimalBitwidth(I, VF)) { 7575 // This cast is going to be shrunk. This may remove the cast or it might 7576 // turn it into slightly different cast. For example, if MinBW == 16, 7577 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7578 // 7579 // Calculate the modified src and dest types. 7580 Type *MinVecTy = VectorTy; 7581 if (Opcode == Instruction::Trunc) { 7582 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7583 VectorTy = 7584 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7585 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7586 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7587 VectorTy = 7588 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7589 } 7590 } 7591 7592 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7593 } 7594 case Instruction::Call: { 7595 if (RecurrenceDescriptor::isFMulAddIntrinsic(I)) 7596 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7597 return *RedCost; 7598 bool NeedToScalarize; 7599 CallInst *CI = cast<CallInst>(I); 7600 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7601 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7602 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7603 return std::min(CallCost, IntrinsicCost); 7604 } 7605 return CallCost; 7606 } 7607 case Instruction::ExtractValue: 7608 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7609 case Instruction::Alloca: 7610 // We cannot easily widen alloca to a scalable alloca, as 7611 // the result would need to be a vector of pointers. 7612 if (VF.isScalable()) 7613 return InstructionCost::getInvalid(); 7614 LLVM_FALLTHROUGH; 7615 default: 7616 // This opcode is unknown. Assume that it is the same as 'mul'. 7617 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7618 } // end of switch. 
7619 } 7620 7621 char LoopVectorize::ID = 0; 7622 7623 static const char lv_name[] = "Loop Vectorization"; 7624 7625 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7626 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7627 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7628 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7629 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7630 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7631 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7632 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7633 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7634 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7635 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7636 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7637 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7638 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7639 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7640 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7641 7642 namespace llvm { 7643 7644 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7645 7646 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7647 bool VectorizeOnlyWhenForced) { 7648 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7649 } 7650 7651 } // end namespace llvm 7652 7653 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7654 // Check if the pointer operand of a load or store instruction is 7655 // consecutive. 7656 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7657 return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr); 7658 return false; 7659 } 7660 7661 void LoopVectorizationCostModel::collectValuesToIgnore() { 7662 // Ignore ephemeral values. 7663 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7664 7665 // Ignore type-promoting instructions we identified during reduction 7666 // detection. 7667 for (auto &Reduction : Legal->getReductionVars()) { 7668 const RecurrenceDescriptor &RedDes = Reduction.second; 7669 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7670 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7671 } 7672 // Ignore type-casting instructions we identified during induction 7673 // detection. 7674 for (auto &Induction : Legal->getInductionVars()) { 7675 const InductionDescriptor &IndDes = Induction.second; 7676 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7677 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7678 } 7679 } 7680 7681 void LoopVectorizationCostModel::collectInLoopReductions() { 7682 for (auto &Reduction : Legal->getReductionVars()) { 7683 PHINode *Phi = Reduction.first; 7684 const RecurrenceDescriptor &RdxDesc = Reduction.second; 7685 7686 // We don't collect reductions that are type promoted (yet). 7687 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7688 continue; 7689 7690 // If the target would prefer this reduction to happen "in-loop", then we 7691 // want to record it as such. 7692 unsigned Opcode = RdxDesc.getOpcode(); 7693 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 7694 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7695 TargetTransformInfo::ReductionFlags())) 7696 continue; 7697 7698 // Check that we can correctly put the reductions into the loop, by 7699 // finding the chain of operations that leads from the phi to the loop 7700 // exit value. 
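// For example (illustrative only), for "Sum += A[i]" the chain is just the
// single 'add' feeding the reduction phi; an in-loop reduction then emits a
// vector.reduce.add inside the loop body each iteration instead of keeping a
// wide vector accumulator that is only reduced after the loop.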
7701 SmallVector<Instruction *, 4> ReductionOperations = 7702 RdxDesc.getReductionOpChain(Phi, TheLoop); 7703 bool InLoop = !ReductionOperations.empty(); 7704 if (InLoop) { 7705 InLoopReductionChains[Phi] = ReductionOperations; 7706 // Add the elements to InLoopReductionImmediateChains for cost modelling. 7707 Instruction *LastChain = Phi; 7708 for (auto *I : ReductionOperations) { 7709 InLoopReductionImmediateChains[I] = LastChain; 7710 LastChain = I; 7711 } 7712 } 7713 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 7714 << " reduction for phi: " << *Phi << "\n"); 7715 } 7716 } 7717 7718 // TODO: we could return a pair of values that specify the max VF and 7719 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 7720 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment 7721 // doesn't have a cost model that can choose which plan to execute if 7722 // more than one is generated. 7723 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 7724 LoopVectorizationCostModel &CM) { 7725 unsigned WidestType; 7726 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 7727 return WidestVectorRegBits / WidestType; 7728 } 7729 7730 VectorizationFactor 7731 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { 7732 assert(!UserVF.isScalable() && "scalable vectors not yet supported"); 7733 ElementCount VF = UserVF; 7734 // Outer loop handling: They may require CFG and instruction level 7735 // transformations before even evaluating whether vectorization is profitable. 7736 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 7737 // the vectorization pipeline. 7738 if (!OrigLoop->isInnermost()) { 7739 // If the user doesn't provide a vectorization factor, determine a 7740 // reasonable one. 7741 if (UserVF.isZero()) { 7742 VF = ElementCount::getFixed(determineVPlanVF( 7743 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 7744 .getFixedSize(), 7745 CM)); 7746 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 7747 7748 // Make sure we have a VF > 1 for stress testing. 7749 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { 7750 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 7751 << "overriding computed VF.\n"); 7752 VF = ElementCount::getFixed(4); 7753 } 7754 } 7755 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 7756 assert(isPowerOf2_32(VF.getKnownMinValue()) && 7757 "VF needs to be a power of two"); 7758 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") 7759 << "VF " << VF << " to build VPlans.\n"); 7760 buildVPlans(VF, VF); 7761 7762 // For VPlan build stress testing, we bail out after VPlan construction. 7763 if (VPlanBuildStressTest) 7764 return VectorizationFactor::Disabled(); 7765 7766 return {VF, 0 /*Cost*/}; 7767 } 7768 7769 LLVM_DEBUG( 7770 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 7771 "VPlan-native path.\n"); 7772 return VectorizationFactor::Disabled(); 7773 } 7774 7775 Optional<VectorizationFactor> 7776 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { 7777 assert(OrigLoop->isInnermost() && "Inner loop expected."); 7778 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC); 7779 if (!MaxFactors) // Cases that should not to be vectorized nor interleaved. 7780 return None; 7781 7782 // Invalidate interleave groups if all blocks of loop will be predicated. 
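// (Folding the tail by masking predicates every block; without target support
// for masked interleaved accesses the groups cannot be emitted as wide
// loads/stores, so any widening decisions based on them must be dropped too.)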
7783 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) && 7784 !useMaskedInterleavedAccesses(*TTI)) { 7785 LLVM_DEBUG( 7786 dbgs() 7787 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 7788 "which requires masked-interleaved support.\n"); 7789 if (CM.InterleaveInfo.invalidateGroups()) 7790 // Invalidating interleave groups also requires invalidating all decisions 7791 // based on them, which includes widening decisions and uniform and scalar 7792 // values. 7793 CM.invalidateCostModelingDecisions(); 7794 } 7795 7796 ElementCount MaxUserVF = 7797 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; 7798 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); 7799 if (!UserVF.isZero() && UserVFIsLegal) { 7800 assert(isPowerOf2_32(UserVF.getKnownMinValue()) && 7801 "VF needs to be a power of two"); 7802 // Collect the instructions (and their associated costs) that will be more 7803 // profitable to scalarize. 7804 if (CM.selectUserVectorizationFactor(UserVF)) { 7805 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 7806 CM.collectInLoopReductions(); 7807 buildVPlansWithVPRecipes(UserVF, UserVF); 7808 LLVM_DEBUG(printPlans(dbgs())); 7809 return {{UserVF, 0}}; 7810 } else 7811 reportVectorizationInfo("UserVF ignored because of invalid costs.", 7812 "InvalidCost", ORE, OrigLoop); 7813 } 7814 7815 // Populate the set of Vectorization Factor Candidates. 7816 ElementCountSet VFCandidates; 7817 for (auto VF = ElementCount::getFixed(1); 7818 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) 7819 VFCandidates.insert(VF); 7820 for (auto VF = ElementCount::getScalable(1); 7821 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) 7822 VFCandidates.insert(VF); 7823 7824 for (const auto &VF : VFCandidates) { 7825 // Collect Uniform and Scalar instructions after vectorization with VF. 7826 CM.collectUniformsAndScalars(VF); 7827 7828 // Collect the instructions (and their associated costs) that will be more 7829 // profitable to scalarize. 7830 if (VF.isVector()) 7831 CM.collectInstsToScalarize(VF); 7832 } 7833 7834 CM.collectInLoopReductions(); 7835 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); 7836 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); 7837 7838 LLVM_DEBUG(printPlans(dbgs())); 7839 if (!MaxFactors.hasVector()) 7840 return VectorizationFactor::Disabled(); 7841 7842 // Select the optimal vectorization factor. 7843 auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); 7844 7845 // Check if it is profitable to vectorize with runtime checks. 
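// (Roughly: vectorization is abandoned when the number of runtime pointer
// checks exceeds the pragma threshold, or exceeds the default threshold while
// the user has not allowed reordering of memory operations.)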
7846 unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks(); 7847 if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) { 7848 bool PragmaThresholdReached = 7849 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 7850 bool ThresholdReached = 7851 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 7852 if ((ThresholdReached && !Hints.allowReordering()) || 7853 PragmaThresholdReached) { 7854 ORE->emit([&]() { 7855 return OptimizationRemarkAnalysisAliasing( 7856 DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(), 7857 OrigLoop->getHeader()) 7858 << "loop not vectorized: cannot prove it is safe to reorder " 7859 "memory operations"; 7860 }); 7861 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 7862 Hints.emitRemarkWithHints(); 7863 return VectorizationFactor::Disabled(); 7864 } 7865 } 7866 return SelectedVF; 7867 } 7868 7869 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const { 7870 assert(count_if(VPlans, 7871 [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) == 7872 1 && 7873 "Best VF has not a single VPlan."); 7874 7875 for (const VPlanPtr &Plan : VPlans) { 7876 if (Plan->hasVF(VF)) 7877 return *Plan.get(); 7878 } 7879 llvm_unreachable("No plan found!"); 7880 } 7881 7882 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 7883 SmallVector<Metadata *, 4> MDs; 7884 // Reserve first location for self reference to the LoopID metadata node. 7885 MDs.push_back(nullptr); 7886 bool IsUnrollMetadata = false; 7887 MDNode *LoopID = L->getLoopID(); 7888 if (LoopID) { 7889 // First find existing loop unrolling disable metadata. 7890 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 7891 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 7892 if (MD) { 7893 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 7894 IsUnrollMetadata = 7895 S && S->getString().startswith("llvm.loop.unroll.disable"); 7896 } 7897 MDs.push_back(LoopID->getOperand(i)); 7898 } 7899 } 7900 7901 if (!IsUnrollMetadata) { 7902 // Add runtime unroll disable metadata. 7903 LLVMContext &Context = L->getHeader()->getContext(); 7904 SmallVector<Metadata *, 1> DisableOperands; 7905 DisableOperands.push_back( 7906 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 7907 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 7908 MDs.push_back(DisableNode); 7909 MDNode *NewLoopID = MDNode::get(Context, MDs); 7910 // Set operand 0 to refer to the loop id itself. 7911 NewLoopID->replaceOperandWith(0, NewLoopID); 7912 L->setLoopID(NewLoopID); 7913 } 7914 } 7915 7916 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF, 7917 VPlan &BestVPlan, 7918 InnerLoopVectorizer &ILV, 7919 DominatorTree *DT) { 7920 LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF 7921 << '\n'); 7922 7923 // Perform the actual loop transformation. 7924 7925 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 7926 VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan}; 7927 Value *CanonicalIVStartValue; 7928 std::tie(State.CFG.PrevBB, CanonicalIVStartValue) = 7929 ILV.createVectorizedLoopSkeleton(); 7930 ILV.collectPoisonGeneratingRecipes(State); 7931 7932 ILV.printDebugTracesAtStart(); 7933 7934 //===------------------------------------------------===// 7935 // 7936 // Notice: any optimization or new instruction that go 7937 // into the code below should also be implemented in 7938 // the cost-model. 
7939 // 7940 //===------------------------------------------------===// 7941 7942 // 2. Copy and widen instructions from the old loop into the new loop. 7943 BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr), 7944 ILV.getOrCreateVectorTripCount(nullptr), 7945 CanonicalIVStartValue, State); 7946 BestVPlan.execute(&State); 7947 7948 // Keep all loop hints from the original loop on the vector loop (we'll 7949 // replace the vectorizer-specific hints below). 7950 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7951 7952 Optional<MDNode *> VectorizedLoopID = 7953 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 7954 LLVMLoopVectorizeFollowupVectorized}); 7955 7956 Loop *L = LI->getLoopFor(State.CFG.PrevBB); 7957 if (VectorizedLoopID.hasValue()) 7958 L->setLoopID(VectorizedLoopID.getValue()); 7959 else { 7960 // Keep all loop hints from the original loop on the vector loop (we'll 7961 // replace the vectorizer-specific hints below). 7962 if (MDNode *LID = OrigLoop->getLoopID()) 7963 L->setLoopID(LID); 7964 7965 LoopVectorizeHints Hints(L, true, *ORE); 7966 Hints.setAlreadyVectorized(); 7967 } 7968 // Disable runtime unrolling when vectorizing the epilogue loop. 7969 if (CanonicalIVStartValue) 7970 AddRuntimeUnrollDisableMetaData(L); 7971 7972 // 3. Fix the vectorized code: take care of header phi's, live-outs, 7973 // predication, updating analyses. 7974 ILV.fixVectorizedLoop(State); 7975 7976 ILV.printDebugTracesAtEnd(); 7977 } 7978 7979 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 7980 void LoopVectorizationPlanner::printPlans(raw_ostream &O) { 7981 for (const auto &Plan : VPlans) 7982 if (PrintVPlansInDotFormat) 7983 Plan->printDOT(O); 7984 else 7985 Plan->print(O); 7986 } 7987 #endif 7988 7989 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 7990 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 7991 7992 // We create new control-flow for the vectorized loop, so each original exit 7993 condition will be dead after vectorization if it is only used by the 7994 terminator. 7995 SmallVector<BasicBlock*> ExitingBlocks; 7996 OrigLoop->getExitingBlocks(ExitingBlocks); 7997 for (auto *BB : ExitingBlocks) { 7998 auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0)); 7999 if (!Cmp || !Cmp->hasOneUse()) 8000 continue; 8001 8002 // TODO: we should introduce a getUniqueExitingBlocks on Loop 8003 if (!DeadInstructions.insert(Cmp).second) 8004 continue; 8005 8006 // An operand of the icmp is often a dead trunc, used by IndUpdate. 8007 // TODO: can recurse through operands in general 8008 for (Value *Op : Cmp->operands()) { 8009 if (isa<TruncInst>(Op) && Op->hasOneUse()) 8010 DeadInstructions.insert(cast<Instruction>(Op)); 8011 } 8012 } 8013 8014 // We create new "steps" for induction variable updates to which the original 8015 // induction variables map. An original update instruction will be dead if 8016 // all its users except the induction variable are dead. 8017 auto *Latch = OrigLoop->getLoopLatch(); 8018 for (auto &Induction : Legal->getInductionVars()) { 8019 PHINode *Ind = Induction.first; 8020 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 8021 8022 // If the tail is to be folded by masking, the primary induction variable, 8023 if it exists, isn't dead: it will be used for masking. Don't kill it.
8024 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 8025 continue; 8026 8027 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 8028 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 8029 })) 8030 DeadInstructions.insert(IndUpdate); 8031 } 8032 } 8033 8034 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 8035 8036 //===--------------------------------------------------------------------===// 8037 // EpilogueVectorizerMainLoop 8038 //===--------------------------------------------------------------------===// 8039 8040 /// This function is partially responsible for generating the control flow 8041 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8042 std::pair<BasicBlock *, Value *> 8043 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 8044 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8045 Loop *Lp = createVectorLoopSkeleton(""); 8046 8047 // Generate the code to check the minimum iteration count of the vector 8048 // epilogue (see below). 8049 EPI.EpilogueIterationCountCheck = 8050 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true); 8051 EPI.EpilogueIterationCountCheck->setName("iter.check"); 8052 8053 // Generate the code to check any assumptions that we've made for SCEV 8054 // expressions. 8055 EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader); 8056 8057 // Generate the code that checks at runtime if arrays overlap. We put the 8058 // checks into a separate block to make the more common case of few elements 8059 // faster. 8060 EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 8061 8062 // Generate the iteration count check for the main loop, *after* the check 8063 // for the epilogue loop, so that the path-length is shorter for the case 8064 // that goes directly through the vector epilogue. The longer-path length for 8065 // the main loop is compensated for, by the gain from vectorizing the larger 8066 // trip count. Note: the branch will get updated later on when we vectorize 8067 // the epilogue. 8068 EPI.MainLoopIterationCountCheck = 8069 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false); 8070 8071 // Generate the induction variable. 8072 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8073 EPI.VectorTripCount = CountRoundDown; 8074 createHeaderBranch(Lp); 8075 8076 // Skip induction resume value creation here because they will be created in 8077 // the second pass. If we created them here, they wouldn't be used anyway, 8078 // because the vplan in the second pass still contains the inductions from the 8079 // original loop. 
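// (The second pass, EpilogueVectorizerEpilogueLoop, creates them via
// createInductionResumeValues and passes the main loop's vector trip count as
// an additional bypass value.)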
8080 8081 return {completeLoopSkeleton(Lp, OrigLoopID), nullptr}; 8082 } 8083 8084 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 8085 LLVM_DEBUG({ 8086 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 8087 << "Main Loop VF:" << EPI.MainLoopVF 8088 << ", Main Loop UF:" << EPI.MainLoopUF 8089 << ", Epilogue Loop VF:" << EPI.EpilogueVF 8090 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8091 }); 8092 } 8093 8094 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 8095 DEBUG_WITH_TYPE(VerboseDebug, { 8096 dbgs() << "intermediate fn:\n" 8097 << *OrigLoop->getHeader()->getParent() << "\n"; 8098 }); 8099 } 8100 8101 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck( 8102 Loop *L, BasicBlock *Bypass, bool ForEpilogue) { 8103 assert(L && "Expected valid Loop."); 8104 assert(Bypass && "Expected valid bypass basic block."); 8105 ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF; 8106 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 8107 Value *Count = getOrCreateTripCount(L); 8108 // Reuse existing vector loop preheader for TC checks. 8109 // Note that new preheader block is generated for vector loop. 8110 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 8111 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 8112 8113 // Generate code to check if the loop's trip count is less than VF * UF of the 8114 // main vector loop. 8115 auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? 8116 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8117 8118 Value *CheckMinIters = Builder.CreateICmp( 8119 P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor), 8120 "min.iters.check"); 8121 8122 if (!ForEpilogue) 8123 TCCheckBlock->setName("vector.main.loop.iter.check"); 8124 8125 // Create new preheader for vector loop. 8126 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 8127 DT, LI, nullptr, "vector.ph"); 8128 8129 if (ForEpilogue) { 8130 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 8131 DT->getNode(Bypass)->getIDom()) && 8132 "TC check is expected to dominate Bypass"); 8133 8134 // Update dominator for Bypass & LoopExit. 8135 DT->changeImmediateDominator(Bypass, TCCheckBlock); 8136 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8137 // For loops with multiple exits, there's no edge from the middle block 8138 // to exit blocks (as the epilogue must run) and thus no need to update 8139 // the immediate dominator of the exit blocks. 8140 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 8141 8142 LoopBypassBlocks.push_back(TCCheckBlock); 8143 8144 // Save the trip count so we don't have to regenerate it in the 8145 // vec.epilog.iter.check. This is safe to do because the trip count 8146 // generated here dominates the vector epilog iter check. 8147 EPI.TripCount = Count; 8148 } 8149 8150 ReplaceInstWithInst( 8151 TCCheckBlock->getTerminator(), 8152 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8153 8154 return TCCheckBlock; 8155 } 8156 8157 //===--------------------------------------------------------------------===// 8158 // EpilogueVectorizerEpilogueLoop 8159 //===--------------------------------------------------------------------===// 8160 8161 /// This function is partially responsible for generating the control flow 8162 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 
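/// Roughly (simplified), this second pass turns the old vector preheader into
/// a "vec.epilog.iter.check" block that decides whether enough iterations
/// remain after the main vector loop to run the vectorized epilogue, splits
/// off a new "vec.epilog.ph" preheader, and rewires the bypass blocks saved
/// from the first pass so that they branch to the scalar preheader rather than
/// into the epilogue's preheader.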
8163 std::pair<BasicBlock *, Value *> 8164 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 8165 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8166 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 8167 8168 // Now, compare the remaining count and if there aren't enough iterations to 8169 // execute the vectorized epilogue skip to the scalar part. 8170 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 8171 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 8172 LoopVectorPreHeader = 8173 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 8174 LI, nullptr, "vec.epilog.ph"); 8175 emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, 8176 VecEpilogueIterationCountCheck); 8177 8178 // Adjust the control flow taking the state info from the main loop 8179 // vectorization into account. 8180 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 8181 "expected this to be saved from the previous pass."); 8182 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 8183 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 8184 8185 DT->changeImmediateDominator(LoopVectorPreHeader, 8186 EPI.MainLoopIterationCountCheck); 8187 8188 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 8189 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8190 8191 if (EPI.SCEVSafetyCheck) 8192 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 8193 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8194 if (EPI.MemSafetyCheck) 8195 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 8196 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8197 8198 DT->changeImmediateDominator( 8199 VecEpilogueIterationCountCheck, 8200 VecEpilogueIterationCountCheck->getSinglePredecessor()); 8201 8202 DT->changeImmediateDominator(LoopScalarPreHeader, 8203 EPI.EpilogueIterationCountCheck); 8204 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8205 // If there is an epilogue which must run, there's no edge from the 8206 // middle block to exit blocks and thus no need to update the immediate 8207 // dominator of the exit blocks. 8208 DT->changeImmediateDominator(LoopExitBlock, 8209 EPI.EpilogueIterationCountCheck); 8210 8211 // Keep track of bypass blocks, as they feed start values to the induction 8212 // phis in the scalar loop preheader. 8213 if (EPI.SCEVSafetyCheck) 8214 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 8215 if (EPI.MemSafetyCheck) 8216 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 8217 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 8218 8219 // The vec.epilog.iter.check block may contain Phi nodes from reductions which 8220 // merge control-flow from the latch block and the middle block. Update the 8221 // incoming values here and move the Phi into the preheader. 
  SmallVector<PHINode *, 4> PhisInBlock;
  for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
    PhisInBlock.push_back(&Phi);

  for (PHINode *Phi : PhisInBlock) {
    Phi->replaceIncomingBlockWith(
        VecEpilogueIterationCountCheck->getSinglePredecessor(),
        VecEpilogueIterationCountCheck);
    Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
    if (EPI.SCEVSafetyCheck)
      Phi->removeIncomingValue(EPI.SCEVSafetyCheck);
    if (EPI.MemSafetyCheck)
      Phi->removeIncomingValue(EPI.MemSafetyCheck);
    Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI());
  }

  // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
  Type *IdxTy = Legal->getWidestInductionType();
  PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
                                         LoopVectorPreHeader->getFirstNonPHI());
  EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
  EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
                           EPI.MainLoopIterationCountCheck);

  // Generate the induction variable.
  createHeaderBranch(Lp);

  // Generate induction resume values. These variables save the new starting
  // indexes for the scalar loop. They are used to test if there are any tail
  // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to iteration count
  // check, then the resume value for the induction variable comes from
  // the trip count of the main vector loop, hence passing the AdditionalBypass
  // argument.
  createInductionResumeValues(Lp, {VecEpilogueIterationCountCheck,
                                   EPI.VectorTripCount} /* AdditionalBypass */);

  return {completeLoopSkeleton(Lp, OrigLoopID), EPResumeVal};
}

BasicBlock *
EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
    Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {

  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  assert(
      (!isa<Instruction>(EPI.TripCount) ||
       DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
      "saved trip count does not dominate insertion point.");
  Value *TC = EPI.TripCount;
  IRBuilder<> Builder(Insert->getTerminator());
  Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");

  // Generate code to check if the loop's trip count is less than VF * UF of the
  // vector epilogue loop.
  auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8280 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8281 8282 Value *CheckMinIters = 8283 Builder.CreateICmp(P, Count, 8284 createStepForVF(Builder, Count->getType(), 8285 EPI.EpilogueVF, EPI.EpilogueUF), 8286 "min.epilog.iters.check"); 8287 8288 ReplaceInstWithInst( 8289 Insert->getTerminator(), 8290 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8291 8292 LoopBypassBlocks.push_back(Insert); 8293 return Insert; 8294 } 8295 8296 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { 8297 LLVM_DEBUG({ 8298 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" 8299 << "Epilogue Loop VF:" << EPI.EpilogueVF 8300 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8301 }); 8302 } 8303 8304 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { 8305 DEBUG_WITH_TYPE(VerboseDebug, { 8306 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n"; 8307 }); 8308 } 8309 8310 bool LoopVectorizationPlanner::getDecisionAndClampRange( 8311 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { 8312 assert(!Range.isEmpty() && "Trying to test an empty VF range."); 8313 bool PredicateAtRangeStart = Predicate(Range.Start); 8314 8315 for (ElementCount TmpVF = Range.Start * 2; 8316 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) 8317 if (Predicate(TmpVF) != PredicateAtRangeStart) { 8318 Range.End = TmpVF; 8319 break; 8320 } 8321 8322 return PredicateAtRangeStart; 8323 } 8324 8325 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8326 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8327 /// of VF's starting at a given VF and extending it as much as possible. Each 8328 /// vectorization decision can potentially shorten this sub-range during 8329 /// buildVPlan(). 8330 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8331 ElementCount MaxVF) { 8332 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8333 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8334 VFRange SubRange = {VF, MaxVFPlusOne}; 8335 VPlans.push_back(buildVPlan(SubRange)); 8336 VF = SubRange.End; 8337 } 8338 } 8339 8340 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8341 VPlanPtr &Plan) { 8342 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8343 8344 // Look for cached value. 8345 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8346 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8347 if (ECEntryIt != EdgeMaskCache.end()) 8348 return ECEntryIt->second; 8349 8350 VPValue *SrcMask = createBlockInMask(Src, Plan); 8351 8352 // The terminator has to be a branch inst! 8353 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8354 assert(BI && "Unexpected terminator found"); 8355 8356 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8357 return EdgeMaskCache[Edge] = SrcMask; 8358 8359 // If source is an exiting block, we know the exit edge is dynamically dead 8360 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8361 // adding uses of an otherwise potentially dead instruction. 
8362 if (OrigLoop->isLoopExiting(Src)) 8363 return EdgeMaskCache[Edge] = SrcMask; 8364 8365 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8366 assert(EdgeMask && "No Edge Mask found for condition"); 8367 8368 if (BI->getSuccessor(0) != Dst) 8369 EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc()); 8370 8371 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 8372 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8373 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8374 // The select version does not introduce new UB if SrcMask is false and 8375 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8376 VPValue *False = Plan->getOrAddVPValue( 8377 ConstantInt::getFalse(BI->getCondition()->getType())); 8378 EdgeMask = 8379 Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc()); 8380 } 8381 8382 return EdgeMaskCache[Edge] = EdgeMask; 8383 } 8384 8385 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8386 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8387 8388 // Look for cached value. 8389 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8390 if (BCEntryIt != BlockMaskCache.end()) 8391 return BCEntryIt->second; 8392 8393 // All-one mask is modelled as no-mask following the convention for masked 8394 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8395 VPValue *BlockMask = nullptr; 8396 8397 if (OrigLoop->getHeader() == BB) { 8398 if (!CM.blockNeedsPredicationForAnyReason(BB)) 8399 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8400 8401 // Introduce the early-exit compare IV <= BTC to form header block mask. 8402 // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by 8403 // constructing the desired canonical IV in the header block as its first 8404 // non-phi instructions. 8405 assert(CM.foldTailByMasking() && "must fold the tail"); 8406 VPBasicBlock *HeaderVPBB = Plan->getEntry()->getEntryBasicBlock(); 8407 auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi(); 8408 auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV()); 8409 HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi()); 8410 8411 VPBuilder::InsertPointGuard Guard(Builder); 8412 Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint); 8413 if (CM.TTI.emitGetActiveLaneMask()) { 8414 VPValue *TC = Plan->getOrCreateTripCount(); 8415 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC}); 8416 } else { 8417 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8418 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8419 } 8420 return BlockMaskCache[BB] = BlockMask; 8421 } 8422 8423 // This is the block mask. We OR all incoming edges. 8424 for (auto *Predecessor : predecessors(BB)) { 8425 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8426 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8427 return BlockMaskCache[BB] = EdgeMask; 8428 8429 if (!BlockMask) { // BlockMask has its initialized nullptr value. 
8430 BlockMask = EdgeMask; 8431 continue; 8432 } 8433 8434 BlockMask = Builder.createOr(BlockMask, EdgeMask, {}); 8435 } 8436 8437 return BlockMaskCache[BB] = BlockMask; 8438 } 8439 8440 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8441 ArrayRef<VPValue *> Operands, 8442 VFRange &Range, 8443 VPlanPtr &Plan) { 8444 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8445 "Must be called with either a load or store"); 8446 8447 auto willWiden = [&](ElementCount VF) -> bool { 8448 if (VF.isScalar()) 8449 return false; 8450 LoopVectorizationCostModel::InstWidening Decision = 8451 CM.getWideningDecision(I, VF); 8452 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8453 "CM decision should be taken at this point."); 8454 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8455 return true; 8456 if (CM.isScalarAfterVectorization(I, VF) || 8457 CM.isProfitableToScalarize(I, VF)) 8458 return false; 8459 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8460 }; 8461 8462 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8463 return nullptr; 8464 8465 VPValue *Mask = nullptr; 8466 if (Legal->isMaskRequired(I)) 8467 Mask = createBlockInMask(I->getParent(), Plan); 8468 8469 // Determine if the pointer operand of the access is either consecutive or 8470 // reverse consecutive. 8471 LoopVectorizationCostModel::InstWidening Decision = 8472 CM.getWideningDecision(I, Range.Start); 8473 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse; 8474 bool Consecutive = 8475 Reverse || Decision == LoopVectorizationCostModel::CM_Widen; 8476 8477 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8478 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask, 8479 Consecutive, Reverse); 8480 8481 StoreInst *Store = cast<StoreInst>(I); 8482 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8483 Mask, Consecutive, Reverse); 8484 } 8485 8486 static VPWidenIntOrFpInductionRecipe * 8487 createWidenInductionRecipe(PHINode *Phi, Instruction *PhiOrTrunc, 8488 VPValue *Start, const InductionDescriptor &IndDesc, 8489 LoopVectorizationCostModel &CM, Loop &OrigLoop, 8490 VFRange &Range) { 8491 // Returns true if an instruction \p I should be scalarized instead of 8492 // vectorized for the chosen vectorization factor. 8493 auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) { 8494 return CM.isScalarAfterVectorization(I, VF) || 8495 CM.isProfitableToScalarize(I, VF); 8496 }; 8497 8498 bool NeedsScalarIV = LoopVectorizationPlanner::getDecisionAndClampRange( 8499 [&](ElementCount VF) { 8500 // Returns true if we should generate a scalar version of \p IV. 
8501 if (ShouldScalarizeInstruction(PhiOrTrunc, VF)) 8502 return true; 8503 auto isScalarInst = [&](User *U) -> bool { 8504 auto *I = cast<Instruction>(U); 8505 return OrigLoop.contains(I) && ShouldScalarizeInstruction(I, VF); 8506 }; 8507 return any_of(PhiOrTrunc->users(), isScalarInst); 8508 }, 8509 Range); 8510 bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange( 8511 [&](ElementCount VF) { 8512 return ShouldScalarizeInstruction(PhiOrTrunc, VF); 8513 }, 8514 Range); 8515 assert(IndDesc.getStartValue() == 8516 Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader())); 8517 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) { 8518 return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, TruncI, 8519 NeedsScalarIV, !NeedsScalarIVOnly); 8520 } 8521 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here"); 8522 return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, NeedsScalarIV, 8523 !NeedsScalarIVOnly); 8524 } 8525 8526 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionPHI( 8527 PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) const { 8528 8529 // Check if this is an integer or fp induction. If so, build the recipe that 8530 // produces its scalar and vector values. 8531 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi)) 8532 return createWidenInductionRecipe(Phi, Phi, Operands[0], *II, CM, *OrigLoop, 8533 Range); 8534 8535 return nullptr; 8536 } 8537 8538 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8539 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, 8540 VPlan &Plan) const { 8541 // Optimize the special case where the source is a constant integer 8542 // induction variable. Notice that we can only optimize the 'trunc' case 8543 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8544 // (c) other casts depend on pointer size. 8545 8546 // Determine whether \p K is a truncation based on an induction variable that 8547 // can be optimized. 8548 auto isOptimizableIVTruncate = 8549 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8550 return [=](ElementCount VF) -> bool { 8551 return CM.isOptimizableIVTruncate(K, VF); 8552 }; 8553 }; 8554 8555 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8556 isOptimizableIVTruncate(I), Range)) { 8557 8558 auto *Phi = cast<PHINode>(I->getOperand(0)); 8559 const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi); 8560 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8561 return createWidenInductionRecipe(Phi, I, Start, II, CM, *OrigLoop, Range); 8562 } 8563 return nullptr; 8564 } 8565 8566 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8567 ArrayRef<VPValue *> Operands, 8568 VPlanPtr &Plan) { 8569 // If all incoming values are equal, the incoming VPValue can be used directly 8570 // instead of creating a new VPBlendRecipe. 8571 VPValue *FirstIncoming = Operands[0]; 8572 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8573 return FirstIncoming == Inc; 8574 })) { 8575 return Operands[0]; 8576 } 8577 8578 // We know that all PHIs in non-header blocks are converted into selects, so 8579 // we don't have to worry about the insertion order and we can just use the 8580 // builder. At this point we generate the predication tree. There may be 8581 // duplications since this is a simple recursive scan, but future 8582 // optimizations will clean it up. 
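  // For example (illustrative only): a phi merging v0 and v1 arriving from
  // blocks with edge masks m0 and m1 becomes a VPBlendRecipe with operands
  // (v0, m0, v1, m1); when executed, the blend is lowered to selects, roughly
  // select(m1, v1, v0) for the two-input case.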
8583 SmallVector<VPValue *, 2> OperandsWithMask; 8584 unsigned NumIncoming = Phi->getNumIncomingValues(); 8585 8586 for (unsigned In = 0; In < NumIncoming; In++) { 8587 VPValue *EdgeMask = 8588 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8589 assert((EdgeMask || NumIncoming == 1) && 8590 "Multiple predecessors with one having a full mask"); 8591 OperandsWithMask.push_back(Operands[In]); 8592 if (EdgeMask) 8593 OperandsWithMask.push_back(EdgeMask); 8594 } 8595 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8596 } 8597 8598 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8599 ArrayRef<VPValue *> Operands, 8600 VFRange &Range) const { 8601 8602 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8603 [this, CI](ElementCount VF) { 8604 return CM.isScalarWithPredication(CI, VF); 8605 }, 8606 Range); 8607 8608 if (IsPredicated) 8609 return nullptr; 8610 8611 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8612 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8613 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8614 ID == Intrinsic::pseudoprobe || 8615 ID == Intrinsic::experimental_noalias_scope_decl)) 8616 return nullptr; 8617 8618 auto willWiden = [&](ElementCount VF) -> bool { 8619 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8620 // The following case may be scalarized depending on the VF. 8621 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8622 // version of the instruction. 8623 // Is it beneficial to perform intrinsic call compared to lib call? 8624 bool NeedToScalarize = false; 8625 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8626 InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0; 8627 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8628 return UseVectorIntrinsic || !NeedToScalarize; 8629 }; 8630 8631 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8632 return nullptr; 8633 8634 ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size()); 8635 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 8636 } 8637 8638 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8639 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8640 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8641 // Instruction should be widened, unless it is scalar after vectorization, 8642 // scalarization is profitable or it is predicated. 
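  // For example, a udiv that the cost model reports as scalar-with-predication
  // (it may trap on lanes that are masked off) is not widened here; it is
  // handled later via (predicated) replication instead.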
8643 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8644 return CM.isScalarAfterVectorization(I, VF) || 8645 CM.isProfitableToScalarize(I, VF) || 8646 CM.isScalarWithPredication(I, VF); 8647 }; 8648 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8649 Range); 8650 } 8651 8652 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8653 ArrayRef<VPValue *> Operands) const { 8654 auto IsVectorizableOpcode = [](unsigned Opcode) { 8655 switch (Opcode) { 8656 case Instruction::Add: 8657 case Instruction::And: 8658 case Instruction::AShr: 8659 case Instruction::BitCast: 8660 case Instruction::FAdd: 8661 case Instruction::FCmp: 8662 case Instruction::FDiv: 8663 case Instruction::FMul: 8664 case Instruction::FNeg: 8665 case Instruction::FPExt: 8666 case Instruction::FPToSI: 8667 case Instruction::FPToUI: 8668 case Instruction::FPTrunc: 8669 case Instruction::FRem: 8670 case Instruction::FSub: 8671 case Instruction::ICmp: 8672 case Instruction::IntToPtr: 8673 case Instruction::LShr: 8674 case Instruction::Mul: 8675 case Instruction::Or: 8676 case Instruction::PtrToInt: 8677 case Instruction::SDiv: 8678 case Instruction::Select: 8679 case Instruction::SExt: 8680 case Instruction::Shl: 8681 case Instruction::SIToFP: 8682 case Instruction::SRem: 8683 case Instruction::Sub: 8684 case Instruction::Trunc: 8685 case Instruction::UDiv: 8686 case Instruction::UIToFP: 8687 case Instruction::URem: 8688 case Instruction::Xor: 8689 case Instruction::ZExt: 8690 return true; 8691 } 8692 return false; 8693 }; 8694 8695 if (!IsVectorizableOpcode(I->getOpcode())) 8696 return nullptr; 8697 8698 // Success: widen this instruction. 8699 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); 8700 } 8701 8702 void VPRecipeBuilder::fixHeaderPhis() { 8703 BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); 8704 for (VPHeaderPHIRecipe *R : PhisToFix) { 8705 auto *PN = cast<PHINode>(R->getUnderlyingValue()); 8706 VPRecipeBase *IncR = 8707 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); 8708 R->addOperand(IncR->getVPSingleValue()); 8709 } 8710 } 8711 8712 VPBasicBlock *VPRecipeBuilder::handleReplication( 8713 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8714 VPlanPtr &Plan) { 8715 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8716 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8717 Range); 8718 8719 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8720 [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); }, 8721 Range); 8722 8723 // Even if the instruction is not marked as uniform, there are certain 8724 // intrinsic calls that can be effectively treated as such, so we check for 8725 // them here. Conservatively, we only do this for scalable vectors, since 8726 // for fixed-width VFs we can always fall back on full scalarization. 8727 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) { 8728 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) { 8729 case Intrinsic::assume: 8730 case Intrinsic::lifetime_start: 8731 case Intrinsic::lifetime_end: 8732 // For scalable vectors if one of the operands is variant then we still 8733 // want to mark as uniform, which will generate one instruction for just 8734 // the first lane of the vector. We can't scalarize the call in the same 8735 // way as for fixed-width vectors because we don't know how many lanes 8736 // there are. 
      //
      // The reasons for doing it this way for scalable vectors are:
      //  1. For the assume intrinsic generating the instruction for the first
      //     lane is still better than not generating any at all. For
      //     example, the input may be a splat across all lanes.
      //  2. For the lifetime start/end intrinsics the pointer operand only
      //     does anything useful when the input comes from a stack object,
      //     which suggests it should always be uniform. For non-stack objects
      //     the effect is to poison the object, which still allows us to
      //     remove the call.
      IsUniform = true;
      break;
    default:
      break;
    }
  }

  auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
                                       IsUniform, IsPredicated);
  setRecipe(I, Recipe);
  Plan->addVPValue(I, Recipe);

  // Find if I uses a predicated instruction. If so, it will use its scalar
  // value. Avoid hoisting the insert-element which packs the scalar value into
  // a vector value, as that happens iff all users use the vector value.
  for (VPValue *Op : Recipe->operands()) {
    auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
    if (!PredR)
      continue;
    auto *RepR =
        cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
    assert(RepR->isPredicated() &&
           "expected Replicate recipe to be predicated");
    RepR->setAlsoPack(false);
  }

  // Finalize the recipe for Instr, first if it is not predicated.
  if (!IsPredicated) {
    LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
    VPBB->appendRecipe(Recipe);
    return VPBB;
  }
  LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");

  VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
  assert(SingleSucc && "VPBB must have a single successor when handling "
                       "predicated replication.");
  VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
  // Record predicated instructions for above packing optimizations.
  VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
  VPBlockUtils::insertBlockAfter(Region, VPBB);
  auto *RegSucc = new VPBasicBlock();
  VPBlockUtils::insertBlockAfter(RegSucc, Region);
  VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
  return RegSucc;
}

VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
                                                      VPRecipeBase *PredRecipe,
                                                      VPlanPtr &Plan) {
  // Instructions marked for predication are replicated and placed under an
  // if-then construct to prevent side-effects.

  // Generate recipes to compute the block mask for this region.
  VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);

  // Build the triangular if-then region.
  std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
  assert(Instr->getParent() && "Predicated instruction not in any basic block");
  auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
  auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
  auto *PHIRecipe = Instr->getType()->isVoidTy()
                        ?
nullptr 8810 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8811 if (PHIRecipe) { 8812 Plan->removeVPValueFor(Instr); 8813 Plan->addVPValue(Instr, PHIRecipe); 8814 } 8815 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8816 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8817 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8818 8819 // Note: first set Entry as region entry and then connect successors starting 8820 // from it in order, to propagate the "parent" of each VPBasicBlock. 8821 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 8822 VPBlockUtils::connectBlocks(Pred, Exit); 8823 8824 return Region; 8825 } 8826 8827 VPRecipeOrVPValueTy 8828 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8829 ArrayRef<VPValue *> Operands, 8830 VFRange &Range, VPlanPtr &Plan) { 8831 // First, check for specific widening recipes that deal with calls, memory 8832 // operations, inductions and Phi nodes. 8833 if (auto *CI = dyn_cast<CallInst>(Instr)) 8834 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 8835 8836 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 8837 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 8838 8839 VPRecipeBase *Recipe; 8840 if (auto Phi = dyn_cast<PHINode>(Instr)) { 8841 if (Phi->getParent() != OrigLoop->getHeader()) 8842 return tryToBlend(Phi, Operands, Plan); 8843 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range))) 8844 return toVPRecipeResult(Recipe); 8845 8846 VPHeaderPHIRecipe *PhiRecipe = nullptr; 8847 if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) { 8848 VPValue *StartV = Operands[0]; 8849 if (Legal->isReductionVariable(Phi)) { 8850 const RecurrenceDescriptor &RdxDesc = 8851 Legal->getReductionVars().find(Phi)->second; 8852 assert(RdxDesc.getRecurrenceStartValue() == 8853 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8854 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 8855 CM.isInLoopReduction(Phi), 8856 CM.useOrderedReductions(RdxDesc)); 8857 } else { 8858 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 8859 } 8860 8861 // Record the incoming value from the backedge, so we can add the incoming 8862 // value from the backedge after all recipes have been created. 8863 recordRecipeOf(cast<Instruction>( 8864 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 8865 PhisToFix.push_back(PhiRecipe); 8866 } else { 8867 // TODO: record backedge value for remaining pointer induction phis. 
      assert(Phi->getType()->isPointerTy() &&
             "only pointer phis should be handled here");
      assert(Legal->getInductionVars().count(Phi) &&
             "Not an induction variable");
      InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
      VPValue *Start = Plan->getOrAddVPValue(II.getStartValue());
      PhiRecipe = new VPWidenPHIRecipe(Phi, Start);
    }

    return toVPRecipeResult(PhiRecipe);
  }

  if (isa<TruncInst>(Instr) &&
      (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
                                               Range, *Plan)))
    return toVPRecipeResult(Recipe);

  if (!shouldWiden(Instr, Range))
    return nullptr;

  if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
    return toVPRecipeResult(new VPWidenGEPRecipe(
        GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));

  if (auto *SI = dyn_cast<SelectInst>(Instr)) {
    bool InvariantCond =
        PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
    return toVPRecipeResult(new VPWidenSelectRecipe(
        *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
  }

  return toVPRecipeResult(tryToWiden(Instr, Operands));
}

void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
                                                        ElementCount MaxVF) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");

  // Collect instructions from the original loop that will become trivially dead
  // in the vectorized loop. We don't need to vectorize these instructions. For
  // example, original induction update instructions can become dead because we
  // separately emit induction "steps" when generating code for the new loop.
  // Similarly, we create a new latch condition when setting up the structure
  // of the new loop, so the old one can become dead.
  SmallPtrSet<Instruction *, 4> DeadInstructions;
  collectTriviallyDeadInstructions(DeadInstructions);

  // Add assume instructions we need to drop to DeadInstructions, to prevent
  // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  // control flow is preserved, we should keep them.
  auto &ConditionalAssumes = Legal->getConditionalAssumes();
  DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());

  MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
  // Dead instructions do not need sinking. Remove them from SinkAfter.
  for (Instruction *I : DeadInstructions)
    SinkAfter.erase(I);

  // Cannot sink instructions after dead instructions (there won't be any
  // recipes for them). Instead, find the first non-dead previous instruction.
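  // For example (illustrative only), if the recorded sink target is a dead
  // induction update such as "%iv.next = add i64 %iv, 1", walk upwards from it
  // to the closest preceding live instruction and use that as the new sink
  // target.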
8929 for (auto &P : Legal->getSinkAfter()) { 8930 Instruction *SinkTarget = P.second; 8931 Instruction *FirstInst = &*SinkTarget->getParent()->begin(); 8932 (void)FirstInst; 8933 while (DeadInstructions.contains(SinkTarget)) { 8934 assert( 8935 SinkTarget != FirstInst && 8936 "Must find a live instruction (at least the one feeding the " 8937 "first-order recurrence PHI) before reaching beginning of the block"); 8938 SinkTarget = SinkTarget->getPrevNode(); 8939 assert(SinkTarget != P.first && 8940 "sink source equals target, no sinking required"); 8941 } 8942 P.second = SinkTarget; 8943 } 8944 8945 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8946 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8947 VFRange SubRange = {VF, MaxVFPlusOne}; 8948 VPlans.push_back( 8949 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter)); 8950 VF = SubRange.End; 8951 } 8952 } 8953 8954 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a 8955 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a 8956 // BranchOnCount VPInstruction to the latch. 8957 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL, 8958 bool HasNUW, bool IsVPlanNative) { 8959 Value *StartIdx = ConstantInt::get(IdxTy, 0); 8960 auto *StartV = Plan.getOrAddVPValue(StartIdx); 8961 8962 auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL); 8963 VPRegionBlock *TopRegion = Plan.getVectorLoopRegion(); 8964 VPBasicBlock *Header = TopRegion->getEntryBasicBlock(); 8965 if (IsVPlanNative) 8966 Header = cast<VPBasicBlock>(Header->getSingleSuccessor()); 8967 Header->insert(CanonicalIVPHI, Header->begin()); 8968 8969 auto *CanonicalIVIncrement = 8970 new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW 8971 : VPInstruction::CanonicalIVIncrement, 8972 {CanonicalIVPHI}, DL); 8973 CanonicalIVPHI->addOperand(CanonicalIVIncrement); 8974 8975 VPBasicBlock *EB = TopRegion->getExitBasicBlock(); 8976 if (IsVPlanNative) { 8977 EB = cast<VPBasicBlock>(EB->getSinglePredecessor()); 8978 EB->setCondBit(nullptr); 8979 } 8980 EB->appendRecipe(CanonicalIVIncrement); 8981 8982 auto *BranchOnCount = 8983 new VPInstruction(VPInstruction::BranchOnCount, 8984 {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL); 8985 EB->appendRecipe(BranchOnCount); 8986 } 8987 8988 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( 8989 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, 8990 const MapVector<Instruction *, Instruction *> &SinkAfter) { 8991 8992 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups; 8993 8994 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder); 8995 8996 // --------------------------------------------------------------------------- 8997 // Pre-construction: record ingredients whose recipes we'll need to further 8998 // process after constructing the initial VPlan. 8999 // --------------------------------------------------------------------------- 9000 9001 // Mark instructions we'll need to sink later and their targets as 9002 // ingredients whose recipe we'll need to record. 
  for (auto &Entry : SinkAfter) {
    RecipeBuilder.recordRecipeOf(Entry.first);
    RecipeBuilder.recordRecipeOf(Entry.second);
  }
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    RecurKind Kind =
        Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    RecipeBuilder.recordRecipeOf(Phi);
    for (auto &R : ReductionOperations) {
      RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
      // need to record the ICmp recipe, so it can be removed later.
      assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
             "Only min/max recurrences allowed for inloop reductions");
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
        RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
    }
  }

  // For each interleave group which is relevant for this (possibly trimmed)
  // Range, add it to the set of groups to be later applied to the VPlan and add
  // placeholders for its members' Recipes which we'll be replacing with a
  // single VPInterleaveRecipe.
  for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
    auto applyIG = [IG, this](ElementCount VF) -> bool {
      return (VF.isVector() && // Query is illegal for VF == 1
              CM.getWideningDecision(IG->getInsertPos(), VF) ==
                  LoopVectorizationCostModel::CM_Interleave);
    };
    if (!getDecisionAndClampRange(applyIG, Range))
      continue;
    InterleaveGroups.insert(IG);
    for (unsigned i = 0; i < IG->getFactor(); i++)
      if (Instruction *Member = IG->getMember(i))
        RecipeBuilder.recordRecipeOf(Member);
  }

  // ---------------------------------------------------------------------------
  // Build initial VPlan: Scan the body of the loop in a topological order to
  // visit each basic block after having visited its predecessor basic blocks.
  // ---------------------------------------------------------------------------

  // Create initial VPlan skeleton, with separate header and latch blocks.
  VPBasicBlock *HeaderVPBB = new VPBasicBlock();
  VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
  VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
  auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
  auto Plan = std::make_unique<VPlan>(TopRegion);

  Instruction *DLInst =
      getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
  addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
                        DLInst ? DLInst->getDebugLoc() : DebugLoc(),
                        !CM.foldTailByMasking(), false);

  // Scan the body of the loop in a topological order to visit each basic block
  // after having visited its predecessor basic blocks.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  VPBasicBlock *VPBB = HeaderVPBB;
  SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients and fill a new VPBasicBlock.
    unsigned VPBBsForBB = 0;
    VPBB->setName(BB->getName());
    Builder.setInsertPoint(VPBB);

    // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
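    // As a rough illustration, a body such as
    //   %l = load i32, i32* %p
    //   %a = add i32 %l, 1
    //   store i32 %a, i32* %q
    // typically yields a VPWidenMemoryInstructionRecipe for the load, a
    // VPWidenRecipe for the add and another VPWidenMemoryInstructionRecipe for
    // the store; the exact recipes depend on the cost-model decisions queried
    // by the recipe builder.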
9077 for (Instruction &I : BB->instructionsWithoutDebug()) { 9078 Instruction *Instr = &I; 9079 9080 // First filter out irrelevant instructions, to ensure no recipes are 9081 // built for them. 9082 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) 9083 continue; 9084 9085 SmallVector<VPValue *, 4> Operands; 9086 auto *Phi = dyn_cast<PHINode>(Instr); 9087 if (Phi && Phi->getParent() == OrigLoop->getHeader()) { 9088 Operands.push_back(Plan->getOrAddVPValue( 9089 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()))); 9090 } else { 9091 auto OpRange = Plan->mapToVPValues(Instr->operands()); 9092 Operands = {OpRange.begin(), OpRange.end()}; 9093 } 9094 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe( 9095 Instr, Operands, Range, Plan)) { 9096 // If Instr can be simplified to an existing VPValue, use it. 9097 if (RecipeOrValue.is<VPValue *>()) { 9098 auto *VPV = RecipeOrValue.get<VPValue *>(); 9099 Plan->addVPValue(Instr, VPV); 9100 // If the re-used value is a recipe, register the recipe for the 9101 // instruction, in case the recipe for Instr needs to be recorded. 9102 if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef())) 9103 RecipeBuilder.setRecipe(Instr, R); 9104 continue; 9105 } 9106 // Otherwise, add the new recipe. 9107 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); 9108 for (auto *Def : Recipe->definedValues()) { 9109 auto *UV = Def->getUnderlyingValue(); 9110 Plan->addVPValue(UV, Def); 9111 } 9112 9113 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && 9114 HeaderVPBB->getFirstNonPhi() != VPBB->end()) { 9115 // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section 9116 // of the header block. That can happen for truncates of induction 9117 // variables. Those recipes are moved to the phi section of the header 9118 // block after applying SinkAfter, which relies on the original 9119 // position of the trunc. 9120 assert(isa<TruncInst>(Instr)); 9121 InductionsToMove.push_back( 9122 cast<VPWidenIntOrFpInductionRecipe>(Recipe)); 9123 } 9124 RecipeBuilder.setRecipe(Instr, Recipe); 9125 VPBB->appendRecipe(Recipe); 9126 continue; 9127 } 9128 9129 // Otherwise, if all widening options failed, Instruction is to be 9130 // replicated. This may create a successor for VPBB. 9131 VPBasicBlock *NextVPBB = 9132 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 9133 if (NextVPBB != VPBB) { 9134 VPBB = NextVPBB; 9135 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 9136 : ""); 9137 } 9138 } 9139 9140 VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB); 9141 VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor()); 9142 } 9143 9144 // Fold the last, empty block into its predecessor. 9145 VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB); 9146 assert(VPBB && "expected to fold last (empty) block"); 9147 // After here, VPBB should not be used. 9148 VPBB = nullptr; 9149 9150 assert(isa<VPRegionBlock>(Plan->getEntry()) && 9151 !Plan->getEntry()->getEntryBasicBlock()->empty() && 9152 "entry block must be set to a VPRegionBlock having a non-empty entry " 9153 "VPBasicBlock"); 9154 RecipeBuilder.fixHeaderPhis(); 9155 9156 // --------------------------------------------------------------------------- 9157 // Transform initial VPlan: Apply previously taken decisions, in order, to 9158 // bring the VPlan to its final state. 9159 // --------------------------------------------------------------------------- 9160 9161 // Apply Sink-After legal constraints. 
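  // Sink-after entries come from the legality analysis of first-order
  // recurrences: a user of the recurrence phi that appears before the phi's
  // backedge (previous) value must be moved after it. E.g. (illustrative
  // only), given
  //   %for  = phi i32 [ %init, %ph ], [ %prev, %latch ]
  //   %use  = add i32 %for, 1
  //   %prev = load i32, i32* %p
  // the recipe for %use is sunk after the recipe for %prev.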
9162 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 9163 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 9164 if (Region && Region->isReplicator()) { 9165 assert(Region->getNumSuccessors() == 1 && 9166 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 9167 assert(R->getParent()->size() == 1 && 9168 "A recipe in an original replicator region must be the only " 9169 "recipe in its block"); 9170 return Region; 9171 } 9172 return nullptr; 9173 }; 9174 for (auto &Entry : SinkAfter) { 9175 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 9176 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 9177 9178 auto *TargetRegion = GetReplicateRegion(Target); 9179 auto *SinkRegion = GetReplicateRegion(Sink); 9180 if (!SinkRegion) { 9181 // If the sink source is not a replicate region, sink the recipe directly. 9182 if (TargetRegion) { 9183 // The target is in a replication region, make sure to move Sink to 9184 // the block after it, not into the replication region itself. 9185 VPBasicBlock *NextBlock = 9186 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 9187 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 9188 } else 9189 Sink->moveAfter(Target); 9190 continue; 9191 } 9192 9193 // The sink source is in a replicate region. Unhook the region from the CFG. 9194 auto *SinkPred = SinkRegion->getSinglePredecessor(); 9195 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 9196 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 9197 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 9198 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 9199 9200 if (TargetRegion) { 9201 // The target recipe is also in a replicate region, move the sink region 9202 // after the target region. 9203 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 9204 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 9205 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 9206 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 9207 } else { 9208 // The sink source is in a replicate region, we need to move the whole 9209 // replicate region, which should only contain a single recipe in the 9210 // main block. 9211 auto *SplitBlock = 9212 Target->getParent()->splitAt(std::next(Target->getIterator())); 9213 9214 auto *SplitPred = SplitBlock->getSinglePredecessor(); 9215 9216 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 9217 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 9218 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 9219 } 9220 } 9221 9222 VPlanTransforms::removeRedundantCanonicalIVs(*Plan); 9223 VPlanTransforms::removeRedundantInductionCasts(*Plan); 9224 9225 // Now that sink-after is done, move induction recipes for optimized truncates 9226 // to the phi section of the header block. 9227 for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove) 9228 Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi()); 9229 9230 // Adjust the recipes for any inloop reductions. 9231 adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan, 9232 RecipeBuilder, Range.Start); 9233 9234 // Introduce a recipe to combine the incoming and previous values of a 9235 // first-order recurrence. 
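  // For each vector iteration the splice combines the last lane of the
  // previous value with the first VF-1 lanes of the current value, roughly
  //   splice(prev, cur) = <prev[VF-1], cur[0], ..., cur[VF-2]>
  // which is the value scalar users of the recurrence phi expect.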
9236 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9237 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 9238 if (!RecurPhi) 9239 continue; 9240 9241 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 9242 VPBasicBlock *InsertBlock = PrevRecipe->getParent(); 9243 auto *Region = GetReplicateRegion(PrevRecipe); 9244 if (Region) 9245 InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor()); 9246 if (Region || PrevRecipe->isPhi()) 9247 Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi()); 9248 else 9249 Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator())); 9250 9251 auto *RecurSplice = cast<VPInstruction>( 9252 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 9253 {RecurPhi, RecurPhi->getBackedgeValue()})); 9254 9255 RecurPhi->replaceAllUsesWith(RecurSplice); 9256 // Set the first operand of RecurSplice to RecurPhi again, after replacing 9257 // all users. 9258 RecurSplice->setOperand(0, RecurPhi); 9259 } 9260 9261 // Interleave memory: for each Interleave Group we marked earlier as relevant 9262 // for this VPlan, replace the Recipes widening its memory instructions with a 9263 // single VPInterleaveRecipe at its insertion point. 9264 for (auto IG : InterleaveGroups) { 9265 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 9266 RecipeBuilder.getRecipe(IG->getInsertPos())); 9267 SmallVector<VPValue *, 4> StoredValues; 9268 for (unsigned i = 0; i < IG->getFactor(); ++i) 9269 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 9270 auto *StoreR = 9271 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 9272 StoredValues.push_back(StoreR->getStoredValue()); 9273 } 9274 9275 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 9276 Recipe->getMask()); 9277 VPIG->insertBefore(Recipe); 9278 unsigned J = 0; 9279 for (unsigned i = 0; i < IG->getFactor(); ++i) 9280 if (Instruction *Member = IG->getMember(i)) { 9281 if (!Member->getType()->isVoidTy()) { 9282 VPValue *OriginalV = Plan->getVPValue(Member); 9283 Plan->removeVPValueFor(Member); 9284 Plan->addVPValue(Member, VPIG->getVPValue(J)); 9285 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 9286 J++; 9287 } 9288 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9289 } 9290 } 9291 9292 // From this point onwards, VPlan-to-VPlan transformations may change the plan 9293 // in ways that accessing values using original IR values is incorrect. 9294 Plan->disableValue2VPValue(); 9295 9296 VPlanTransforms::sinkScalarOperands(*Plan); 9297 VPlanTransforms::mergeReplicateRegions(*Plan); 9298 9299 std::string PlanName; 9300 raw_string_ostream RSO(PlanName); 9301 ElementCount VF = Range.Start; 9302 Plan->addVF(VF); 9303 RSO << "Initial VPlan for VF={" << VF; 9304 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 9305 Plan->addVF(VF); 9306 RSO << "," << VF; 9307 } 9308 RSO << "},UF>=1"; 9309 RSO.flush(); 9310 Plan->setName(PlanName); 9311 9312 // Fold Exit block into its predecessor if possible. 9313 // TODO: Fold block earlier once all VPlan transforms properly maintain a 9314 // VPBasicBlock as exit. 
  VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit());

  assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
  return Plan;
}

VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is profitable.
  // Since we cannot modify the incoming IR, we need to build VPlan upfront in
  // the vectorization pipeline.
  assert(!OrigLoop->isInnermost());
  assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");

  // Create new empty VPlan
  auto Plan = std::make_unique<VPlan>();

  // Build hierarchical CFG
  VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
  HCFGBuilder.buildHierarchicalCFG();

  for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
       VF *= 2)
    Plan->addVF(VF);

  if (EnableVPlanPredication) {
    VPlanPredicator VPP(*Plan);
    VPP.predicate();

    // Avoid running the transformation to recipes until masked code generation
    // in the VPlan-native path is in place.
    return Plan;
  }

  SmallPtrSet<Instruction *, 1> DeadInstructions;
  VPlanTransforms::VPInstructionsToVPRecipes(
      OrigLoop, Plan,
      [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
      DeadInstructions, *PSE.getSE());

  addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
                        true, true);
  return Plan;
}

// Adjust the recipes for reductions. For in-loop reductions the chain of
// instructions leading from the loop exit instr to the phi needs to be
// converted to reductions, with one operand being vector and the other being
// the scalar reduction chain. For other reductions, a select is introduced
// between the phi and live-out recipes when folding the tail.
void LoopVectorizationPlanner::adjustRecipesForReductions(
    VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
    ElementCount MinVF) {
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    const RecurrenceDescriptor &RdxDesc =
        Legal->getReductionVars().find(Phi)->second;
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
      continue;

    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For minmax the chain will be the select instructions.
    Instruction *Chain = Phi;
    for (Instruction *R : ReductionOperations) {
      VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
      RecurKind Kind = RdxDesc.getRecurrenceKind();

      VPValue *ChainOp = Plan->getVPValue(Chain);
      unsigned FirstOpId;
      assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
             "Only min/max recurrences allowed for inloop reductions");
      // Recognize a call to the llvm.fmuladd intrinsic.
9391 bool IsFMulAdd = (Kind == RecurKind::FMulAdd); 9392 assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) && 9393 "Expected instruction to be a call to the llvm.fmuladd intrinsic"); 9394 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9395 assert(isa<VPWidenSelectRecipe>(WidenRecipe) && 9396 "Expected to replace a VPWidenSelectSC"); 9397 FirstOpId = 1; 9398 } else { 9399 assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) || 9400 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) && 9401 "Expected to replace a VPWidenSC"); 9402 FirstOpId = 0; 9403 } 9404 unsigned VecOpId = 9405 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId; 9406 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); 9407 9408 auto *CondOp = CM.foldTailByMasking() 9409 ? RecipeBuilder.createBlockInMask(R->getParent(), Plan) 9410 : nullptr; 9411 9412 if (IsFMulAdd) { 9413 // If the instruction is a call to the llvm.fmuladd intrinsic then we 9414 // need to create an fmul recipe to use as the vector operand for the 9415 // fadd reduction. 9416 VPInstruction *FMulRecipe = new VPInstruction( 9417 Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))}); 9418 FMulRecipe->setFastMathFlags(R->getFastMathFlags()); 9419 WidenRecipe->getParent()->insert(FMulRecipe, 9420 WidenRecipe->getIterator()); 9421 VecOp = FMulRecipe; 9422 } 9423 VPReductionRecipe *RedRecipe = 9424 new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9425 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9426 Plan->removeVPValueFor(R); 9427 Plan->addVPValue(R, RedRecipe); 9428 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 9429 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9430 WidenRecipe->eraseFromParent(); 9431 9432 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9433 VPRecipeBase *CompareRecipe = 9434 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9435 assert(isa<VPWidenRecipe>(CompareRecipe) && 9436 "Expected to replace a VPWidenSC"); 9437 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9438 "Expected no remaining users"); 9439 CompareRecipe->eraseFromParent(); 9440 } 9441 Chain = R; 9442 } 9443 } 9444 9445 // If tail is folded by masking, introduce selects between the phi 9446 // and the live-out instruction of each reduction, at the beginning of the 9447 // dedicated latch block. 
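  // The select is keyed on the header block mask, i.e. roughly
  //   %rdx.sel = select <header-mask>, <reduction-update>, <reduction-phi>
  // so that lanes disabled by tail folding keep their previous partial result
  // rather than consuming values from beyond the trip count.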
9448 if (CM.foldTailByMasking()) { 9449 Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin()); 9450 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9451 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R); 9452 if (!PhiR || PhiR->isInLoop()) 9453 continue; 9454 VPValue *Cond = 9455 RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9456 VPValue *Red = PhiR->getBackedgeValue(); 9457 assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB && 9458 "reduction recipe must be defined before latch"); 9459 Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR}); 9460 } 9461 } 9462 } 9463 9464 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9465 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9466 VPSlotTracker &SlotTracker) const { 9467 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9468 IG->getInsertPos()->printAsOperand(O, false); 9469 O << ", "; 9470 getAddr()->printAsOperand(O, SlotTracker); 9471 VPValue *Mask = getMask(); 9472 if (Mask) { 9473 O << ", "; 9474 Mask->printAsOperand(O, SlotTracker); 9475 } 9476 9477 unsigned OpIdx = 0; 9478 for (unsigned i = 0; i < IG->getFactor(); ++i) { 9479 if (!IG->getMember(i)) 9480 continue; 9481 if (getNumStoreOperands() > 0) { 9482 O << "\n" << Indent << " store "; 9483 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker); 9484 O << " to index " << i; 9485 } else { 9486 O << "\n" << Indent << " "; 9487 getVPValue(OpIdx)->printAsOperand(O, SlotTracker); 9488 O << " = load from index " << i; 9489 } 9490 ++OpIdx; 9491 } 9492 } 9493 #endif 9494 9495 void VPWidenCallRecipe::execute(VPTransformState &State) { 9496 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9497 *this, State); 9498 } 9499 9500 void VPWidenSelectRecipe::execute(VPTransformState &State) { 9501 auto &I = *cast<SelectInst>(getUnderlyingInstr()); 9502 State.ILV->setDebugLocFromInst(&I); 9503 9504 // The condition can be loop invariant but still defined inside the 9505 // loop. This means that we can't just use the original 'cond' value. 9506 // We have to take the 'vectorized' value and pick the first lane. 9507 // Instcombine will make this a no-op. 9508 auto *InvarCond = 9509 InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr; 9510 9511 for (unsigned Part = 0; Part < State.UF; ++Part) { 9512 Value *Cond = InvarCond ? 
InvarCond : State.get(getOperand(0), Part); 9513 Value *Op0 = State.get(getOperand(1), Part); 9514 Value *Op1 = State.get(getOperand(2), Part); 9515 Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1); 9516 State.set(this, Sel, Part); 9517 State.ILV->addMetadata(Sel, &I); 9518 } 9519 } 9520 9521 void VPWidenRecipe::execute(VPTransformState &State) { 9522 auto &I = *cast<Instruction>(getUnderlyingValue()); 9523 auto &Builder = State.Builder; 9524 switch (I.getOpcode()) { 9525 case Instruction::Call: 9526 case Instruction::Br: 9527 case Instruction::PHI: 9528 case Instruction::GetElementPtr: 9529 case Instruction::Select: 9530 llvm_unreachable("This instruction is handled by a different recipe."); 9531 case Instruction::UDiv: 9532 case Instruction::SDiv: 9533 case Instruction::SRem: 9534 case Instruction::URem: 9535 case Instruction::Add: 9536 case Instruction::FAdd: 9537 case Instruction::Sub: 9538 case Instruction::FSub: 9539 case Instruction::FNeg: 9540 case Instruction::Mul: 9541 case Instruction::FMul: 9542 case Instruction::FDiv: 9543 case Instruction::FRem: 9544 case Instruction::Shl: 9545 case Instruction::LShr: 9546 case Instruction::AShr: 9547 case Instruction::And: 9548 case Instruction::Or: 9549 case Instruction::Xor: { 9550 // Just widen unops and binops. 9551 State.ILV->setDebugLocFromInst(&I); 9552 9553 for (unsigned Part = 0; Part < State.UF; ++Part) { 9554 SmallVector<Value *, 2> Ops; 9555 for (VPValue *VPOp : operands()) 9556 Ops.push_back(State.get(VPOp, Part)); 9557 9558 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 9559 9560 if (auto *VecOp = dyn_cast<Instruction>(V)) { 9561 VecOp->copyIRFlags(&I); 9562 9563 // If the instruction is vectorized and was in a basic block that needed 9564 // predication, we can't propagate poison-generating flags (nuw/nsw, 9565 // exact, etc.). The control flow has been linearized and the 9566 // instruction is no longer guarded by the predicate, which could make 9567 // the flag properties to no longer hold. 9568 if (State.MayGeneratePoisonRecipes.contains(this)) 9569 VecOp->dropPoisonGeneratingFlags(); 9570 } 9571 9572 // Use this vector value for all users of the original instruction. 9573 State.set(this, V, Part); 9574 State.ILV->addMetadata(V, &I); 9575 } 9576 9577 break; 9578 } 9579 case Instruction::ICmp: 9580 case Instruction::FCmp: { 9581 // Widen compares. Generate vector compares. 9582 bool FCmp = (I.getOpcode() == Instruction::FCmp); 9583 auto *Cmp = cast<CmpInst>(&I); 9584 State.ILV->setDebugLocFromInst(Cmp); 9585 for (unsigned Part = 0; Part < State.UF; ++Part) { 9586 Value *A = State.get(getOperand(0), Part); 9587 Value *B = State.get(getOperand(1), Part); 9588 Value *C = nullptr; 9589 if (FCmp) { 9590 // Propagate fast math flags. 
9591 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9592 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 9593 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 9594 } else { 9595 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 9596 } 9597 State.set(this, C, Part); 9598 State.ILV->addMetadata(C, &I); 9599 } 9600 9601 break; 9602 } 9603 9604 case Instruction::ZExt: 9605 case Instruction::SExt: 9606 case Instruction::FPToUI: 9607 case Instruction::FPToSI: 9608 case Instruction::FPExt: 9609 case Instruction::PtrToInt: 9610 case Instruction::IntToPtr: 9611 case Instruction::SIToFP: 9612 case Instruction::UIToFP: 9613 case Instruction::Trunc: 9614 case Instruction::FPTrunc: 9615 case Instruction::BitCast: { 9616 auto *CI = cast<CastInst>(&I); 9617 State.ILV->setDebugLocFromInst(CI); 9618 9619 /// Vectorize casts. 9620 Type *DestTy = (State.VF.isScalar()) 9621 ? CI->getType() 9622 : VectorType::get(CI->getType(), State.VF); 9623 9624 for (unsigned Part = 0; Part < State.UF; ++Part) { 9625 Value *A = State.get(getOperand(0), Part); 9626 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 9627 State.set(this, Cast, Part); 9628 State.ILV->addMetadata(Cast, &I); 9629 } 9630 break; 9631 } 9632 default: 9633 // This instruction is not vectorized by simple widening. 9634 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 9635 llvm_unreachable("Unhandled instruction!"); 9636 } // end of switch. 9637 } 9638 9639 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9640 auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr()); 9641 // Construct a vector GEP by widening the operands of the scalar GEP as 9642 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 9643 // results in a vector of pointers when at least one operand of the GEP 9644 // is vector-typed. Thus, to keep the representation compact, we only use 9645 // vector-typed operands for loop-varying values. 9646 9647 if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 9648 // If we are vectorizing, but the GEP has only loop-invariant operands, 9649 // the GEP we build (by only using vector-typed operands for 9650 // loop-varying values) would be a scalar pointer. Thus, to ensure we 9651 // produce a vector of pointers, we need to either arbitrarily pick an 9652 // operand to broadcast, or broadcast a clone of the original GEP. 9653 // Here, we broadcast a clone of the original. 9654 // 9655 // TODO: If at some point we decide to scalarize instructions having 9656 // loop-invariant operands, this special case will no longer be 9657 // required. We would add the scalarization decision to 9658 // collectLoopScalars() and teach getVectorValue() to broadcast 9659 // the lane-zero scalar value. 9660 auto *Clone = State.Builder.Insert(GEP->clone()); 9661 for (unsigned Part = 0; Part < State.UF; ++Part) { 9662 Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone); 9663 State.set(this, EntryPart, Part); 9664 State.ILV->addMetadata(EntryPart, GEP); 9665 } 9666 } else { 9667 // If the GEP has at least one loop-varying operand, we are sure to 9668 // produce a vector of pointers. But if we are only unrolling, we want 9669 // to produce a scalar GEP for each unroll part. Thus, the GEP we 9670 // produce with the code below will be scalar (if VF == 1) or vector 9671 // (otherwise). Note that for the unroll-only case, we still maintain 9672 // values in the vector mapping with initVector, as we do for other 9673 // instructions. 
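    // For example (illustrative only, VF=4): a scalar GEP such as
    //   %p = getelementptr inbounds float, float* %base, i64 %iv
    // keeps the loop-invariant %base as a scalar operand and uses the widened
    // index, producing a vector of pointers:
    //   %p.vec = getelementptr inbounds float, float* %base, <4 x i64> %iv.vec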
9674 for (unsigned Part = 0; Part < State.UF; ++Part) { 9675 // The pointer operand of the new GEP. If it's loop-invariant, we 9676 // won't broadcast it. 9677 auto *Ptr = IsPtrLoopInvariant 9678 ? State.get(getOperand(0), VPIteration(0, 0)) 9679 : State.get(getOperand(0), Part); 9680 9681 // Collect all the indices for the new GEP. If any index is 9682 // loop-invariant, we won't broadcast it. 9683 SmallVector<Value *, 4> Indices; 9684 for (unsigned I = 1, E = getNumOperands(); I < E; I++) { 9685 VPValue *Operand = getOperand(I); 9686 if (IsIndexLoopInvariant[I - 1]) 9687 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 9688 else 9689 Indices.push_back(State.get(Operand, Part)); 9690 } 9691 9692 // If the GEP instruction is vectorized and was in a basic block that 9693 // needed predication, we can't propagate the poison-generating 'inbounds' 9694 // flag. The control flow has been linearized and the GEP is no longer 9695 // guarded by the predicate, which could make the 'inbounds' properties to 9696 // no longer hold. 9697 bool IsInBounds = 9698 GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0; 9699 9700 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 9701 // but it should be a vector, otherwise. 9702 auto *NewGEP = IsInBounds 9703 ? State.Builder.CreateInBoundsGEP( 9704 GEP->getSourceElementType(), Ptr, Indices) 9705 : State.Builder.CreateGEP(GEP->getSourceElementType(), 9706 Ptr, Indices); 9707 assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) && 9708 "NewGEP is not a pointer vector"); 9709 State.set(this, NewGEP, Part); 9710 State.ILV->addMetadata(NewGEP, GEP); 9711 } 9712 } 9713 } 9714 9715 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 9716 assert(!State.Instance && "Int or FP induction being replicated."); 9717 auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0); 9718 State.ILV->widenIntOrFpInduction(IV, this, State, CanonicalIV); 9719 } 9720 9721 void VPWidenPHIRecipe::execute(VPTransformState &State) { 9722 State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this, 9723 State); 9724 } 9725 9726 void VPBlendRecipe::execute(VPTransformState &State) { 9727 State.ILV->setDebugLocFromInst(Phi, &State.Builder); 9728 // We know that all PHIs in non-header blocks are converted into 9729 // selects, so we don't have to worry about the insertion order and we 9730 // can just use the builder. 9731 // At this point we generate the predication tree. There may be 9732 // duplications since this is a simple recursive scan, but future 9733 // optimizations will clean it up. 9734 9735 unsigned NumIncoming = getNumIncomingValues(); 9736 9737 // Generate a sequence of selects of the form: 9738 // SELECT(Mask3, In3, 9739 // SELECT(Mask2, In2, 9740 // SELECT(Mask1, In1, 9741 // In0))) 9742 // Note that Mask0 is never used: lanes for which no path reaches this phi and 9743 // are essentially undef are taken from In0. 9744 InnerLoopVectorizer::VectorParts Entry(State.UF); 9745 for (unsigned In = 0; In < NumIncoming; ++In) { 9746 for (unsigned Part = 0; Part < State.UF; ++Part) { 9747 // We might have single edge PHIs (blocks) - use an identity 9748 // 'select' for the first PHI operand. 9749 Value *In0 = State.get(getIncomingValue(In), Part); 9750 if (In == 0) 9751 Entry[Part] = In0; // Initialize with the first incoming value. 9752 else { 9753 // Select between the current value and the previous incoming edge 9754 // based on the incoming mask. 
9755 Value *Cond = State.get(getMask(In), Part); 9756 Entry[Part] = 9757 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 9758 } 9759 } 9760 } 9761 for (unsigned Part = 0; Part < State.UF; ++Part) 9762 State.set(this, Entry[Part], Part); 9763 } 9764 9765 void VPInterleaveRecipe::execute(VPTransformState &State) { 9766 assert(!State.Instance && "Interleave group being replicated."); 9767 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9768 getStoredValues(), getMask()); 9769 } 9770 9771 void VPReductionRecipe::execute(VPTransformState &State) { 9772 assert(!State.Instance && "Reduction being replicated."); 9773 Value *PrevInChain = State.get(getChainOp(), 0); 9774 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9775 bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc); 9776 // Propagate the fast-math flags carried by the underlying instruction. 9777 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder); 9778 State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags()); 9779 for (unsigned Part = 0; Part < State.UF; ++Part) { 9780 Value *NewVecOp = State.get(getVecOp(), Part); 9781 if (VPValue *Cond = getCondOp()) { 9782 Value *NewCond = State.get(Cond, Part); 9783 VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); 9784 Value *Iden = RdxDesc->getRecurrenceIdentity( 9785 Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags()); 9786 Value *IdenVec = 9787 State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden); 9788 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); 9789 NewVecOp = Select; 9790 } 9791 Value *NewRed; 9792 Value *NextInChain; 9793 if (IsOrdered) { 9794 if (State.VF.isVector()) 9795 NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp, 9796 PrevInChain); 9797 else 9798 NewRed = State.Builder.CreateBinOp( 9799 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain, 9800 NewVecOp); 9801 PrevInChain = NewRed; 9802 } else { 9803 PrevInChain = State.get(getChainOp(), Part); 9804 NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); 9805 } 9806 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9807 NextInChain = 9808 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), 9809 NewRed, PrevInChain); 9810 } else if (IsOrdered) 9811 NextInChain = NewRed; 9812 else 9813 NextInChain = State.Builder.CreateBinOp( 9814 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed, 9815 PrevInChain); 9816 State.set(this, NextInChain, Part); 9817 } 9818 } 9819 9820 void VPReplicateRecipe::execute(VPTransformState &State) { 9821 if (State.Instance) { // Generate a single instance. 9822 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 9823 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance, 9824 IsPredicated, State); 9825 // Insert scalar instance packing it into a vector. 9826 if (AlsoPack && State.VF.isVector()) { 9827 // If we're constructing lane 0, initialize to start from poison. 
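      // A sketch of the resulting pack sequence for VF=4 (illustrative only):
      //   %v0 = insertelement <4 x i32> poison, i32 %s0, i32 0
      //   %v1 = insertelement <4 x i32> %v0,    i32 %s1, i32 1
      //   ...
      // Each predicated instance fills in the lane it is responsible for.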
      if (State.Instance->Lane.isFirstLane()) {
        assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
        Value *Poison = PoisonValue::get(
            VectorType::get(getUnderlyingValue()->getType(), State.VF));
        State.set(this, Poison, State.Instance->Part);
      }
      State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
    }
    return;
  }

  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
  unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
  assert((!State.VF.isScalable() || IsUniform) &&
         "Can't scalarize a scalable vector");
  for (unsigned Part = 0; Part < State.UF; ++Part)
    for (unsigned Lane = 0; Lane < EndLane; ++Lane)
      State.ILV->scalarizeInstruction(getUnderlyingInstr(), this,
                                      VPIteration(Part, Lane), IsPredicated,
                                      State);
}

void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Branch on Mask works only on single instance.");

  unsigned Part = State.Instance->Part;
  unsigned Lane = State.Instance->Lane.getKnownLane();

  Value *ConditionBit = nullptr;
  VPValue *BlockInMask = getMask();
  if (BlockInMask) {
    ConditionBit = State.get(BlockInMask, Part);
    if (ConditionBit->getType()->isVectorTy())
      ConditionBit = State.Builder.CreateExtractElement(
          ConditionBit, State.Builder.getInt32(Lane));
  } else // Block in mask is all-one.
    ConditionBit = State.Builder.getTrue();

  // Replace the temporary unreachable terminator with a new conditional branch,
  // whose two destinations will be set later when they are created.
  auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
  assert(isa<UnreachableInst>(CurrentTerminator) &&
         "Expected to replace unreachable terminator with conditional branch.");
  auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
  CondBr->setSuccessor(0, nullptr);
  ReplaceInstWithInst(CurrentTerminator, CondBr);
}

void VPPredInstPHIRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Predicated instruction PHI works per instance.");
  Instruction *ScalarPredInst =
      cast<Instruction>(State.get(getOperand(0), *State.Instance));
  BasicBlock *PredicatedBB = ScalarPredInst->getParent();
  BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
  assert(PredicatingBB && "Predicated block has no single predecessor.");
  assert(isa<VPReplicateRecipe>(getOperand(0)) &&
         "operand must be VPReplicateRecipe");

  // By current pack/unpack logic we need to generate only a single phi node: if
  // a vector value for the predicated instruction exists at this point it means
  // the instruction has vector users only, and a phi for the vector value is
  // needed. In this case the recipe of the predicated instruction is marked to
  // also do that packing, thereby "hoisting" the insert-element sequence.
  // Otherwise, a phi node for the scalar value is needed.
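  // Illustrative shape of the scalar case (block and value names invented):
  //   %phi = phi i32 [ poison, %predicating.bb ], [ %pred.val, %predicated.bb ]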
9894 unsigned Part = State.Instance->Part; 9895 if (State.hasVectorValue(getOperand(0), Part)) { 9896 Value *VectorValue = State.get(getOperand(0), Part); 9897 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9898 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9899 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9900 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9901 if (State.hasVectorValue(this, Part)) 9902 State.reset(this, VPhi, Part); 9903 else 9904 State.set(this, VPhi, Part); 9905 // NOTE: Currently we need to update the value of the operand, so the next 9906 // predicated iteration inserts its generated value in the correct vector. 9907 State.reset(getOperand(0), VPhi, Part); 9908 } else { 9909 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); 9910 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9911 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), 9912 PredicatingBB); 9913 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9914 if (State.hasScalarValue(this, *State.Instance)) 9915 State.reset(this, Phi, *State.Instance); 9916 else 9917 State.set(this, Phi, *State.Instance); 9918 // NOTE: Currently we need to update the value of the operand, so the next 9919 // predicated iteration inserts its generated value in the correct vector. 9920 State.reset(getOperand(0), Phi, *State.Instance); 9921 } 9922 } 9923 9924 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9925 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9926 9927 // Attempt to issue a wide load. 9928 LoadInst *LI = dyn_cast<LoadInst>(&Ingredient); 9929 StoreInst *SI = dyn_cast<StoreInst>(&Ingredient); 9930 9931 assert((LI || SI) && "Invalid Load/Store instruction"); 9932 assert((!SI || StoredValue) && "No stored value provided for widened store"); 9933 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 9934 9935 Type *ScalarDataTy = getLoadStoreType(&Ingredient); 9936 9937 auto *DataTy = VectorType::get(ScalarDataTy, State.VF); 9938 const Align Alignment = getLoadStoreAlignment(&Ingredient); 9939 bool CreateGatherScatter = !Consecutive; 9940 9941 auto &Builder = State.Builder; 9942 InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF); 9943 bool isMaskRequired = getMask(); 9944 if (isMaskRequired) 9945 for (unsigned Part = 0; Part < State.UF; ++Part) 9946 BlockInMaskParts[Part] = State.get(getMask(), Part); 9947 9948 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 9949 // Calculate the pointer for the specific unroll-part. 9950 GetElementPtrInst *PartPtr = nullptr; 9951 9952 bool InBounds = false; 9953 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 9954 InBounds = gep->isInBounds(); 9955 if (Reverse) { 9956 // If the address is consecutive but reversed, then the 9957 // wide store needs to start at the last vector element. 
9958 // RunTimeVF = VScale * VF.getKnownMinValue() 9959 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 9960 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF); 9961 // NumElt = -Part * RunTimeVF 9962 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 9963 // LastLane = 1 - RunTimeVF 9964 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 9965 PartPtr = 9966 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 9967 PartPtr->setIsInBounds(InBounds); 9968 PartPtr = cast<GetElementPtrInst>( 9969 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 9970 PartPtr->setIsInBounds(InBounds); 9971 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 9972 BlockInMaskParts[Part] = 9973 Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse"); 9974 } else { 9975 Value *Increment = 9976 createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part); 9977 PartPtr = cast<GetElementPtrInst>( 9978 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 9979 PartPtr->setIsInBounds(InBounds); 9980 } 9981 9982 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 9983 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 9984 }; 9985 9986 // Handle Stores: 9987 if (SI) { 9988 State.ILV->setDebugLocFromInst(SI); 9989 9990 for (unsigned Part = 0; Part < State.UF; ++Part) { 9991 Instruction *NewSI = nullptr; 9992 Value *StoredVal = State.get(StoredValue, Part); 9993 if (CreateGatherScatter) { 9994 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 9995 Value *VectorGep = State.get(getAddr(), Part); 9996 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 9997 MaskPart); 9998 } else { 9999 if (Reverse) { 10000 // If we store to reverse consecutive memory locations, then we need 10001 // to reverse the order of elements in the stored value. 10002 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse"); 10003 // We don't want to update the value in the map as it might be used in 10004 // another expression. So don't call resetVectorValue(StoredVal). 10005 } 10006 auto *VecPtr = 10007 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 10008 if (isMaskRequired) 10009 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 10010 BlockInMaskParts[Part]); 10011 else 10012 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 10013 } 10014 State.ILV->addMetadata(NewSI, SI); 10015 } 10016 return; 10017 } 10018 10019 // Handle loads. 10020 assert(LI && "Must have a load instruction"); 10021 State.ILV->setDebugLocFromInst(LI); 10022 for (unsigned Part = 0; Part < State.UF; ++Part) { 10023 Value *NewLI; 10024 if (CreateGatherScatter) { 10025 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 10026 Value *VectorGep = State.get(getAddr(), Part); 10027 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, 10028 nullptr, "wide.masked.gather"); 10029 State.ILV->addMetadata(NewLI, LI); 10030 } else { 10031 auto *VecPtr = 10032 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 10033 if (isMaskRequired) 10034 NewLI = Builder.CreateMaskedLoad( 10035 DataTy, VecPtr, Alignment, BlockInMaskParts[Part], 10036 PoisonValue::get(DataTy), "wide.masked.load"); 10037 else 10038 NewLI = 10039 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 10040 10041 // Add metadata to the load, but setVectorValue to the reverse shuffle. 
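      // For a reversed access this means (illustrative only, assuming the
      // scalar loop reads A[i] with a decreasing i and VF=4) a wide load of
      //   <A[i-3], A[i-2], A[i-1], A[i]>
      // followed by a vector reverse, so lane 0 again corresponds to A[i].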
10042 State.ILV->addMetadata(NewLI, LI); 10043 if (Reverse) 10044 NewLI = Builder.CreateVectorReverse(NewLI, "reverse"); 10045 } 10046 10047 State.set(this, NewLI, Part); 10048 } 10049 } 10050 10051 // Determine how to lower the scalar epilogue, which depends on 1) optimising 10052 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 10053 // predication, and 4) a TTI hook that analyses whether the loop is suitable 10054 // for predication. 10055 static ScalarEpilogueLowering getScalarEpilogueLowering( 10056 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 10057 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 10058 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 10059 LoopVectorizationLegality &LVL) { 10060 // 1) OptSize takes precedence over all other options, i.e. if this is set, 10061 // don't look at hints or options, and don't request a scalar epilogue. 10062 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 10063 // LoopAccessInfo (due to code dependency and not being able to reliably get 10064 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 10065 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 10066 // versioning when the vectorization is forced, unlike hasOptSize. So revert 10067 // back to the old way and vectorize with versioning when forced. See D81345.) 10068 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 10069 PGSOQueryType::IRPass) && 10070 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 10071 return CM_ScalarEpilogueNotAllowedOptSize; 10072 10073 // 2) If set, obey the directives 10074 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 10075 switch (PreferPredicateOverEpilogue) { 10076 case PreferPredicateTy::ScalarEpilogue: 10077 return CM_ScalarEpilogueAllowed; 10078 case PreferPredicateTy::PredicateElseScalarEpilogue: 10079 return CM_ScalarEpilogueNotNeededUsePredicate; 10080 case PreferPredicateTy::PredicateOrDontVectorize: 10081 return CM_ScalarEpilogueNotAllowedUsePredicate; 10082 }; 10083 } 10084 10085 // 3) If set, obey the hints 10086 switch (Hints.getPredicate()) { 10087 case LoopVectorizeHints::FK_Enabled: 10088 return CM_ScalarEpilogueNotNeededUsePredicate; 10089 case LoopVectorizeHints::FK_Disabled: 10090 return CM_ScalarEpilogueAllowed; 10091 }; 10092 10093 // 4) if the TTI hook indicates this is profitable, request predication. 10094 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 10095 LVL.getLAI())) 10096 return CM_ScalarEpilogueNotNeededUsePredicate; 10097 10098 return CM_ScalarEpilogueAllowed; 10099 } 10100 10101 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 10102 // If Values have been set for this Def return the one relevant for \p Part. 10103 if (hasVectorValue(Def, Part)) 10104 return Data.PerPartOutput[Def][Part]; 10105 10106 if (!hasScalarValue(Def, {Part, 0})) { 10107 Value *IRV = Def->getLiveInIRValue(); 10108 Value *B = ILV->getBroadcastInstrs(IRV); 10109 set(Def, B, Part); 10110 return B; 10111 } 10112 10113 Value *ScalarValue = get(Def, {Part, 0}); 10114 // If we aren't vectorizing, we can just copy the scalar map values over 10115 // to the vector map. 10116 if (VF.isScalar()) { 10117 set(Def, ScalarValue, Part); 10118 return ScalarValue; 10119 } 10120 10121 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 10122 bool IsUniform = RepR && RepR->isUniform(); 10123 10124 unsigned LastLane = IsUniform ? 
0 : VF.getKnownMinValue() - 1; 10125 // Check if there is a scalar value for the selected lane. 10126 if (!hasScalarValue(Def, {Part, LastLane})) { 10127 // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. 10128 assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) && 10129 "unexpected recipe found to be invariant"); 10130 IsUniform = true; 10131 LastLane = 0; 10132 } 10133 10134 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane})); 10135 // Set the insert point after the last scalarized instruction or after the 10136 // last PHI, if LastInst is a PHI. This ensures the insertelement sequence 10137 // will directly follow the scalar definitions. 10138 auto OldIP = Builder.saveIP(); 10139 auto NewIP = 10140 isa<PHINode>(LastInst) 10141 ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI()) 10142 : std::next(BasicBlock::iterator(LastInst)); 10143 Builder.SetInsertPoint(&*NewIP); 10144 10145 // However, if we are vectorizing, we need to construct the vector values. 10146 // If the value is known to be uniform after vectorization, we can just 10147 // broadcast the scalar value corresponding to lane zero for each unroll 10148 // iteration. Otherwise, we construct the vector values using 10149 // insertelement instructions. Since the resulting vectors are stored in 10150 // State, we will only generate the insertelements once. 10151 Value *VectorValue = nullptr; 10152 if (IsUniform) { 10153 VectorValue = ILV->getBroadcastInstrs(ScalarValue); 10154 set(Def, VectorValue, Part); 10155 } else { 10156 // Initialize packing with insertelements to start from undef. 10157 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 10158 Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF)); 10159 set(Def, Undef, Part); 10160 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) 10161 ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this); 10162 VectorValue = get(Def, Part); 10163 } 10164 Builder.restoreIP(OldIP); 10165 return VectorValue; 10166 } 10167 10168 // Process the loop in the VPlan-native vectorization path. This path builds 10169 // VPlan upfront in the vectorization pipeline, which allows to apply 10170 // VPlan-to-VPlan transformations from the very beginning without modifying the 10171 // input LLVM IR. 10172 static bool processLoopInVPlanNativePath( 10173 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 10174 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 10175 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 10176 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 10177 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, 10178 LoopVectorizationRequirements &Requirements) { 10179 10180 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { 10181 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); 10182 return false; 10183 } 10184 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 10185 Function *F = L->getHeader()->getParent(); 10186 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 10187 10188 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10189 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 10190 10191 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 10192 &Hints, IAI); 10193 // Use the planner for outer loop vectorization. 10194 // TODO: CM is not used at this point inside the planner. 
Turn CM into an 10195 // optional argument if we don't need it in the future. 10196 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints, 10197 Requirements, ORE); 10198 10199 // Get user vectorization factor. 10200 ElementCount UserVF = Hints.getWidth(); 10201 10202 CM.collectElementTypesForWidening(); 10203 10204 // Plan how to best vectorize, return the best VF and its cost. 10205 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 10206 10207 // If we are stress testing VPlan builds, do not attempt to generate vector 10208 // code. Masked vector code generation support will follow soon. 10209 // Also, do not attempt to vectorize if no vector code will be produced. 10210 if (VPlanBuildStressTest || EnableVPlanPredication || 10211 VectorizationFactor::Disabled() == VF) 10212 return false; 10213 10214 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10215 10216 { 10217 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10218 F->getParent()->getDataLayout()); 10219 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 10220 &CM, BFI, PSI, Checks); 10221 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 10222 << L->getHeader()->getParent()->getName() << "\"\n"); 10223 LVP.executePlan(VF.Width, 1, BestPlan, LB, DT); 10224 } 10225 10226 // Mark the loop as already vectorized to avoid vectorizing again. 10227 Hints.setAlreadyVectorized(); 10228 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10229 return true; 10230 } 10231 10232 // Emit a remark if there are stores to floats that required a floating point 10233 // extension. If the vectorized loop was generated with floating point there 10234 // will be a performance penalty from the conversion overhead and the change in 10235 // the vector width. 10236 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) { 10237 SmallVector<Instruction *, 4> Worklist; 10238 for (BasicBlock *BB : L->getBlocks()) { 10239 for (Instruction &Inst : *BB) { 10240 if (auto *S = dyn_cast<StoreInst>(&Inst)) { 10241 if (S->getValueOperand()->getType()->isFloatTy()) 10242 Worklist.push_back(S); 10243 } 10244 } 10245 } 10246 10247 // Traverse the floating point stores upwards searching, for floating point 10248 // conversions. 10249 SmallPtrSet<const Instruction *, 4> Visited; 10250 SmallPtrSet<const Instruction *, 4> EmittedRemark; 10251 while (!Worklist.empty()) { 10252 auto *I = Worklist.pop_back_val(); 10253 if (!L->contains(I)) 10254 continue; 10255 if (!Visited.insert(I).second) 10256 continue; 10257 10258 // Emit a remark if the floating point store required a floating 10259 // point conversion. 10260 // TODO: More work could be done to identify the root cause such as a 10261 // constant or a function return type and point the user to it. 10262 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second) 10263 ORE->emit([&]() { 10264 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision", 10265 I->getDebugLoc(), L->getHeader()) 10266 << "floating point conversion changes vector width. 
" 10267 << "Mixed floating point precision requires an up/down " 10268 << "cast that will negatively impact performance."; 10269 }); 10270 10271 for (Use &Op : I->operands()) 10272 if (auto *OpI = dyn_cast<Instruction>(Op)) 10273 Worklist.push_back(OpI); 10274 } 10275 } 10276 10277 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 10278 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 10279 !EnableLoopInterleaving), 10280 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 10281 !EnableLoopVectorization) {} 10282 10283 bool LoopVectorizePass::processLoop(Loop *L) { 10284 assert((EnableVPlanNativePath || L->isInnermost()) && 10285 "VPlan-native path is not enabled. Only process inner loops."); 10286 10287 #ifndef NDEBUG 10288 const std::string DebugLocStr = getDebugLocString(L); 10289 #endif /* NDEBUG */ 10290 10291 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 10292 << L->getHeader()->getParent()->getName() << "\" from " 10293 << DebugLocStr << "\n"); 10294 10295 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI); 10296 10297 LLVM_DEBUG( 10298 dbgs() << "LV: Loop hints:" 10299 << " force=" 10300 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 10301 ? "disabled" 10302 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 10303 ? "enabled" 10304 : "?")) 10305 << " width=" << Hints.getWidth() 10306 << " interleave=" << Hints.getInterleave() << "\n"); 10307 10308 // Function containing loop 10309 Function *F = L->getHeader()->getParent(); 10310 10311 // Looking at the diagnostic output is the only way to determine if a loop 10312 // was vectorized (other than looking at the IR or machine code), so it 10313 // is important to generate an optimization remark for each loop. Most of 10314 // these messages are generated as OptimizationRemarkAnalysis. Remarks 10315 // generated as OptimizationRemark and OptimizationRemarkMissed are 10316 // less verbose reporting vectorized loops and unvectorized loops that may 10317 // benefit from vectorization, respectively. 10318 10319 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 10320 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 10321 return false; 10322 } 10323 10324 PredicatedScalarEvolution PSE(*SE, *L); 10325 10326 // Check if it is legal to vectorize the loop. 10327 LoopVectorizationRequirements Requirements; 10328 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 10329 &Requirements, &Hints, DB, AC, BFI, PSI); 10330 if (!LVL.canVectorize(EnableVPlanNativePath)) { 10331 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 10332 Hints.emitRemarkWithHints(); 10333 return false; 10334 } 10335 10336 // Check the function attributes and profiles to find out if this function 10337 // should be optimized for size. 10338 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10339 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 10340 10341 // Entrance to the VPlan-native vectorization path. Outer loops are processed 10342 // here. They may require CFG and instruction level transformations before 10343 // even evaluating whether vectorization is profitable. Since we cannot modify 10344 // the incoming IR, we need to build VPlan upfront in the vectorization 10345 // pipeline. 
10346 if (!L->isInnermost()) 10347 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 10348 ORE, BFI, PSI, Hints, Requirements); 10349 10350 assert(L->isInnermost() && "Inner loop expected."); 10351 10352 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 10353 // count by optimizing for size, to minimize overheads. 10354 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 10355 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 10356 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 10357 << "This loop is worth vectorizing only if no scalar " 10358 << "iteration overheads are incurred."); 10359 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 10360 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 10361 else { 10362 LLVM_DEBUG(dbgs() << "\n"); 10363 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 10364 } 10365 } 10366 10367 // Check the function attributes to see if implicit floats are allowed. 10368 // FIXME: This check doesn't seem possibly correct -- what if the loop is 10369 // an integer loop and the vector instructions selected are purely integer 10370 // vector instructions? 10371 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 10372 reportVectorizationFailure( 10373 "Can't vectorize when the NoImplicitFloat attribute is used", 10374 "loop not vectorized due to NoImplicitFloat attribute", 10375 "NoImplicitFloat", ORE, L); 10376 Hints.emitRemarkWithHints(); 10377 return false; 10378 } 10379 10380 // Check if the target supports potentially unsafe FP vectorization. 10381 // FIXME: Add a check for the type of safety issue (denormal, signaling) 10382 // for the target we're vectorizing for, to make sure none of the 10383 // additional fp-math flags can help. 10384 if (Hints.isPotentiallyUnsafe() && 10385 TTI->isFPVectorizationPotentiallyUnsafe()) { 10386 reportVectorizationFailure( 10387 "Potentially unsafe FP op prevents vectorization", 10388 "loop not vectorized due to unsafe FP support.", 10389 "UnsafeFP", ORE, L); 10390 Hints.emitRemarkWithHints(); 10391 return false; 10392 } 10393 10394 bool AllowOrderedReductions; 10395 // If the flag is set, use that instead and override the TTI behaviour. 10396 if (ForceOrderedReductions.getNumOccurrences() > 0) 10397 AllowOrderedReductions = ForceOrderedReductions; 10398 else 10399 AllowOrderedReductions = TTI->enableOrderedReductions(); 10400 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) { 10401 ORE->emit([&]() { 10402 auto *ExactFPMathInst = Requirements.getExactFPInst(); 10403 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps", 10404 ExactFPMathInst->getDebugLoc(), 10405 ExactFPMathInst->getParent()) 10406 << "loop not vectorized: cannot prove it is safe to reorder " 10407 "floating-point operations"; 10408 }); 10409 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to " 10410 "reorder floating-point operations\n"); 10411 Hints.emitRemarkWithHints(); 10412 return false; 10413 } 10414 10415 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 10416 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 10417 10418 // If an override option has been passed in for interleaved accesses, use it. 10419 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 10420 UseInterleaved = EnableInterleavedMemAccesses; 10421 10422 // Analyze interleaved memory accesses. 
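  // As an illustration (made-up source), a pair of loads such as
  //   sum += A[2*i] + A[2*i+1];
  // can form an interleave group with factor 2 that is later lowered to a
  // single wide load followed by shufflevectors.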
  if (UseInterleaved) {
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();
  CM.collectElementTypesForWidening();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
                               Requirements, ORE);

  // Get user vectorization factor and interleave count.
  ElementCount UserVF = Hints.getWidth();
  unsigned UserIC = Hints.getInterleave();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (VF.Width.isScalar()) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being
    // explicitly requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
10503 ORE->emit([&]() { 10504 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 10505 L->getStartLoc(), L->getHeader()) 10506 << VecDiagMsg.second; 10507 }); 10508 ORE->emit([&]() { 10509 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 10510 L->getStartLoc(), L->getHeader()) 10511 << IntDiagMsg.second; 10512 }); 10513 return false; 10514 } else if (!VectorizeLoop && InterleaveLoop) { 10515 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10516 ORE->emit([&]() { 10517 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 10518 L->getStartLoc(), L->getHeader()) 10519 << VecDiagMsg.second; 10520 }); 10521 } else if (VectorizeLoop && !InterleaveLoop) { 10522 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10523 << ") in " << DebugLocStr << '\n'); 10524 ORE->emit([&]() { 10525 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 10526 L->getStartLoc(), L->getHeader()) 10527 << IntDiagMsg.second; 10528 }); 10529 } else if (VectorizeLoop && InterleaveLoop) { 10530 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10531 << ") in " << DebugLocStr << '\n'); 10532 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10533 } 10534 10535 bool DisableRuntimeUnroll = false; 10536 MDNode *OrigLoopID = L->getLoopID(); 10537 { 10538 // Optimistically generate runtime checks. Drop them if they turn out to not 10539 // be profitable. Limit the scope of Checks, so the cleanup happens 10540 // immediately after vector codegeneration is done. 10541 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10542 F->getParent()->getDataLayout()); 10543 if (!VF.Width.isScalar() || IC > 1) 10544 Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate()); 10545 10546 using namespace ore; 10547 if (!VectorizeLoop) { 10548 assert(IC > 1 && "interleave count should not be 1 or 0"); 10549 // If we decided that it is not legal to vectorize the loop, then 10550 // interleave it. 10551 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, 10552 &CM, BFI, PSI, Checks); 10553 10554 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10555 LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT); 10556 10557 ORE->emit([&]() { 10558 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 10559 L->getHeader()) 10560 << "interleaved loop (interleaved count: " 10561 << NV("InterleaveCount", IC) << ")"; 10562 }); 10563 } else { 10564 // If we decided that it is *legal* to vectorize the loop, then do it. 10565 10566 // Consider vectorizing the epilogue too if it's profitable. 10567 VectorizationFactor EpilogueVF = 10568 CM.selectEpilogueVectorizationFactor(VF.Width, LVP); 10569 if (EpilogueVF.Width.isVector()) { 10570 10571 // The first pass vectorizes the main loop and creates a scalar epilogue 10572 // to be vectorized by executing the plan (potentially with a different 10573 // factor) again shortly afterwards. 10574 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1); 10575 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, 10576 EPI, &LVL, &CM, BFI, PSI, Checks); 10577 10578 VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF); 10579 LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV, 10580 DT); 10581 ++LoopsVectorized; 10582 10583 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10584 formLCSSARecursively(*L, *DT, LI, SE); 10585 10586 // Second pass vectorizes the epilogue and adjusts the control flow 10587 // edges from the first pass. 
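        // The overall structure after both passes is, roughly (illustrative;
        // the exact CFG is produced by the executePlan calls and the epilogue
        // vectorizer classes):
        //   main vector loop (VF.Width, unrolled by IC)
        //     -> epilogue vector loop (EpilogueVF.Width, UF = 1)
        //       -> scalar remainder loop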
10588 EPI.MainLoopVF = EPI.EpilogueVF; 10589 EPI.MainLoopUF = EPI.EpilogueUF; 10590 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC, 10591 ORE, EPI, &LVL, &CM, BFI, PSI, 10592 Checks); 10593 10594 VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF); 10595 10596 // Ensure that the start values for any VPReductionPHIRecipes are 10597 // updated before vectorising the epilogue loop. 10598 VPBasicBlock *Header = BestEpiPlan.getEntry()->getEntryBasicBlock(); 10599 for (VPRecipeBase &R : Header->phis()) { 10600 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) { 10601 if (auto *Resume = MainILV.getReductionResumeValue( 10602 ReductionPhi->getRecurrenceDescriptor())) { 10603 VPValue *StartVal = new VPValue(Resume); 10604 BestEpiPlan.addExternalDef(StartVal); 10605 ReductionPhi->setOperand(0, StartVal); 10606 } 10607 } 10608 } 10609 10610 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, 10611 DT); 10612 ++LoopsEpilogueVectorized; 10613 10614 if (!MainILV.areSafetyChecksAdded()) 10615 DisableRuntimeUnroll = true; 10616 } else { 10617 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 10618 &LVL, &CM, BFI, PSI, Checks); 10619 10620 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10621 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT); 10622 ++LoopsVectorized; 10623 10624 // Add metadata to disable runtime unrolling a scalar loop when there 10625 // are no runtime checks about strides and memory. A scalar loop that is 10626 // rarely used is not worth unrolling. 10627 if (!LB.areSafetyChecksAdded()) 10628 DisableRuntimeUnroll = true; 10629 } 10630 // Report the vectorization decision. 10631 ORE->emit([&]() { 10632 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 10633 L->getHeader()) 10634 << "vectorized loop (vectorization width: " 10635 << NV("VectorizationFactor", VF.Width) 10636 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 10637 }); 10638 } 10639 10640 if (ORE->allowExtraAnalysis(LV_NAME)) 10641 checkMixedPrecision(L, ORE); 10642 } 10643 10644 Optional<MDNode *> RemainderLoopID = 10645 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 10646 LLVMLoopVectorizeFollowupEpilogue}); 10647 if (RemainderLoopID.hasValue()) { 10648 L->setLoopID(RemainderLoopID.getValue()); 10649 } else { 10650 if (DisableRuntimeUnroll) 10651 AddRuntimeUnrollDisableMetaData(L); 10652 10653 // Mark the loop as already vectorized to avoid vectorizing again. 10654 Hints.setAlreadyVectorized(); 10655 } 10656 10657 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10658 return true; 10659 } 10660 10661 LoopVectorizeResult LoopVectorizePass::runImpl( 10662 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 10663 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 10664 DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_, 10665 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 10666 OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) { 10667 SE = &SE_; 10668 LI = &LI_; 10669 TTI = &TTI_; 10670 DT = &DT_; 10671 BFI = &BFI_; 10672 TLI = TLI_; 10673 AA = &AA_; 10674 AC = &AC_; 10675 GetLAA = &GetLAA_; 10676 DB = &DB_; 10677 ORE = &ORE_; 10678 PSI = PSI_; 10679 10680 // Don't attempt if 10681 // 1. the target claims to have no vector registers, and 10682 // 2. interleaving won't help ILP. 
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  // Process each loop nest in the function.
  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
                                      TLI, TTI, nullptr, nullptr, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
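  // Consequently, when the VPlan-native path is enabled, LoopInfo and the
  // dominator tree are not marked preserved below, so the pass manager will
  // recompute them for later passes.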
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }

  if (Result.MadeCFGChange) {
    // Making CFG changes likely means a loop got vectorized. Indicate that
    // extra simplification passes should be run.
    // TODO: MadeCFGChanges is not a perfect proxy. Extra passes should only
    // be run if runtime checks have been added.
    AM.getResult<ShouldRunExtraVectorPasses>(F);
    PA.preserve<ShouldRunExtraVectorPasses>();
  } else {
    PA.preserveSet<CFGAnalyses>();
  }
  return PA;
}

void LoopVectorizePass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << "<";
  OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
  OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
  OS << ">";
}
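// A usage sketch (illustrative, not part of this file): the pass can be run in
// isolation with the new pass manager, and the remarks emitted above can be
// inspected on the command line, e.g.
//   opt -passes=loop-vectorize -S input.ll
//   opt -passes=loop-vectorize -pass-remarks=loop-vectorize \
//       -pass-remarks-analysis=loop-vectorize -S input.ll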