//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
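//
// For example (a sketch, assuming VF = 4 and ignoring the runtime checks and
// types involved), a scalar loop such as:
//
//   for (i = 0; i < n; ++i)
//     A[i] = B[i] + 42;
//
// is conceptually rewritten so that each 'wide' iteration processes four
// consecutive elements, with any remaining iterations handled by a scalar
// epilogue loop:
//
//   for (i = 0; i + 4 <= n; i += 4)
//     A[i:i+3] = B[i:i+3] + <42, 42, 42, 42>; // one wide iteration
//   for (; i < n; ++i)                        // scalar remainder
//     A[i] = B[i] + 42;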
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
"llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 144 #include <algorithm> 145 #include <cassert> 146 #include <cstdint> 147 #include <cstdlib> 148 #include <functional> 149 #include <iterator> 150 #include <limits> 151 #include <memory> 152 #include <string> 153 #include <tuple> 154 #include <utility> 155 156 using namespace llvm; 157 158 #define LV_NAME "loop-vectorize" 159 #define DEBUG_TYPE LV_NAME 160 161 #ifndef NDEBUG 162 const char VerboseDebug[] = DEBUG_TYPE "-verbose"; 163 #endif 164 165 /// @{ 166 /// Metadata attribute names 167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all"; 168 const char LLVMLoopVectorizeFollowupVectorized[] = 169 "llvm.loop.vectorize.followup_vectorized"; 170 const char LLVMLoopVectorizeFollowupEpilogue[] = 171 "llvm.loop.vectorize.followup_epilogue"; 172 /// @} 173 174 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized"); 177 178 static cl::opt<bool> EnableEpilogueVectorization( 179 "enable-epilogue-vectorization", cl::init(true), cl::Hidden, 180 cl::desc("Enable vectorization of epilogue loops.")); 181 182 static cl::opt<unsigned> EpilogueVectorizationForceVF( 183 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, 184 cl::desc("When epilogue vectorization is enabled, and a value greater than " 185 "1 is specified, forces the given VF for all applicable epilogue " 186 "loops.")); 187 188 static cl::opt<unsigned> EpilogueVectorizationMinVF( 189 "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, 190 cl::desc("Only loops with vectorization factor equal to or larger than " 191 "the specified value are considered for epilogue vectorization.")); 192 193 /// Loops with a known constant trip count below this number are vectorized only 194 /// if no scalar iteration overheads are incurred. 195 static cl::opt<unsigned> TinyTripCountVectorThreshold( 196 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 197 cl::desc("Loops with a constant trip count that is smaller than this " 198 "value are vectorized only if no scalar iteration overheads " 199 "are incurred.")); 200 201 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold( 202 "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden, 203 cl::desc("The maximum allowed number of runtime memory checks with a " 204 "vectorize(enable) pragma.")); 205 206 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired, 207 // that predication is preferred, and this lists all options. I.e., the 208 // vectorizer will try to fold the tail-loop (epilogue) into the vector body 209 // and predicate the instructions accordingly. 
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));
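// As an illustration (a sketch, assuming VF = 4), tail-folding turns
//
//   for (i = 0; i < n; ++i)
//     A[i] += 1;
//
// into a vector loop with no scalar remainder, where the final, partial
// iteration executes under a lane mask instead of in an epilogue loop:
//
//   for (i = 0; i < n; i += 4)
//     A[i:i+3] += 1 under mask (i + <0,1,2,3> < <n,n,n,n>);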
Mostly " 279 "useful for getting consistent testing.")); 280 281 static cl::opt<bool> ForceTargetSupportsScalableVectors( 282 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, 283 cl::desc( 284 "Pretend that scalable vectors are supported, even if the target does " 285 "not support them. This flag should only be used for testing.")); 286 287 static cl::opt<unsigned> SmallLoopCost( 288 "small-loop-cost", cl::init(20), cl::Hidden, 289 cl::desc( 290 "The cost of a loop that is considered 'small' by the interleaver.")); 291 292 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 293 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 294 cl::desc("Enable the use of the block frequency analysis to access PGO " 295 "heuristics minimizing code growth in cold regions and being more " 296 "aggressive in hot regions.")); 297 298 // Runtime interleave loops for load/store throughput. 299 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 300 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 301 cl::desc( 302 "Enable runtime interleaving until load/store ports are saturated")); 303 304 /// Interleave small loops with scalar reductions. 305 static cl::opt<bool> InterleaveSmallLoopScalarReduction( 306 "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, 307 cl::desc("Enable interleaving for loops with small iteration counts that " 308 "contain scalar reductions to expose ILP.")); 309 310 /// The number of stores in a loop that are allowed to need predication. 311 static cl::opt<unsigned> NumberOfStoresToPredicate( 312 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 313 cl::desc("Max number of stores to be predicated behind an if.")); 314 315 static cl::opt<bool> EnableIndVarRegisterHeur( 316 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 317 cl::desc("Count the induction variable only once when interleaving")); 318 319 static cl::opt<bool> EnableCondStoresVectorization( 320 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 321 cl::desc("Enable if predication of stores during vectorization.")); 322 323 static cl::opt<unsigned> MaxNestedScalarReductionIC( 324 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 325 cl::desc("The maximum interleave count to use when interleaving a scalar " 326 "reduction in a nested loop.")); 327 328 static cl::opt<bool> 329 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), 330 cl::Hidden, 331 cl::desc("Prefer in-loop vector reductions, " 332 "overriding the targets preference.")); 333 334 static cl::opt<bool> ForceOrderedReductions( 335 "force-ordered-reductions", cl::init(false), cl::Hidden, 336 cl::desc("Enable the vectorisation of loops with in-order (strict) " 337 "FP reductions")); 338 339 static cl::opt<bool> PreferPredicatedReductionSelect( 340 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, 341 cl::desc( 342 "Prefer predicating a reduction operation over an after loop select.")); 343 344 cl::opt<bool> EnableVPlanNativePath( 345 "enable-vplan-native-path", cl::init(false), cl::Hidden, 346 cl::desc("Enable VPlan-native vectorization path with " 347 "support for outer loop vectorization.")); 348 349 // FIXME: Remove this switch once we have divergence analysis. Currently we 350 // assume divergent non-backedge branches when this switch is true. 
static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> ForceOrderedReductions(
    "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
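// For example, on typical x86 data layouts, x86_fp80 is irregular: its type
// size is 80 bits but its alloc size is larger (96 or 128 bits depending on
// the ABI), so an array of x86_fp80 contains padding and cannot be
// reinterpreted as a vector of x86_fp80. Types like i32 or double, whose
// alloc and type sizes match, are regular.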
/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

AnalysisKey ShouldRunExtraVectorPasses::Key;

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop and the start value for the canonical induction, if it is != 0. The
  /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
  /// complex control flow around the loops.
  virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;
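  /// For example, with VF = 4 and UF = 2, a value of type i32 in the original
  /// loop is represented by two <4 x i32> values in the vectorized loop, and
  /// its VectorParts holds those two vector values.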
  /// Vectorize a single first-order recurrence or pointer induction PHINode in
  /// a block. This method handles the induction variable canonicalization. It
  /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive. Uses the VPValue operands from \p RepRecipe instead
  /// of \p Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type. \p CanonicalIV is the scalar value generated for
  /// the canonical induction variable.
  void widenIntOrFpInduction(PHINode *IV, VPWidenIntOrFpInductionRecipe *Def,
                             VPTransformState &State, Value *CanonicalIV);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Set the debug location in the builder \p CustomBuilder using the debug
  /// location in \p V. If \p CustomBuilder is None, the class member's Builder
  /// is used instead.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilder<> *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we
  /// are able to vectorize with strict in-order reductions for the given
  /// RdxDesc.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1,
  /// ...; this is needed because each iteration in the loop corresponds to a
  /// SIMD element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  // Returns the resume value (bc.merge.rdx) for a reduction as
  // generated by fixReduction.
  PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Introduce a conditional branch (on true, condition to be set later) at
  /// the end of the header=latch connecting it to itself (across the backedge)
  /// and to the exit block of \p L.
  void createHeaderBranch(Loop *L);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
                               VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPTransformState &State);
  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID,
                              BasicBlock *VectorHeader) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and
  /// return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Collect poison-generating recipes that may generate a poison value that
  /// is used after vectorization, even when their operands are not poison.
  /// Those recipes meet the following conditions:
  ///  * Contribute to the address computation of a recipe generating a widen
  ///    memory load/store (VPWidenMemoryInstructionRecipe or
  ///    VPInterleaveRecipe).
  ///  * Such a widen memory load/store has at least one underlying Instruction
  ///    that is in a basic block that needs predication and after
  ///    vectorization the generated instruction won't be predicated.
  void collectPoisonGeneratingRecipes(VPTransformState &State);
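  /// For example (a sketch of the kind of pattern this guards against): if
  /// the original loop contains
  ///
  ///   if (i != 0)
  ///     ... = *(p + (i - 1));   // subtraction carries the "nuw" flag
  ///
  /// and the load is widened to a consecutive, unpredicated vector load, the
  /// "nuw" flag on the now-unconditional index computation could yield poison
  /// for the lane where i == 0, so such flags have to be dropped.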
  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;

  // Holds the resume values for reductions in the loops, used to set the
  // correct start value of reduction PHIs when vectorizing the epilogue.
  SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
      ReductionResumeValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  std::pair<BasicBlock *, Value *>
  createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
  IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When an FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
                          << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
    StringRef RemarkName, Loop *TheLoop, Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is a debug location attached to the instruction, use it;
    // otherwise fall back to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

namespace llvm {

/// Return a value for Step multiplied by VF.
Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF,
                       int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}
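// For example, with a fixed VF of 4 and Step = 2, createStepForVF returns the
// constant 8; with a scalable VF of <vscale x 4> it returns the runtime value
// vscale * 8, and getRuntimeVF similarly returns vscale * 4.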
static Value *getRuntimeVFAsFloat(IRBuilder<> &B, Type *FTy, ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {

  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec) ||
          isa<VPCanonicalIVPHIRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. Collect recipe if its underlying instruction has
      // poison-generating flags.
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && Instr->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);

      // Add new definitions to the worklist.
      for (VPValue *operand : CurRec->operands())
        if (VPDef *OpDef = operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a VPWidenRecipe
  // or VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
            Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

PHINode *InnerLoopVectorizer::getReductionResumeValue(
    const RecurrenceDescriptor &RdxDesc) {
  auto It = ReductionResumeValues.find(&RdxDesc);
  assert(It != ReductionResumeValues.end() &&
         "Expected to find a resume value for the reduction.");
  return It->second;
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
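// For example, this comparator orders 4 < 8 < vscale x 4 < vscale x 8: all
// fixed element counts precede all scalable ones, and within each group the
// known minimum value breaks the tie.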
/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// Memory access instruction may be vectorized in more than one way.
  /// Form of instruction after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decisions map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
1344 SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1345 };
1346
1347 /// \return Information about the register usage of the loop for the
1348 /// given vectorization factors.
1349 SmallVector<RegisterUsage, 8>
1350 calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1351
1352 /// Collect values we want to ignore in the cost model.
1353 void collectValuesToIgnore();
1354
1355 /// Collect all element types in the loop for which widening is needed.
1356 void collectElementTypesForWidening();
1357
1358 /// Split reductions into those that happen in the loop, and those that happen
1359 /// outside. In-loop reductions are collected into InLoopReductionChains.
1360 void collectInLoopReductions();
1361
1362 /// Returns true if we should use strict in-order reductions for the given
1363 /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
1364 /// the IsOrdered flag of RdxDesc is set, and we do not allow reordering
1365 /// of FP operations.
1366 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1367 return !Hints->allowReordering() && RdxDesc.isOrdered();
1368 }
1369
1370 /// \returns The smallest bitwidth each instruction can be represented with.
1371 /// The vector equivalents of these instructions should be truncated to this
1372 /// type.
1373 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1374 return MinBWs;
1375 }
1376
1377 /// \returns True if it is more profitable to scalarize instruction \p I for
1378 /// vectorization factor \p VF.
1379 bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1380 assert(VF.isVector() &&
1381 "Profitable to scalarize relevant only for VF > 1.");
1382
1383 // Cost model is not run in the VPlan-native path - return conservative
1384 // result until this changes.
1385 if (EnableVPlanNativePath)
1386 return false;
1387
1388 auto Scalars = InstsToScalarize.find(VF);
1389 assert(Scalars != InstsToScalarize.end() &&
1390 "VF not yet analyzed for scalarization profitability");
1391 return Scalars->second.find(I) != Scalars->second.end();
1392 }
1393
1394 /// Returns true if \p I is known to be uniform after vectorization.
1395 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1396 if (VF.isScalar())
1397 return true;
1398
1399 // Cost model is not run in the VPlan-native path - return conservative
1400 // result until this changes.
1401 if (EnableVPlanNativePath)
1402 return false;
1403
1404 auto UniformsPerVF = Uniforms.find(VF);
1405 assert(UniformsPerVF != Uniforms.end() &&
1406 "VF not yet analyzed for uniformity");
1407 return UniformsPerVF->second.count(I);
1408 }
1409
1410 /// Returns true if \p I is known to be scalar after vectorization.
1411 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1412 if (VF.isScalar())
1413 return true;
1414
1415 // Cost model is not run in the VPlan-native path - return conservative
1416 // result until this changes.
1417 if (EnableVPlanNativePath)
1418 return false;
1419
1420 auto ScalarsPerVF = Scalars.find(VF);
1421 assert(ScalarsPerVF != Scalars.end() &&
1422 "Scalar values are not calculated for VF");
1423 return ScalarsPerVF->second.count(I);
1424 }
1425
1426 /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1427 /// for vectorization factor \p VF.
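/// For example (illustrative): if an i32 addition only feeds a store of its
/// low 8 bits, MinBWs may record a minimal bitwidth of 8 for the addition,
/// in which case its vector form can be performed on <VF x i8> rather than
/// <VF x i32>.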
1428 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1429 return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1430 !isProfitableToScalarize(I, VF) &&
1431 !isScalarAfterVectorization(I, VF);
1432 }
1433
1434 /// Decision that was taken during cost calculation for a memory instruction.
1435 enum InstWidening {
1436 CM_Unknown,
1437 CM_Widen, // For consecutive accesses with stride +1.
1438 CM_Widen_Reverse, // For consecutive accesses with stride -1.
1439 CM_Interleave,
1440 CM_GatherScatter,
1441 CM_Scalarize
1442 };
1443
1444 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1445 /// instruction \p I and vector width \p VF.
1446 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1447 InstructionCost Cost) {
1448 assert(VF.isVector() && "Expected VF >=2");
1449 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1450 }
1451
1452 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1453 /// interleaving group \p Grp and vector width \p VF.
1454 void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1455 ElementCount VF, InstWidening W,
1456 InstructionCost Cost) {
1457 assert(VF.isVector() && "Expected VF >=2");
1458 // Broadcast this decision to all instructions inside the group,
1459 // but assign the cost to the insert-position instruction only.
1460 for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1461 if (auto *I = Grp->getMember(i)) {
1462 if (Grp->getInsertPos() == I)
1463 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1464 else
1465 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1466 }
1467 }
1468 }
1469
1470 /// Return the cost model decision for the given instruction \p I and vector
1471 /// width \p VF. Return CM_Unknown if this instruction did not pass
1472 /// through the cost modeling.
1473 InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1474 assert(VF.isVector() && "Expected VF to be a vector VF");
1475 // Cost model is not run in the VPlan-native path - return conservative
1476 // result until this changes.
1477 if (EnableVPlanNativePath)
1478 return CM_GatherScatter;
1479
1480 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1481 auto Itr = WideningDecisions.find(InstOnVF);
1482 if (Itr == WideningDecisions.end())
1483 return CM_Unknown;
1484 return Itr->second.first;
1485 }
1486
1487 /// Return the vectorization cost for the given instruction \p I and vector
1488 /// width \p VF.
1489 InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1490 assert(VF.isVector() && "Expected VF >=2");
1491 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1492 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1493 "The cost is not calculated");
1494 return WideningDecisions[InstOnVF].second;
1495 }
1496
1497 /// Returns true if instruction \p I is an optimizable truncate whose operand
1498 /// is an induction variable. Such a truncate will be removed by adding a new
1499 /// induction variable with the destination type.
1500 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1501 // If the instruction is not a truncate, return false.
1502 auto *Trunc = dyn_cast<TruncInst>(I);
1503 if (!Trunc)
1504 return false;
1505
1506 // Get the source and destination types of the truncate.
1507 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1508 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1509 1510 // If the truncate is free for the given types, return false. Replacing a 1511 // free truncate with an induction variable would add an induction variable 1512 // update instruction to each iteration of the loop. We exclude from this 1513 // check the primary induction variable since it will need an update 1514 // instruction regardless. 1515 Value *Op = Trunc->getOperand(0); 1516 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1517 return false; 1518 1519 // If the truncated value is not an induction variable, return false. 1520 return Legal->isInductionPhi(Op); 1521 } 1522 1523 /// Collects the instructions to scalarize for each predicated instruction in 1524 /// the loop. 1525 void collectInstsToScalarize(ElementCount VF); 1526 1527 /// Collect Uniform and Scalar values for the given \p VF. 1528 /// The sets depend on CM decision for Load/Store instructions 1529 /// that may be vectorized as interleave, gather-scatter or scalarized. 1530 void collectUniformsAndScalars(ElementCount VF) { 1531 // Do the analysis once. 1532 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) 1533 return; 1534 setCostBasedWideningDecision(VF); 1535 collectLoopUniforms(VF); 1536 collectLoopScalars(VF); 1537 } 1538 1539 /// Returns true if the target machine supports masked store operation 1540 /// for the given \p DataType and kind of access to \p Ptr. 1541 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const { 1542 return Legal->isConsecutivePtr(DataType, Ptr) && 1543 TTI.isLegalMaskedStore(DataType, Alignment); 1544 } 1545 1546 /// Returns true if the target machine supports masked load operation 1547 /// for the given \p DataType and kind of access to \p Ptr. 1548 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const { 1549 return Legal->isConsecutivePtr(DataType, Ptr) && 1550 TTI.isLegalMaskedLoad(DataType, Alignment); 1551 } 1552 1553 /// Returns true if the target machine can represent \p V as a masked gather 1554 /// or scatter operation. 1555 bool isLegalGatherOrScatter(Value *V, 1556 ElementCount VF = ElementCount::getFixed(1)) { 1557 bool LI = isa<LoadInst>(V); 1558 bool SI = isa<StoreInst>(V); 1559 if (!LI && !SI) 1560 return false; 1561 auto *Ty = getLoadStoreType(V); 1562 Align Align = getLoadStoreAlignment(V); 1563 if (VF.isVector()) 1564 Ty = VectorType::get(Ty, VF); 1565 return (LI && TTI.isLegalMaskedGather(Ty, Align)) || 1566 (SI && TTI.isLegalMaskedScatter(Ty, Align)); 1567 } 1568 1569 /// Returns true if the target machine supports all of the reduction 1570 /// variables found for the given VF. 1571 bool canVectorizeReductions(ElementCount VF) const { 1572 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 1573 const RecurrenceDescriptor &RdxDesc = Reduction.second; 1574 return TTI.isLegalToVectorizeReduction(RdxDesc, VF); 1575 })); 1576 } 1577 1578 /// Returns true if \p I is an instruction that will be scalarized with 1579 /// predication when vectorizing \p I with vectorization factor \p VF. Such 1580 /// instructions include conditional stores and instructions that may divide 1581 /// by zero. 1582 bool isScalarWithPredication(Instruction *I, ElementCount VF) const; 1583 1584 // Returns true if \p I is an instruction that will be predicated either 1585 // through scalar predication or masked load/store or masked gather/scatter. 
1586 // \p VF is the vectorization factor that will be used to vectorize \p I.
1587 // Superset of instructions that return true for isScalarWithPredication.
1588 bool isPredicatedInst(Instruction *I, ElementCount VF,
1589 bool IsKnownUniform = false) {
1590 // When we know the load is uniform and the original scalar loop was not
1591 // predicated, we don't need to mark it as a predicated instruction. Any
1592 // vectorized blocks created when tail-folding are artificial blocks that we
1593 // have introduced, and we know there is always at least one active lane.
1594 // That's why we call Legal->blockNeedsPredication here: it doesn't
1595 // query tail-folding.
1596 if (IsKnownUniform && isa<LoadInst>(I) &&
1597 !Legal->blockNeedsPredication(I->getParent()))
1598 return false;
1599 if (!blockNeedsPredicationForAnyReason(I->getParent()))
1600 return false;
1601 // Loads and stores that need some form of masked operation are predicated
1602 // instructions.
1603 if (isa<LoadInst>(I) || isa<StoreInst>(I))
1604 return Legal->isMaskRequired(I);
1605 return isScalarWithPredication(I, VF);
1606 }
1607
1608 /// Returns true if \p I is a memory instruction with consecutive memory
1609 /// access that can be widened.
1610 bool
1611 memoryInstructionCanBeWidened(Instruction *I,
1612 ElementCount VF = ElementCount::getFixed(1));
1613
1614 /// Returns true if \p I is a memory instruction in an interleaved-group
1615 /// of memory accesses that can be vectorized with wide vector loads/stores
1616 /// and shuffles.
1617 bool
1618 interleavedAccessCanBeWidened(Instruction *I,
1619 ElementCount VF = ElementCount::getFixed(1));
1620
1621 /// Check if \p Instr belongs to any interleaved access group.
1622 bool isAccessInterleaved(Instruction *Instr) {
1623 return InterleaveInfo.isInterleaved(Instr);
1624 }
1625
1626 /// Get the interleaved access group that \p Instr belongs to.
1627 const InterleaveGroup<Instruction> *
1628 getInterleavedAccessGroup(Instruction *Instr) {
1629 return InterleaveInfo.getInterleaveGroup(Instr);
1630 }
1631
1632 /// Returns true if we're required to use a scalar epilogue for at least
1633 /// the final iteration of the original loop.
1634 bool requiresScalarEpilogue(ElementCount VF) const {
1635 if (!isScalarEpilogueAllowed())
1636 return false;
1637 // If we might exit from anywhere but the latch, we must run the exiting
1638 // iteration in scalar form.
1639 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1640 return true;
1641 return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1642 }
1643
1644 /// Returns true if a scalar epilogue is allowed, i.e. it has not been
1645 /// disallowed due to optsize or a loop hint annotation.
1646 bool isScalarEpilogueAllowed() const {
1647 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1648 }
1649
1650 /// Returns true if all loop blocks should be masked in order to fold the tail of the loop.
1651 bool foldTailByMasking() const { return FoldTailByMasking; }
1652
1653 /// Returns true if the instructions in this block require predication
1654 /// for any reason, e.g. because tail folding now requires a predicate
1655 /// or because the block in the original loop was predicated.
1656 bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1657 return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1658 }
1659
1660 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1661 /// nodes to the chain of instructions representing the reductions. Uses a
1662 /// MapVector to ensure deterministic iteration order.
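/// For example (illustrative): for an in-loop reduction `sum += a[i];`, the
/// phi node for `sum` maps to the chain containing the add instruction that
/// updates it on every iteration.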
1663 using ReductionChainMap =
1664 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1665
1666 /// Return the chain of instructions representing an in-loop reduction.
1667 const ReductionChainMap &getInLoopReductionChains() const {
1668 return InLoopReductionChains;
1669 }
1670
1671 /// Returns true if the Phi is part of an in-loop reduction.
1672 bool isInLoopReduction(PHINode *Phi) const {
1673 return InLoopReductionChains.count(Phi);
1674 }
1675
1676 /// Estimate the cost of an intrinsic call instruction CI if it were
1677 /// vectorized with factor VF. Return the cost of the instruction, including
1678 /// scalarization overhead if it's needed.
1679 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1680
1681 /// Estimate the cost of a call instruction CI if it were vectorized with
1682 /// factor VF. Return the cost of the instruction, including scalarization
1683 /// overhead if it's needed. The flag NeedToScalarize shows if the call needs
1684 /// to be scalarized,
1685 /// i.e. either a vector version isn't available or it is too expensive.
1686 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1687 bool &NeedToScalarize) const;
1688
1689 /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1690 /// that of B.
1691 bool isMoreProfitable(const VectorizationFactor &A,
1692 const VectorizationFactor &B) const;
1693
1694 /// Invalidates decisions already taken by the cost model.
1695 void invalidateCostModelingDecisions() {
1696 WideningDecisions.clear();
1697 Uniforms.clear();
1698 Scalars.clear();
1699 }
1700
1701 private:
1702 unsigned NumPredStores = 0;
1703
1704 /// \return An upper bound for the vectorization factors for both
1705 /// fixed and scalable vectorization, where the minimum-known number of
1706 /// elements is a power-of-2 larger than zero. If scalable vectorization is
1707 /// disabled or unsupported, then the scalable part will be equal to
1708 /// ElementCount::getScalable(0).
1709 FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1710 ElementCount UserVF,
1711 bool FoldTailByMasking);
1712
1713 /// \return the maximized element count based on the target's vector
1714 /// registers and the loop trip count, but limited to a maximum safe VF.
1715 /// This is a helper function of computeFeasibleMaxVF.
1716 /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1717 /// issue that occurred on one of the buildbots which cannot be reproduced
1718 /// without having access to the proprietary compiler (see comments on
1719 /// D98509). The issue is currently under investigation and this workaround
1720 /// will be removed as soon as possible.
1721 ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1722 unsigned SmallestType,
1723 unsigned WidestType,
1724 const ElementCount &MaxSafeVF,
1725 bool FoldTailByMasking);
1726
1727 /// \return the maximum legal scalable VF, based on the safe max number
1728 /// of elements.
1729 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1730
1731 /// The vectorization cost is a combination of the cost itself and a boolean
1732 /// indicating whether any of the contributing operations will actually
1733 /// operate on vector values after type legalization in the backend. If this
1734 /// latter value is false, then all operations will be scalarized (i.e. no
1735 /// vectorization has actually taken place).
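/// For example (illustrative): on a target with no vector support for i128
/// arithmetic, a loop built around i128 additions may still get a finite cost
/// here, but with the boolean set to false, because type legalization would
/// scalarize every wide operation.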
1736 using VectorizationCostTy = std::pair<InstructionCost, bool>;
1737
1738 /// Returns the expected execution cost. The unit of the cost does
1739 /// not matter because we use the 'cost' units to compare different
1740 /// vector widths. The cost that is returned is *not* normalized by
1741 /// the factor width. If \p Invalid is not nullptr, this function
1742 /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1743 /// each instruction that has an Invalid cost for the given VF.
1744 using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1745 VectorizationCostTy
1746 expectedCost(ElementCount VF,
1747 SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1748
1749 /// Returns the execution time cost of an instruction for a given vector
1750 /// width. Vector width of one means scalar.
1751 VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1752
1753 /// The cost-computation logic from getInstructionCost which provides
1754 /// the vector type as an output parameter.
1755 InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1756 Type *&VectorTy);
1757
1758 /// Return the cost of instructions in an in-loop reduction pattern, if I is
1759 /// part of that pattern.
1760 Optional<InstructionCost>
1761 getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1762 TTI::TargetCostKind CostKind);
1763
1764 /// Calculate the vectorization cost of memory instruction \p I.
1765 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1766
1767 /// The cost computation for a scalarized memory instruction.
1768 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1769
1770 /// The cost computation for an interleaving group of memory instructions.
1771 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1772
1773 /// The cost computation for a Gather/Scatter instruction.
1774 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1775
1776 /// The cost computation for widening instruction \p I with consecutive
1777 /// memory access.
1778 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1779
1780 /// The cost calculation for Load/Store instruction \p I with a uniform pointer:
1781 /// Load: scalar load + broadcast.
1782 /// Store: scalar store + (loop-invariant value stored ? 0 : extract of last
1783 /// element).
1784 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1785
1786 /// Estimate the overhead of scalarizing an instruction. This is a
1787 /// convenience wrapper for the type-based getScalarizationOverhead API.
1788 InstructionCost getScalarizationOverhead(Instruction *I,
1789 ElementCount VF) const;
1790
1791 /// Returns whether the instruction is a load or store and will be emitted
1792 /// as a vector operation.
1793 bool isConsecutiveLoadOrStore(Instruction *I);
1794
1795 /// Returns true if an artificially high cost for emulated masked memrefs
1796 /// should be used.
1797 bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1798
1799 /// Map of scalar integer values to the smallest bitwidth they can be legally
1800 /// represented as. The vector equivalents of these values should be truncated
1801 /// to this type.
1802 MapVector<Instruction *, uint64_t> MinBWs;
1803
1804 /// A type representing the costs for instructions if they were to be
1805 /// scalarized rather than vectorized. The entries are Instruction-Cost
1806 /// pairs.
using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1808
1809 /// A set containing all BasicBlocks that are known to be present after
1810 /// vectorization as predicated blocks.
1811 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1812
1813 /// Records whether it is allowed to have the original scalar loop execute at
1814 /// least once. This may be needed as a fallback loop in case runtime
1815 /// aliasing/dependence checks fail, or to handle the tail/remainder
1816 /// iterations when the trip count is unknown or isn't divisible by the VF,
1817 /// or as a peel loop to handle gaps in interleave groups.
1818 /// Under optsize and when the trip count is very small, we don't allow any
1819 /// iterations to execute in the scalar loop.
1820 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1821
1822 /// All blocks of the loop are to be masked to fold the tail of the scalar iterations.
1823 bool FoldTailByMasking = false;
1824
1825 /// A map holding scalar costs for different vectorization factors. The
1826 /// presence of a cost for an instruction in the mapping indicates that the
1827 /// instruction will be scalarized when vectorizing with the associated
1828 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1829 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1830
1831 /// Holds the instructions known to be uniform after vectorization.
1832 /// The data is collected per VF.
1833 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1834
1835 /// Holds the instructions known to be scalar after vectorization.
1836 /// The data is collected per VF.
1837 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1838
1839 /// Holds the instructions (address computations) that are forced to be
1840 /// scalarized.
1841 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1842
1843 /// PHINodes of the reductions that should be expanded in-loop along with
1844 /// their associated chains of reduction operations, in program order from top
1845 /// (PHI) to bottom.
1846 ReductionChainMap InLoopReductionChains;
1847
1848 /// A map of in-loop reduction operations to their immediate chain operands.
1849 /// FIXME: This can be removed once reductions can be costed correctly in
1850 /// VPlan. This was added to allow quick lookup of the in-loop operations,
1851 /// without having to loop through InLoopReductionChains.
1852 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1853
1854 /// Returns the expected difference in cost from scalarizing the expression
1855 /// feeding a predicated instruction \p PredInst. The instructions to
1856 /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1857 /// non-negative return value implies the expression will be scalarized.
1858 /// Currently, only single-use chains are considered for scalarization.
1859 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1860 ElementCount VF);
1861
1862 /// Collect the instructions that are uniform after vectorization. An
1863 /// instruction is uniform if we represent it with a single scalar value in
1864 /// the vectorized loop corresponding to each vector iteration. Examples of
1865 /// uniform instructions include pointer operands of consecutive or
1866 /// interleaved memory accesses. Note that although uniformity implies an
1867 /// instruction will be scalar, the reverse is not true.
In general, a 1868 /// scalarized instruction will be represented by VF scalar values in the 1869 /// vectorized loop, each corresponding to an iteration of the original 1870 /// scalar loop. 1871 void collectLoopUniforms(ElementCount VF); 1872 1873 /// Collect the instructions that are scalar after vectorization. An 1874 /// instruction is scalar if it is known to be uniform or will be scalarized 1875 /// during vectorization. collectLoopScalars should only add non-uniform nodes 1876 /// to the list if they are used by a load/store instruction that is marked as 1877 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by 1878 /// VF values in the vectorized loop, each corresponding to an iteration of 1879 /// the original scalar loop. 1880 void collectLoopScalars(ElementCount VF); 1881 1882 /// Keeps cost model vectorization decision and cost for instructions. 1883 /// Right now it is used for memory instructions only. 1884 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1885 std::pair<InstWidening, InstructionCost>>; 1886 1887 DecisionList WideningDecisions; 1888 1889 /// Returns true if \p V is expected to be vectorized and it needs to be 1890 /// extracted. 1891 bool needsExtract(Value *V, ElementCount VF) const { 1892 Instruction *I = dyn_cast<Instruction>(V); 1893 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1894 TheLoop->isLoopInvariant(I)) 1895 return false; 1896 1897 // Assume we can vectorize V (and hence we need extraction) if the 1898 // scalars are not computed yet. This can happen, because it is called 1899 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1900 // the scalars are collected. That should be a safe assumption in most 1901 // cases, because we check if the operands have vectorizable types 1902 // beforehand in LoopVectorizationLegality. 1903 return Scalars.find(VF) == Scalars.end() || 1904 !isScalarAfterVectorization(I, VF); 1905 }; 1906 1907 /// Returns a range containing only operands needing to be extracted. 1908 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1909 ElementCount VF) const { 1910 return SmallVector<Value *, 4>(make_filter_range( 1911 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1912 } 1913 1914 /// Determines if we have the infrastructure to vectorize loop \p L and its 1915 /// epilogue, assuming the main loop is vectorized by \p VF. 1916 bool isCandidateForEpilogueVectorization(const Loop &L, 1917 const ElementCount VF) const; 1918 1919 /// Returns true if epilogue vectorization is considered profitable, and 1920 /// false otherwise. 1921 /// \p VF is the vectorization factor chosen for the original loop. 1922 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1923 1924 public: 1925 /// The loop that we evaluate. 1926 Loop *TheLoop; 1927 1928 /// Predicated scalar evolution analysis. 1929 PredicatedScalarEvolution &PSE; 1930 1931 /// Loop Info analysis. 1932 LoopInfo *LI; 1933 1934 /// Vectorization legality. 1935 LoopVectorizationLegality *Legal; 1936 1937 /// Vector target information. 1938 const TargetTransformInfo &TTI; 1939 1940 /// Target Library Info. 1941 const TargetLibraryInfo *TLI; 1942 1943 /// Demanded bits analysis. 1944 DemandedBits *DB; 1945 1946 /// Assumption cache. 1947 AssumptionCache *AC; 1948 1949 /// Interface to emit optimization remarks. 1950 OptimizationRemarkEmitter *ORE; 1951 1952 const Function *TheFunction; 1953 1954 /// Loop Vectorize Hint. 
1955 const LoopVectorizeHints *Hints;
1956
1957 /// The interleaved access information contains groups of interleaved accesses
1958 /// with the same stride that are close to each other.
1959 InterleavedAccessInfo &InterleaveInfo;
1960
1961 /// Values to ignore in the cost model.
1962 SmallPtrSet<const Value *, 16> ValuesToIgnore;
1963
1964 /// Values to ignore in the cost model when VF > 1.
1965 SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1966
1967 /// All element types found in the loop.
1968 SmallPtrSet<Type *, 16> ElementTypesInLoop;
1969
1970 /// Profitable vector factors.
1971 SmallVector<VectorizationFactor, 8> ProfitableVFs;
1972 };
1973 } // end namespace llvm
1974
1975 /// Helper struct to manage generating runtime checks for vectorization.
1976 ///
1977 /// The runtime checks are created up-front in temporary blocks to allow better
1978 /// estimation of their cost, and are un-linked from the existing IR. After
1979 /// deciding to vectorize, the checks are moved back. If we decide not to
1980 /// vectorize, the temporary blocks are completely removed.
1981 class GeneratedRTChecks {
1982 /// Basic block which contains the generated SCEV checks, if any.
1983 BasicBlock *SCEVCheckBlock = nullptr;
1984
1985 /// The value representing the result of the generated SCEV checks. If it is
1986 /// nullptr, either no SCEV checks have been generated or they have been used.
1987 Value *SCEVCheckCond = nullptr;
1988
1989 /// Basic block which contains the generated memory runtime checks, if any.
1990 BasicBlock *MemCheckBlock = nullptr;
1991
1992 /// The value representing the result of the generated memory runtime checks.
1993 /// If it is nullptr, either no memory runtime checks have been generated or
1994 /// they have been used.
1995 Value *MemRuntimeCheckCond = nullptr;
1996
1997 DominatorTree *DT;
1998 LoopInfo *LI;
1999
2000 SCEVExpander SCEVExp;
2001 SCEVExpander MemCheckExp;
2002
2003 public:
2004 GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
2005 const DataLayout &DL)
2006 : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
2007 MemCheckExp(SE, DL, "scev.check") {}
2008
2009 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
2010 /// accurately estimate the cost of the runtime checks. The blocks are
2011 /// un-linked from the IR and are added back during vector code generation. If
2012 /// there is no vector code generation, the check blocks are removed
2013 /// completely.
2014 void Create(Loop *L, const LoopAccessInfo &LAI,
2015 const SCEVUnionPredicate &UnionPred) {
2016
2017 BasicBlock *LoopHeader = L->getHeader();
2018 BasicBlock *Preheader = L->getLoopPreheader();
2019
2020 // Use SplitBlock to create blocks for SCEV & memory runtime checks to
2021 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
2022 // may be used by SCEVExpander. The blocks will be un-linked from their
2023 // predecessors and removed from LI & DT at the end of the function.
2024 if (!UnionPred.isAlwaysTrue()) {
2025 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
2026 nullptr, "vector.scevcheck");
2027
2028 SCEVCheckCond = SCEVExp.expandCodeForPredicate(
2029 &UnionPred, SCEVCheckBlock->getTerminator());
2030 }
2031
2032 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
2033 if (RtPtrChecking.Need) {
2034 auto *Pred = SCEVCheckBlock ?
SCEVCheckBlock : Preheader; 2035 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr, 2036 "vector.memcheck"); 2037 2038 MemRuntimeCheckCond = 2039 addRuntimeChecks(MemCheckBlock->getTerminator(), L, 2040 RtPtrChecking.getChecks(), MemCheckExp); 2041 assert(MemRuntimeCheckCond && 2042 "no RT checks generated although RtPtrChecking " 2043 "claimed checks are required"); 2044 } 2045 2046 if (!MemCheckBlock && !SCEVCheckBlock) 2047 return; 2048 2049 // Unhook the temporary block with the checks, update various places 2050 // accordingly. 2051 if (SCEVCheckBlock) 2052 SCEVCheckBlock->replaceAllUsesWith(Preheader); 2053 if (MemCheckBlock) 2054 MemCheckBlock->replaceAllUsesWith(Preheader); 2055 2056 if (SCEVCheckBlock) { 2057 SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2058 new UnreachableInst(Preheader->getContext(), SCEVCheckBlock); 2059 Preheader->getTerminator()->eraseFromParent(); 2060 } 2061 if (MemCheckBlock) { 2062 MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2063 new UnreachableInst(Preheader->getContext(), MemCheckBlock); 2064 Preheader->getTerminator()->eraseFromParent(); 2065 } 2066 2067 DT->changeImmediateDominator(LoopHeader, Preheader); 2068 if (MemCheckBlock) { 2069 DT->eraseNode(MemCheckBlock); 2070 LI->removeBlock(MemCheckBlock); 2071 } 2072 if (SCEVCheckBlock) { 2073 DT->eraseNode(SCEVCheckBlock); 2074 LI->removeBlock(SCEVCheckBlock); 2075 } 2076 } 2077 2078 /// Remove the created SCEV & memory runtime check blocks & instructions, if 2079 /// unused. 2080 ~GeneratedRTChecks() { 2081 SCEVExpanderCleaner SCEVCleaner(SCEVExp); 2082 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp); 2083 if (!SCEVCheckCond) 2084 SCEVCleaner.markResultUsed(); 2085 2086 if (!MemRuntimeCheckCond) 2087 MemCheckCleaner.markResultUsed(); 2088 2089 if (MemRuntimeCheckCond) { 2090 auto &SE = *MemCheckExp.getSE(); 2091 // Memory runtime check generation creates compares that use expanded 2092 // values. Remove them before running the SCEVExpanderCleaners. 2093 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { 2094 if (MemCheckExp.isInsertedInstruction(&I)) 2095 continue; 2096 SE.forgetValue(&I); 2097 I.eraseFromParent(); 2098 } 2099 } 2100 MemCheckCleaner.cleanup(); 2101 SCEVCleaner.cleanup(); 2102 2103 if (SCEVCheckCond) 2104 SCEVCheckBlock->eraseFromParent(); 2105 if (MemRuntimeCheckCond) 2106 MemCheckBlock->eraseFromParent(); 2107 } 2108 2109 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and 2110 /// adjusts the branches to branch to the vector preheader or \p Bypass, 2111 /// depending on the generated condition. 2112 BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass, 2113 BasicBlock *LoopVectorPreHeader, 2114 BasicBlock *LoopExitBlock) { 2115 if (!SCEVCheckCond) 2116 return nullptr; 2117 if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond)) 2118 if (C->isZero()) 2119 return nullptr; 2120 2121 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2122 2123 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); 2124 // Create new preheader for vector loop. 
2125 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2126 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2127
2128 SCEVCheckBlock->getTerminator()->eraseFromParent();
2129 SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2130 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2131 SCEVCheckBlock);
2132
2133 DT->addNewBlock(SCEVCheckBlock, Pred);
2134 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2135
2136 ReplaceInstWithInst(
2137 SCEVCheckBlock->getTerminator(),
2138 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2139 // Mark the check as used, to prevent it from being removed during cleanup.
2140 SCEVCheckCond = nullptr;
2141 return SCEVCheckBlock;
2142 }
2143
2144 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2145 /// the branches to branch to the vector preheader or \p Bypass, depending on
2146 /// the generated condition.
2147 BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2148 BasicBlock *LoopVectorPreHeader) {
2149 // Check if we generated code that checks at runtime whether the arrays overlap.
2150 if (!MemRuntimeCheckCond)
2151 return nullptr;
2152
2153 auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2154 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2155 MemCheckBlock);
2156
2157 DT->addNewBlock(MemCheckBlock, Pred);
2158 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2159 MemCheckBlock->moveBefore(LoopVectorPreHeader);
2160
2161 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2162 PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2163
2164 ReplaceInstWithInst(
2165 MemCheckBlock->getTerminator(),
2166 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2167 MemCheckBlock->getTerminator()->setDebugLoc(
2168 Pred->getTerminator()->getDebugLoc());
2169
2170 // Mark the check as used, to prevent it from being removed during cleanup.
2171 MemRuntimeCheckCond = nullptr;
2172 return MemCheckBlock;
2173 }
2174 };
2175
2176 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2177 // vectorization. The loop needs to be annotated with #pragma omp simd
2178 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
2179 // vector length information is not provided, vectorization is not considered
2180 // explicit. Interleave hints are not allowed either. These limitations will be
2181 // relaxed in the future.
2182 // Please note that we are currently forced to abuse the pragma 'clang
2183 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2184 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2185 // provides *explicit vectorization hints* (LV can bypass legality checks and
2186 // assume that vectorization is legal). However, both hints are implemented
2187 // using the same metadata (llvm.loop.vectorize, processed by
2188 // LoopVectorizeHints). This will be fixed in the future when the native IR
2189 // representation for pragma 'omp simd' is introduced.
2190 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2191 OptimizationRemarkEmitter *ORE) {
2192 assert(!OuterLp->isInnermost() && "This is not an outer loop");
2193 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2194
2195 // Only outer loops with an explicit vectorization hint are supported.
2196 // Unannotated outer loops are ignored.
2197 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 2198 return false; 2199 2200 Function *Fn = OuterLp->getHeader()->getParent(); 2201 if (!Hints.allowVectorization(Fn, OuterLp, 2202 true /*VectorizeOnlyWhenForced*/)) { 2203 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 2204 return false; 2205 } 2206 2207 if (Hints.getInterleave() > 1) { 2208 // TODO: Interleave support is future work. 2209 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 2210 "outer loops.\n"); 2211 Hints.emitRemarkWithHints(); 2212 return false; 2213 } 2214 2215 return true; 2216 } 2217 2218 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 2219 OptimizationRemarkEmitter *ORE, 2220 SmallVectorImpl<Loop *> &V) { 2221 // Collect inner loops and outer loops without irreducible control flow. For 2222 // now, only collect outer loops that have explicit vectorization hints. If we 2223 // are stress testing the VPlan H-CFG construction, we collect the outermost 2224 // loop of every loop nest. 2225 if (L.isInnermost() || VPlanBuildStressTest || 2226 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 2227 LoopBlocksRPO RPOT(&L); 2228 RPOT.perform(LI); 2229 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 2230 V.push_back(&L); 2231 // TODO: Collect inner loops inside marked outer loops in case 2232 // vectorization fails for the outer loop. Do not invoke 2233 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 2234 // already known to be reducible. We can use an inherited attribute for 2235 // that. 2236 return; 2237 } 2238 } 2239 for (Loop *InnerL : L) 2240 collectSupportedLoops(*InnerL, LI, ORE, V); 2241 } 2242 2243 namespace { 2244 2245 /// The LoopVectorize Pass. 2246 struct LoopVectorize : public FunctionPass { 2247 /// Pass identification, replacement for typeid 2248 static char ID; 2249 2250 LoopVectorizePass Impl; 2251 2252 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 2253 bool VectorizeOnlyWhenForced = false) 2254 : FunctionPass(ID), 2255 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 2256 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2257 } 2258 2259 bool runOnFunction(Function &F) override { 2260 if (skipFunction(F)) 2261 return false; 2262 2263 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2264 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2265 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2266 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2267 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2268 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2269 auto *TLI = TLIP ? 
&TLIP->getTLI(F) : nullptr;
2270 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2271 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2272 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2273 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2274 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2275 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2276
2277 std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2278 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2279
2280 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2281 GetLAA, *ORE, PSI).MadeAnyChange;
2282 }
2283
2284 void getAnalysisUsage(AnalysisUsage &AU) const override {
2285 AU.addRequired<AssumptionCacheTracker>();
2286 AU.addRequired<BlockFrequencyInfoWrapperPass>();
2287 AU.addRequired<DominatorTreeWrapperPass>();
2288 AU.addRequired<LoopInfoWrapperPass>();
2289 AU.addRequired<ScalarEvolutionWrapperPass>();
2290 AU.addRequired<TargetTransformInfoWrapperPass>();
2291 AU.addRequired<AAResultsWrapperPass>();
2292 AU.addRequired<LoopAccessLegacyAnalysis>();
2293 AU.addRequired<DemandedBitsWrapperPass>();
2294 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2295 AU.addRequired<InjectTLIMappingsLegacy>();
2296
2297 // We currently do not preserve the LoopInfo/DominatorTree analyses with
2298 // outer loop vectorization. Until this is addressed, mark these analyses
2299 // as preserved only for the non-VPlan-native path.
2300 // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2301 if (!EnableVPlanNativePath) {
2302 AU.addPreserved<LoopInfoWrapperPass>();
2303 AU.addPreserved<DominatorTreeWrapperPass>();
2304 }
2305
2306 AU.addPreserved<BasicAAWrapperPass>();
2307 AU.addPreserved<GlobalsAAWrapperPass>();
2308 AU.addRequired<ProfileSummaryInfoWrapperPass>();
2309 }
2310 };
2311
2312 } // end anonymous namespace
2313
2314 //===----------------------------------------------------------------------===//
2315 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer,
2316 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2317 //===----------------------------------------------------------------------===//
2318
2319 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2320 // We need to place the broadcast of invariant variables outside the loop,
2321 // but only if it's proven safe to do so. Otherwise, the broadcast will be
2322 // inside the vector loop body.
2323 Instruction *Instr = dyn_cast<Instruction>(V);
2324 bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2325 (!Instr ||
2326 DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2327 // Place the code for broadcasting invariant variables in the new preheader.
2328 IRBuilder<>::InsertPointGuard Guard(Builder);
2329 if (SafeToHoist)
2330 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2331
2332 // Broadcast the scalar into all locations in the vector.
2333 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2334
2335 return Shuf;
2336 }
2337
2338 /// This function adds
2339 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
2340 /// to each vector element of Val. The sequence starts at StartIdx.
2341 /// \p BinOp is only relevant for FP induction variables.
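/// For example (illustrative): with StartIdx = 0 and Step = 2, a splat input
/// <v, v, v, v> becomes <v + 0, v + 2, v + 4, v + 6>.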
2342 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
2343 Instruction::BinaryOps BinOp, ElementCount VF,
2344 IRBuilder<> &Builder) {
2345 assert(VF.isVector() && "only vector VFs are supported");
2346
2347 // Create and check the types.
2348 auto *ValVTy = cast<VectorType>(Val->getType());
2349 ElementCount VLen = ValVTy->getElementCount();
2350
2351 Type *STy = Val->getType()->getScalarType();
2352 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2353 "Induction Step must be an integer or FP");
2354 assert(Step->getType() == STy && "Step has wrong type");
2355
2356 SmallVector<Constant *, 8> Indices;
2357
2358 // Create a vector of consecutive numbers <0, 1, ..., VLen - 1>.
2359 VectorType *InitVecValVTy = ValVTy;
2360 Type *InitVecValSTy = STy;
2361 if (STy->isFloatingPointTy()) {
2362 InitVecValSTy =
2363 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2364 InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2365 }
2366 Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2367
2368 // Splat the StartIdx.
2369 Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
2370
2371 if (STy->isIntegerTy()) {
2372 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2373 Step = Builder.CreateVectorSplat(VLen, Step);
2374 assert(Step->getType() == Val->getType() && "Invalid step vec");
2375 // FIXME: The newly created binary instructions should contain nsw/nuw
2376 // flags, which can be found from the original scalar operations.
2377 Step = Builder.CreateMul(InitVec, Step);
2378 return Builder.CreateAdd(Val, Step, "induction");
2379 }
2380
2381 // Floating-point induction.
2382 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2383 "Binary Opcode should be specified for FP induction");
2384 InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2385 InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
2386
2387 Step = Builder.CreateVectorSplat(VLen, Step);
2388 Value *MulOp = Builder.CreateFMul(InitVec, Step);
2389 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2390 }
2391
2392 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2393 const InductionDescriptor &II, Value *Step, Value *Start,
2394 Instruction *EntryVal, VPValue *Def, VPTransformState &State) {
2395 IRBuilder<> &Builder = State.Builder;
2396 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2397 "Expected either an induction phi-node or a truncate of it!");
2398
2399 // Construct the initial value of the vector IV in the vector loop preheader.
2400 auto CurrIP = Builder.saveIP();
2401 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2402 if (isa<TruncInst>(EntryVal)) {
2403 assert(Start->getType()->isIntegerTy() &&
2404 "Truncation requires an integer type");
2405 auto *TruncType = cast<IntegerType>(EntryVal->getType());
2406 Step = Builder.CreateTrunc(Step, TruncType);
2407 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2408 }
2409
2410 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
2411 Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
2412 Value *SteppedStart = getStepVector(
2413 SplatStart, Zero, Step, II.getInductionOpcode(), State.VF, State.Builder);
2414
2415 // We create vector phi nodes for both integer and floating-point induction
2416 // variables. Here, we determine the kind of arithmetic we will perform.
2417 Instruction::BinaryOps AddOp; 2418 Instruction::BinaryOps MulOp; 2419 if (Step->getType()->isIntegerTy()) { 2420 AddOp = Instruction::Add; 2421 MulOp = Instruction::Mul; 2422 } else { 2423 AddOp = II.getInductionOpcode(); 2424 MulOp = Instruction::FMul; 2425 } 2426 2427 // Multiply the vectorization factor by the step using integer or 2428 // floating-point arithmetic as appropriate. 2429 Type *StepType = Step->getType(); 2430 Value *RuntimeVF; 2431 if (Step->getType()->isFloatingPointTy()) 2432 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF); 2433 else 2434 RuntimeVF = getRuntimeVF(Builder, StepType, State.VF); 2435 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 2436 2437 // Create a vector splat to use in the induction update. 2438 // 2439 // FIXME: If the step is non-constant, we create the vector splat with 2440 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2441 // handle a constant vector splat. 2442 Value *SplatVF = isa<Constant>(Mul) 2443 ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul)) 2444 : Builder.CreateVectorSplat(State.VF, Mul); 2445 Builder.restoreIP(CurrIP); 2446 2447 // We may need to add the step a number of times, depending on the unroll 2448 // factor. The last of those goes into the PHI. 2449 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2450 &*LoopVectorBody->getFirstInsertionPt()); 2451 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 2452 Instruction *LastInduction = VecInd; 2453 for (unsigned Part = 0; Part < UF; ++Part) { 2454 State.set(Def, LastInduction, Part); 2455 2456 if (isa<TruncInst>(EntryVal)) 2457 addMetadata(LastInduction, EntryVal); 2458 2459 LastInduction = cast<Instruction>( 2460 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 2461 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 2462 } 2463 2464 // Move the last step to the end of the latch block. This ensures consistent 2465 // placement of all induction updates. 2466 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2467 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2468 LastInduction->moveBefore(Br); 2469 LastInduction->setName("vec.ind.next"); 2470 2471 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2472 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2473 } 2474 2475 void InnerLoopVectorizer::widenIntOrFpInduction( 2476 PHINode *IV, VPWidenIntOrFpInductionRecipe *Def, VPTransformState &State, 2477 Value *CanonicalIV) { 2478 Value *Start = Def->getStartValue()->getLiveInIRValue(); 2479 const InductionDescriptor &ID = Def->getInductionDescriptor(); 2480 TruncInst *Trunc = Def->getTruncInst(); 2481 IRBuilder<> &Builder = State.Builder; 2482 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2483 assert(!State.VF.isZero() && "VF must be non-zero"); 2484 2485 // The value from the original loop to which we are mapping the new induction 2486 // variable. 2487 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2488 2489 auto &DL = EntryVal->getModule()->getDataLayout(); 2490 2491 // Generate code for the induction step. 
Note that induction steps are
2492 // required to be loop-invariant.
2493 auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2494 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2495 "Induction step should be loop invariant");
2496 if (PSE.getSE()->isSCEVable(IV->getType())) {
2497 SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2498 return Exp.expandCodeFor(Step, Step->getType(),
2499 State.CFG.VectorPreHeader->getTerminator());
2500 }
2501 return cast<SCEVUnknown>(Step)->getValue();
2502 };
2503
2504 // The scalar value to broadcast. This is derived from the canonical
2505 // induction variable. If a truncation type is given, truncate the canonical
2506 // induction variable and step. Otherwise, derive these values from the
2507 // induction descriptor.
2508 auto CreateScalarIV = [&](Value *&Step) -> Value * {
2509 Value *ScalarIV = CanonicalIV;
2510 Type *NeededType = IV->getType();
2511 if (!Def->isCanonical() || ScalarIV->getType() != NeededType) {
2512 ScalarIV =
2513 NeededType->isIntegerTy()
2514 ? Builder.CreateSExtOrTrunc(ScalarIV, NeededType)
2515 : Builder.CreateCast(Instruction::SIToFP, ScalarIV, NeededType);
2516 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID,
2517 State.CFG.PrevBB);
2518 ScalarIV->setName("offset.idx");
2519 }
2520 if (Trunc) {
2521 auto *TruncType = cast<IntegerType>(Trunc->getType());
2522 assert(Step->getType()->isIntegerTy() &&
2523 "Truncation requires an integer step");
2524 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2525 Step = Builder.CreateTrunc(Step, TruncType);
2526 }
2527 return ScalarIV;
2528 };
2529
2530 // Create the vector values from the scalar IV, for the case in which no
2531 // separate vector IV is created.
2532 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2533 Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2534 for (unsigned Part = 0; Part < UF; ++Part) {
2535 Value *StartIdx;
2536 if (Step->getType()->isFloatingPointTy())
2537 StartIdx =
2538 getRuntimeVFAsFloat(Builder, Step->getType(), State.VF * Part);
2539 else
2540 StartIdx = getRuntimeVF(Builder, Step->getType(), State.VF * Part);
2541
2542 Value *EntryPart =
2543 getStepVector(Broadcasted, StartIdx, Step, ID.getInductionOpcode(),
2544 State.VF, State.Builder);
2545 State.set(Def, EntryPart, Part);
2546 if (Trunc)
2547 addMetadata(EntryPart, Trunc);
2548 }
2549 };
2550
2551 // Fast-math-flags propagate from the original induction instruction.
2552 IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2553 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
2554 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
2555
2556 // Now do the actual transformations, and start with creating the step value.
2557 Value *Step = CreateStepValue(ID.getStep());
2558 if (State.VF.isScalar()) {
2559 Value *ScalarIV = CreateScalarIV(Step);
2560 Type *ScalarTy = IntegerType::get(ScalarIV->getContext(),
2561 Step->getType()->getScalarSizeInBits());
2562
2563 Instruction::BinaryOps IncOp = ID.getInductionOpcode();
2564 if (IncOp == Instruction::BinaryOpsEnd)
2565 IncOp = Instruction::Add;
2566 for (unsigned Part = 0; Part < UF; ++Part) {
2567 Value *StartIdx = ConstantInt::get(ScalarTy, Part);
2568 Instruction::BinaryOps MulOp = Instruction::Mul;
2569 if (Step->getType()->isFloatingPointTy()) {
2570 StartIdx = Builder.CreateUIToFP(StartIdx, Step->getType());
2571 MulOp = Instruction::FMul;
2572 }
2573
2574 Value *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2575 Value *EntryPart = Builder.CreateBinOp(IncOp, ScalarIV, Mul, "induction");
2576 State.set(Def, EntryPart, Part);
2577 if (Trunc) {
2578 assert(!Step->getType()->isFloatingPointTy() &&
2579 "fp inductions shouldn't be truncated");
2580 addMetadata(EntryPart, Trunc);
2581 }
2582 }
2583 return;
2584 }
2585
2586 // If only a vector induction is needed, create it and return.
2587 if (!Def->needsScalarIV()) {
2588 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State);
2589 return;
2590 }
2591
2592 // Try to create a new independent vector induction variable. If we can't
2593 // create the phi node, we will splat the scalar induction variable in each
2594 // loop iteration.
2595 if (Def->needsVectorIV()) {
2596 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State);
2597 Value *ScalarIV = CreateScalarIV(Step);
2598 // Create scalar steps that can be used by instructions we will later
2599 // scalarize. Note that the addition of the scalar steps will not increase
2600 // the number of instructions in the loop in the common case prior to
2601 // InstCombine. We will be trading one vector extract for each scalar step.
2602 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State);
2603 return;
2604 }
2605
2606 // All IV users are scalar instructions, so only emit a scalar IV, not a
2607 // vectorized IV, except when we tail-fold: then the splat IV feeds the
2608 // predicate used by the masked loads/stores.
2609 Value *ScalarIV = CreateScalarIV(Step);
2610 if (!Cost->isScalarEpilogueAllowed())
2611 CreateSplatIV(ScalarIV, Step);
2612 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State);
2613 }
2614
2615 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2616 Instruction *EntryVal,
2617 const InductionDescriptor &ID,
2618 VPValue *Def,
2619 VPTransformState &State) {
2620 IRBuilder<> &Builder = State.Builder;
2621 // We shouldn't have to build scalar steps if we aren't vectorizing.
2622 assert(State.VF.isVector() && "VF should be greater than one");
2623 // Get the value type and ensure it and the step have the same type.
2624 Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2625 assert(ScalarIVTy == Step->getType() &&
2626 "Val and Step should have the same type");
2627
2628 // We build scalar steps for both integer and floating-point induction
2629 // variables. Here, we determine the kind of arithmetic we will perform.
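// For example (illustrative): for an integer IV with step S, VF = 4 and
// UF = 2, the scalar steps produced are
//   part 0: IV + 0*S, IV + 1*S, IV + 2*S, IV + 3*S
//   part 1: IV + 4*S, IV + 5*S, IV + 6*S, IV + 7*S
// (only lane 0 of each part is needed when EntryVal is uniform).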
2630 Instruction::BinaryOps AddOp;
2631 Instruction::BinaryOps MulOp;
2632 if (ScalarIVTy->isIntegerTy()) {
2633 AddOp = Instruction::Add;
2634 MulOp = Instruction::Mul;
2635 } else {
2636 AddOp = ID.getInductionOpcode();
2637 MulOp = Instruction::FMul;
2638 }
2639
2640 // Determine the number of scalars we need to generate for each unroll
2641 // iteration. If EntryVal is uniform, we only need to generate the first
2642 // lane. Otherwise, we generate all VF values.
2643 bool IsUniform =
2644 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), State.VF);
2645 unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue();
2646 // Compute the scalar steps and save the results in State.
2647 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2648 ScalarIVTy->getScalarSizeInBits());
2649 Type *VecIVTy = nullptr;
2650 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2651 if (!IsUniform && State.VF.isScalable()) {
2652 VecIVTy = VectorType::get(ScalarIVTy, State.VF);
2653 UnitStepVec =
2654 Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
2655 SplatStep = Builder.CreateVectorSplat(State.VF, Step);
2656 SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV);
2657 }
2658
2659 for (unsigned Part = 0; Part < State.UF; ++Part) {
2660 Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);
2661
2662 if (!IsUniform && State.VF.isScalable()) {
2663 auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
2664 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2665 if (ScalarIVTy->isFloatingPointTy())
2666 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2667 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2668 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2669 State.set(Def, Add, Part);
2670 // It is useful to also record the lane values for the known minimum
2671 // number of elements, so we do that below. This improves code quality
2672 // when trying to extract the first element, for example.
2673 }
2674
2675 if (ScalarIVTy->isFloatingPointTy())
2676 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2677
2678 for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2679 Value *StartIdx = Builder.CreateBinOp(
2680 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2681 // The step returned by `createStepForVF` is a runtime-evaluated value
2682 // when VF is scalable. Otherwise, it should be folded into a Constant.
2683 assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
2684 "Expected StartIdx to be folded to a constant when VF is not "
2685 "scalable");
2686 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2687 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2688 State.set(Def, Add, VPIteration(Part, Lane));
2689 }
2690 }
2691 }
2692
2693 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2694 const VPIteration &Instance,
2695 VPTransformState &State) {
2696 Value *ScalarInst = State.get(Def, Instance);
2697 Value *VectorValue = State.get(Def, Instance.Part);
2698 VectorValue = Builder.CreateInsertElement(
2699 VectorValue, ScalarInst,
2700 Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2701 State.set(Def, VectorValue, Instance.Part);
2702 }
2703
2704 // Return whether we allow using masked interleave-groups (for dealing with
2705 // strided loads/stores that reside in predicated blocks, or for dealing
2706 // with gaps).
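// For example (illustrative): accesses to A[3*i] and A[3*i + 1] form a
// factor-3 interleave group with a gap at A[3*i + 2]; loading the gap lanes
// speculatively may read past the end of A in the final iterations, which a
// mask for the gaps avoids.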
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
    return EnableMaskedInterleavedMemAccesses;

  return TTI.enableMaskedInterleavedAccessVectorization();
}

// Try to vectorize the interleave group that \p Instr belongs to.
//
// E.g. Translate the following interleaved load group (factor = 3):
//   for (i = 0; i < N; i += 3) {
//     R = Pic[i];   // Member of index 0
//     G = Pic[i+1]; // Member of index 1
//     B = Pic[i+2]; // Member of index 2
//     ...           // do something to R, G, B
//   }
// To:
//   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
//   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>  ; R elements
//   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements
//   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements
//
// Or translate the following interleaved store group (factor = 3):
//   for (i = 0; i < N; i += 3) {
//     ... do something to R, G, B
//     Pic[i]   = R; // Member of index 0
//     Pic[i+1] = G; // Member of index 1
//     Pic[i+2] = B; // Member of index 2
//   }
// To:
//   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
//   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
//   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
//       <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>  ; Interleave R,G,B elements
//   store <12 x i32> %interleaved.vec            ; Write 4 tuples of R,G,B
void InnerLoopVectorizer::vectorizeInterleaveGroup(
    const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
    VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
    VPValue *BlockInMask) {
  Instruction *Instr = Group->getInsertPos();
  const DataLayout &DL = Instr->getModule()->getDataLayout();

  // Prepare for the vector type of the interleaved load/store.
  Type *ScalarTy = getLoadStoreType(Instr);
  unsigned InterleaveFactor = Group->getFactor();
  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);

  // Prepare for the new pointers.
  SmallVector<Value *, 2> AddrParts;
  unsigned Index = Group->getIndex(Instr);

  // TODO: extend the masked interleaved-group support to reversed access.
  assert((!BlockInMask || !Group->isReverse()) &&
         "Reversed masked interleave-group not supported.");

  // If the group is reverse, adjust the index to refer to the last vector lane
  // instead of the first. We adjust the index from the first vector lane,
  // rather than directly getting the pointer for lane VF - 1, because the
  // pointer operand of the interleaved access is supposed to be uniform. For
  // uniform instructions, we're only required to generate a value for the
  // first vector lane in each unroll iteration.
  if (Group->isReverse())
    Index += (VF.getKnownMinValue() - 1) * Group->getFactor();

  for (unsigned Part = 0; Part < UF; Part++) {
    Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
    setDebugLocFromInst(AddrPart);

    // Note that the current instruction may be at any member index. Adjust
    // the address to the member of index 0.
    //
    // E.g.
    //        a = A[i+1]; // Member of index 1 (Current instruction)
    //        b = A[i];   // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a; // Member of index 1
    //       A[i]   = b; // Member of index 0
    //       A[i+2] = c; // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].

    bool InBounds = false;
    if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
      InBounds = gep->isInBounds();
    AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
    cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);

    // Cast to the vector pointer type.
    unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
    Type *PtrTy = VecTy->getPointerTo(AddressSpace);
    AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
  }

  setDebugLocFromInst(Instr);
  Value *PoisonVec = PoisonValue::get(VecTy);

  Value *MaskForGaps = nullptr;
  if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
    MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
    assert(MaskForGaps && "Mask for Gaps is required but it is null");
  }

  // Vectorize the interleaved load group.
  if (isa<LoadInst>(Instr)) {
    // For each unroll part, create a wide load for the group.
    SmallVector<Value *, 2> NewLoads;
    for (unsigned Part = 0; Part < UF; Part++) {
      Instruction *NewLoad;
      if (BlockInMask || MaskForGaps) {
        assert(useMaskedInterleavedAccesses(*TTI) &&
               "masked interleaved groups are not allowed.");
        Value *GroupMask = MaskForGaps;
        if (BlockInMask) {
          Value *BlockInMaskPart = State.get(BlockInMask, Part);
          Value *ShuffledMask = Builder.CreateShuffleVector(
              BlockInMaskPart,
              createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
              "interleaved.mask");
          GroupMask = MaskForGaps
                          ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
                                                MaskForGaps)
                          : ShuffledMask;
        }
        NewLoad =
            Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
                                     GroupMask, PoisonVec, "wide.masked.vec");
      } else
        NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
                                            Group->getAlign(), "wide.vec");
      Group->addMetadata(NewLoad);
      NewLoads.push_back(NewLoad);
    }

    // For each member in the group, shuffle out the appropriate data from the
    // wide loads.
    unsigned J = 0;
    for (unsigned I = 0; I < InterleaveFactor; ++I) {
      Instruction *Member = Group->getMember(I);

      // Skip the gaps in the group.
      if (!Member)
        continue;

      auto StrideMask =
          createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
      for (unsigned Part = 0; Part < UF; Part++) {
        Value *StridedVec = Builder.CreateShuffleVector(
            NewLoads[Part], StrideMask, "strided.vec");

        // If this member has a different type, cast the result to that type.
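        // E.g., a factor-2 group mixing i32 and float members is loaded as a
        // single wide integer vector; the float member's strided vector is
        // then bitcast to <VF x float> by the cast below.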
        if (Member->getType() != ScalarTy) {
          assert(!VF.isScalable() && "VF is assumed to be non scalable.");
          VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
          StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
        }

        if (Group->isReverse())
          StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");

        State.set(VPDefs[J], StridedVec, Part);
      }
      ++J;
    }
    return;
  }

  // The subvector type for the current instruction.
  auto *SubVT = VectorType::get(ScalarTy, VF);

  // Vectorize the interleaved store group.
  MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
  assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
         "masked interleaved groups are not allowed.");
  assert((!MaskForGaps || !VF.isScalable()) &&
         "masking gaps for scalable vectors is not yet supported.");
  for (unsigned Part = 0; Part < UF; Part++) {
    // Collect the stored vector from each member.
    SmallVector<Value *, 4> StoredVecs;
    for (unsigned i = 0; i < InterleaveFactor; i++) {
      assert((Group->getMember(i) || MaskForGaps) &&
             "Fail to get a member from an interleaved store group");
      Instruction *Member = Group->getMember(i);

      // Skip the gaps in the group.
      if (!Member) {
        Value *Undef = PoisonValue::get(SubVT);
        StoredVecs.push_back(Undef);
        continue;
      }

      Value *StoredVec = State.get(StoredValues[i], Part);

      if (Group->isReverse())
        StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse");

      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
        StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);

      StoredVecs.push_back(StoredVec);
    }

    // Concatenate all vectors into a wide vector.
    Value *WideVec = concatenateVectors(Builder, StoredVecs);

    // Interleave the elements in the wide vector.
    Value *IVec = Builder.CreateShuffleVector(
        WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
        "interleaved.vec");

    Instruction *NewStoreInstr;
    if (BlockInMask || MaskForGaps) {
      Value *GroupMask = MaskForGaps;
      if (BlockInMask) {
        Value *BlockInMaskPart = State.get(BlockInMask, Part);
        Value *ShuffledMask = Builder.CreateShuffleVector(
            BlockInMaskPart,
            createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
            "interleaved.mask");
        GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
                                                      ShuffledMask, MaskForGaps)
                                : ShuffledMask;
      }
      NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
                                                Group->getAlign(), GroupMask);
    } else
      NewStoreInstr =
          Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());

    Group->addMetadata(NewStoreInstr);
  }
}

void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
                                               VPReplicateRecipe *RepRecipe,
                                               const VPIteration &Instance,
                                               bool IfPredicateInstr,
                                               VPTransformState &State) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");

  // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated
  // for the first lane and part.
  if (isa<NoAliasScopeDeclInst>(Instr))
    if (!Instance.isFirstIteration())
      return;

  setDebugLocFromInst(Instr);

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Instruction *Cloned = Instr->clone();
  if (!IsVoidRetTy)
    Cloned->setName(Instr->getName() + ".cloned");

  // If the scalarized instruction contributes to the address computation of a
  // widened masked load/store which was in a basic block that needed
  // predication and is not predicated after vectorization, we can't propagate
  // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized
  // instruction could feed a poison value to the base address of the widened
  // load/store.
  if (State.MayGeneratePoisonRecipes.contains(RepRecipe))
    Cloned->dropPoisonGeneratingFlags();

  State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
                               Builder.GetInsertPoint());
  // Replace the operands of the cloned instruction with their scalar
  // equivalents in the new loop.
  for (auto &I : enumerate(RepRecipe->operands())) {
    auto InputInstance = Instance;
    VPValue *Operand = I.value();
    if (State.Plan->isUniformAfterVectorization(Operand))
      InputInstance.Lane = VPLane::getFirstLane();
    Cloned->setOperand(I.index(), State.get(Operand, InputInstance));
  }
  addNewMetadata(Cloned, Instr);

  // Place the cloned scalar in the new loop.
  Builder.Insert(Cloned);

  State.set(RepRecipe, Cloned, Instance);

  // If we just cloned a new assumption, add it to the assumption cache.
  if (auto *II = dyn_cast<AssumeInst>(Cloned))
    AC->registerAssumption(II);

  // End if-block.
  if (IfPredicateInstr)
    PredicatedInstructions.push_back(Cloned);
}

void InnerLoopVectorizer::createHeaderBranch(Loop *L) {
  BasicBlock *Header = L->getHeader();
  assert(!L->getLoopLatch() && "loop should not have a latch at this point");

  IRBuilder<> B(Header->getTerminator());
  Instruction *OldInst =
      getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
  setDebugLocFromInst(OldInst, &B);

  // Connect the header to the exit and header blocks and replace the old
  // terminator.
  B.CreateCondBr(B.getTrue(), L->getUniqueExitBlock(), Header);

  // Now we have two terminators. Remove the old one from the block.
  Header->getTerminator()->eraseFromParent();
}

Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
  if (TripCount)
    return TripCount;

  assert(L && "Create Trip Count for null loop.");
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  // Find the loop boundaries.
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
         "Invalid loop count");

  Type *IdxTy = Legal->getWidestInductionType();
  assert(IdxTy && "No type for induction");

  // The exit count might have type i64 while the phi has type i32. This can
  // happen if we have an induction variable that is sign-extended before the
  // compare. The only way we get a backedge-taken count at all is if the
  // induction variable was signed and as such will not overflow; in such a
  // case, truncation is legal.
  if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
      IdxTy->getPrimitiveSizeInBits())
    BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
  BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);

  // Get the total trip count from the backedge-taken count by adding 1.
  const SCEV *ExitCount = SE->getAddExpr(
      BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));

  const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();

  // Expand the trip count and place the new instructions in the preheader.
  // Notice that the pre-header does not change, only the loop body.
  SCEVExpander Exp(*SE, DL, "induction");

  // Count holds the overall loop count (N).
  TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
                                L->getLoopPreheader()->getTerminator());

  if (TripCount->getType()->isPointerTy())
    TripCount =
        CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
                                    L->getLoopPreheader()->getTerminator());

  return TripCount;
}

Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
  if (VectorTripCount)
    return VectorTripCount;

  Value *TC = getOrCreateTripCount(L);
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());

  Type *Ty = TC->getType();
  // This is where we can make the step a runtime constant.
  Value *Step = createStepForVF(Builder, Ty, VF, UF);

  // If the tail is to be folded by masking, round the number of iterations N
  // up to a multiple of Step instead of rounding down. This is done by first
  // adding Step-1 and then rounding down. Note that it's ok if this addition
  // overflows: the vector induction variable will eventually wrap to zero
  // given that it starts at zero and its Step is a power of two; the loop
  // will then exit, with the last early-exit vector comparison also producing
  // all-true.
  if (Cost->foldTailByMasking()) {
    assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
           "VF*UF must be a power of 2 when folding tail by masking");
    Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF);
    TC = Builder.CreateAdd(
        TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up");
  }

  // Now we need to generate the expression for the part of the loop that the
  // vectorized body will execute. This is equal to N - (N % Step) if scalar
  // iterations are not required for correctness, or N - Step otherwise. Step
  // is equal to the vectorization factor (number of SIMD elements) times the
  // unroll factor (number of SIMD instructions).
  Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");

  // There are cases where we *must* run at least one iteration in the
  // remainder loop. See the cost model for when this can happen. If the step
  // evenly divides the trip count, we set the remainder to be equal to the
  // step. If the step does not evenly divide the trip count, no adjustment is
  // necessary since there will already be scalar iterations. Note that the
  // minimum iterations check ensures that N >= Step.
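  // E.g., with Step = 8 (VF = 4, UF = 2): for N = 24 the remainder R is 0 and
  // is bumped to 8, so the vector loop covers 16 iterations and the epilogue
  // runs 8; for N = 20, R = 4 already leaves scalar iterations, so no
  // adjustment is made. (Illustrative numbers only.)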
  if (Cost->requiresScalarEpilogue(VF)) {
    auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
    R = Builder.CreateSelect(IsZero, Step, R);
  }

  VectorTripCount = Builder.CreateSub(TC, R, "n.vec");

  return VectorTripCount;
}

Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V,
                                                   VectorType *DstVTy,
                                                   const DataLayout &DL) {
  // Verify that V is a vector type with the same number of elements as DstVTy.
  auto *DstFVTy = cast<FixedVectorType>(DstVTy);
  unsigned VF = DstFVTy->getNumElements();
  auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
  Type *SrcElemTy = SrcVecTy->getElementType();
  Type *DstElemTy = DstFVTy->getElementType();
  assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
         "Vector elements must have same size");

  // Do a direct cast if element types are castable.
  if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
    return Builder.CreateBitOrPointerCast(V, DstFVTy);
  }
  // V cannot be directly cast to the desired vector type. This may happen
  // when V is a floating-point vector but DstVTy is a vector of pointers, or
  // vice versa. Handle this with a two-step bitcast through an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
  assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
         "Only one type should be a pointer type");
  assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
         "Only one type should be a floating point type");
  Type *IntTy =
      IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
  auto *VecIntTy = FixedVectorType::get(IntTy, VF);
  Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
  return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
}

void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
                                                         BasicBlock *Bypass) {
  Value *Count = getOrCreateTripCount(L);
  // Reuse the existing vector loop preheader for the TC checks; a new
  // preheader block is generated below for the vector loop.
  BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
  IRBuilder<> Builder(TCCheckBlock->getTerminator());

  // Generate code to check if the loop's trip count is less than VF * UF, or
  // equal to it in case a scalar epilogue is required; this implies that the
  // vector trip count is zero. This check also covers the case where adding
  // one to the backedge-taken count overflowed, leading to an incorrect trip
  // count of zero. In this case we will also jump to the scalar loop.
  auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
                                            : ICmpInst::ICMP_ULT;

  // If the tail is to be folded, the vector loop takes care of all iterations.
  Value *CheckMinIters = Builder.getFalse();
  if (!Cost->foldTailByMasking()) {
    Value *Step = createStepForVF(Builder, Count->getType(), VF, UF);
    CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
  }
  // Create a new preheader for the vector loop.
  LoopVectorPreHeader =
      SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
                 "vector.ph");

  assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
                               DT->getNode(Bypass)->getIDom()) &&
         "TC check is expected to dominate Bypass");

  // Update dominator for Bypass & LoopExit (if needed).
  DT->changeImmediateDominator(Bypass, TCCheckBlock);
  if (!Cost->requiresScalarEpilogue(VF))
    // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
    // dominator of the exit blocks.
    DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);

  ReplaceInstWithInst(
      TCCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
  LoopBypassBlocks.push_back(TCCheckBlock);
}

BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *const SCEVCheckBlock =
      RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
  if (!SCEVCheckBlock)
    return nullptr;

  assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
           (OptForSizeBasedOnProfile &&
            Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
         "Cannot SCEV check stride or overflow when optimizing for size");

  // Update dominator only if this is the first RT check.
  if (LoopBypassBlocks.empty()) {
    DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
    if (!Cost->requiresScalarEpilogue(VF))
      // If there is an epilogue which must run, there's no edge from the
      // middle block to exit blocks and thus no need to update the immediate
      // dominator of the exit blocks.
      DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
  }

  LoopBypassBlocks.push_back(SCEVCheckBlock);
  AddedSafetyChecks = true;
  return SCEVCheckBlock;
}

BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
                                                      BasicBlock *Bypass) {
  // The VPlan-native path does not currently do any analysis for runtime
  // checks.
  if (EnableVPlanNativePath)
    return nullptr;

  BasicBlock *const MemCheckBlock =
      RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);

  // Check if we generated code that checks at runtime whether arrays overlap.
  // We put the checks into a separate block to make the more common case of
  // few elements faster.
  if (!MemCheckBlock)
    return nullptr;

  if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
    assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
           "Cannot emit memory checks when optimizing for size, unless forced "
           "to vectorize.");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
                                        L->getStartLoc(), L->getHeader())
             << "Code-size may be reduced by not forcing "
                "vectorization, or by source-code modifications "
                "eliminating the need for runtime checks "
                "(e.g., adding 'restrict').";
    });
  }

  LoopBypassBlocks.push_back(MemCheckBlock);

  AddedSafetyChecks = true;

  // We currently don't use LoopVersioning for the actual loop cloning but we
  // still use it to add the noalias metadata.
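  // E.g., pointer groups proven disjoint by the runtime checks get distinct
  // alias scopes; instructions later cloned into the vector loop are then
  // annotated with !alias.scope/!noalias so downstream passes may assume the
  // accesses do not overlap.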
  LVer = std::make_unique<LoopVersioning>(
      *Legal->getLAI(),
      Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
      DT, PSE.getSE());
  LVer->prepareNoAliasMetadata();
  return MemCheckBlock;
}

Value *InnerLoopVectorizer::emitTransformedIndex(
    IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
    const InductionDescriptor &ID, BasicBlock *VectorHeader) const {
  SCEVExpander Exp(*SE, DL, "induction");
  auto Step = ID.getStep();
  auto StartValue = ID.getStartValue();
  assert(Index->getType()->getScalarType() == Step->getType() &&
         "Index scalar type does not match StepValue type");

  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and
  // rely on InstCombine for future simplifications. Here we handle some
  // trivial cases only.
  auto CreateAdd = [&B](Value *X, Value *Y) {
    assert(X->getType() == Y->getType() && "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isZero())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isZero())
        return X;
    return B.CreateAdd(X, Y);
  };

  // We allow X to be a vector type, in which case Y will potentially be
  // splatted into a vector with the same element count.
  auto CreateMul = [&B](Value *X, Value *Y) {
    assert(X->getType()->getScalarType() == Y->getType() &&
           "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isOne())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isOne())
        return X;
    VectorType *XVTy = dyn_cast<VectorType>(X->getType());
    if (XVTy && !isa<VectorType>(Y->getType()))
      Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
    return B.CreateMul(X, Y);
  };

  // Get a suitable insert point for SCEV expansion. For blocks in the vector
  // loop, choose the end of the vector loop header (= VectorHeader), because
  // the DomTree is not kept up-to-date for additional blocks generated in the
  // vector loop. By using the header as insertion point, we guarantee that the
  // expanded instructions dominate all their uses.
  auto GetInsertPoint = [this, &B, VectorHeader]() {
    BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
    if (InsertBB != LoopVectorBody &&
        LI->getLoopFor(VectorHeader) == LI->getLoopFor(InsertBB))
      return VectorHeader->getTerminator();
    return &*B.GetInsertPoint();
  };

  switch (ID.getKind()) {
  case InductionDescriptor::IK_IntInduction: {
    assert(!isa<VectorType>(Index->getType()) &&
           "Vector indices not supported for integer inductions yet");
    assert(Index->getType() == StartValue->getType() &&
           "Index type does not match StartValue type");
    if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
      return B.CreateSub(StartValue, Index);
    auto *Offset = CreateMul(
        Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
    return CreateAdd(StartValue, Offset);
  }
  case InductionDescriptor::IK_PtrInduction: {
    assert(isa<SCEVConstant>(Step) &&
           "Expected constant step for pointer induction");
    return B.CreateGEP(
        ID.getElementType(), StartValue,
        CreateMul(Index,
                  Exp.expandCodeFor(Step, Index->getType()->getScalarType(),
                                    GetInsertPoint())));
  }
  case InductionDescriptor::IK_FpInduction: {
    assert(!isa<VectorType>(Index->getType()) &&
           "Vector indices not supported for FP inductions yet");
    assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
    auto InductionBinOp = ID.getInductionBinOp();
    assert(InductionBinOp &&
           (InductionBinOp->getOpcode() == Instruction::FAdd ||
            InductionBinOp->getOpcode() == Instruction::FSub) &&
           "Original bin op should be defined for FP induction");

    Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
    Value *MulExp = B.CreateFMul(StepValue, Index);
    return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
                         "induction");
  }
  case InductionDescriptor::IK_NoInduction:
    return nullptr;
  }
  llvm_unreachable("invalid enum");
}

Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
  LoopScalarBody = OrigLoop->getHeader();
  LoopVectorPreHeader = OrigLoop->getLoopPreheader();
  assert(LoopVectorPreHeader && "Invalid loop structure");
  LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
  assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
         "multiple exit loop without required epilogue?");

  LoopMiddleBlock =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, Twine(Prefix) + "middle.block");
  LoopScalarPreHeader =
      SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
                 nullptr, Twine(Prefix) + "scalar.ph");

  auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();

  // Set up the middle block terminator. Two cases:
  // 1) If we know that we must execute the scalar epilogue, emit an
  //    unconditional branch.
  // 2) Otherwise, we must have a single unique exit block (due to how we
  //    implement the multiple exit case). In this case, set up a conditional
  //    branch from the middle block to the loop scalar preheader and the
  //    exit block. completeLoopSkeleton will update the condition to use an
  //    iteration check, if required to decide whether to execute the
  //    remainder.
  BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
      BranchInst::Create(LoopScalarPreHeader) :
      BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
                         Builder.getTrue());
  BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
  ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);

  // We intentionally don't let SplitBlock update LoopInfo since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
  LoopVectorBody =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 nullptr, nullptr, Twine(Prefix) + "vector.body");

  // Update dominator for loop exit.
  if (!Cost->requiresScalarEpilogue(VF))
    // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
    // dominator of the exit blocks.
    DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);

  // Create and register the new vector loop.
  Loop *Lp = LI->AllocateLoop();
  Loop *ParentLoop = OrigLoop->getParentLoop();

  // Insert the new loop into the loop nest and register the new basic blocks
  // before calling any utilities such as SCEV that require valid LoopInfo.
  if (ParentLoop) {
    ParentLoop->addChildLoop(Lp);
  } else {
    LI->addTopLevelLoop(Lp);
  }
  Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
  return Lp;
}

void InnerLoopVectorizer::createInductionResumeValues(
    Loop *L, std::pair<BasicBlock *, Value *> AdditionalBypass) {
  assert(((AdditionalBypass.first && AdditionalBypass.second) ||
          (!AdditionalBypass.first && !AdditionalBypass.second)) &&
         "Inconsistent information about additional bypass.");

  Value *VectorTripCount = getOrCreateVectorTripCount(L);
  assert(VectorTripCount && L && "Expected valid arguments");
  // We are going to resume the execution of the scalar loop.
  // Go over all of the induction variables that we found and fix the
  // PHIs that are left in the scalar version of the loop.
  // The starting values of the PHI nodes depend on the counter of the last
  // iteration in the vectorized loop.
  // If we come from a bypass edge then we need to start from the original
  // start value.
  Instruction *OldInduction = Legal->getPrimaryInduction();
  for (auto &InductionEntry : Legal->getInductionVars()) {
    PHINode *OrigPhi = InductionEntry.first;
    InductionDescriptor II = InductionEntry.second;

    // Create phi nodes to merge from the backedge-taken check block.
    PHINode *BCResumeVal =
        PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
                        LoopScalarPreHeader->getTerminator());
    // Copy the original phi's DebugLoc over to the new one.
    BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
    Value *&EndValue = IVEndValues[OrigPhi];
    Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
    if (OrigPhi == OldInduction) {
      // We know what the end value is.
      EndValue = VectorTripCount;
    } else {
      IRBuilder<> B(L->getLoopPreheader()->getTerminator());

      // Fast-math-flags propagate from the original induction instruction.
      if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
        B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());

      Type *StepType = II.getStep()->getType();
      Instruction::CastOps CastOp =
          CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
      Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
      const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
      EndValue =
          emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody);
      EndValue->setName("ind.end");

      // Compute the end value for the additional bypass (if applicable).
      if (AdditionalBypass.first) {
        B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
        CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
                                         StepType, true);
        CRD =
            B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
        EndValueFromAdditionalBypass =
            emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody);
        EndValueFromAdditionalBypass->setName("ind.end");
      }
    }
    // The new PHI merges the original incoming value, in case of a bypass,
    // or the value at the end of the vectorized loop.
    BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);

    // Fix the scalar body counter (PHI node).
    // The old induction's phi node in the scalar body needs the truncated
    // value.
    for (BasicBlock *BB : LoopBypassBlocks)
      BCResumeVal->addIncoming(II.getStartValue(), BB);

    if (AdditionalBypass.first)
      BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
                                            EndValueFromAdditionalBypass);

    OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
  }
}

BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
                                                      MDNode *OrigLoopID) {
  assert(L && "Expected valid loop.");

  // The trip counts should be cached by now.
  Value *Count = getOrCreateTripCount(L);
  Value *VectorTripCount = getOrCreateVectorTripCount(L);

  auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();

  // Add a check in the middle block to see if we have completed all of the
  // iterations in the first vector loop. Three cases:
  // 1) If we require a scalar epilogue, there is no conditional branch as
  //    we unconditionally branch to the scalar preheader. Do nothing.
  // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
  //    Thus if the tail is to be folded, we know we don't need to run the
  //    remainder and we can use the previous value for the condition (true).
  // 3) Otherwise, construct a runtime check.
  if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
    Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
                                        Count, VectorTripCount, "cmp.n",
                                        LoopMiddleBlock->getTerminator());

    // Here we use the same DebugLoc as the scalar loop latch terminator
    // instead of the corresponding compare because they may have ended up
    // with different line numbers and we want to avoid awkward line stepping
    // while debugging, e.g. if the compare has got a line number inside the
    // loop.
    CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
    cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
  }

  // Get ready to start creating new instructions into the vectorized body.
  assert(LoopVectorPreHeader == L->getLoopPreheader() &&
         "Inconsistent vector loop preheader");
  Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());

#ifdef EXPENSIVE_CHECKS
  assert(DT->verify(DominatorTree::VerificationLevel::Fast));
  LI->verify(*DT);
#endif

  return LoopVectorPreHeader;
}

std::pair<BasicBlock *, Value *>
InnerLoopVectorizer::createVectorizedLoopSkeleton() {
  /*
   In this function we generate a new loop. The new loop will contain
   the vectorized instructions while the old loop will continue to run the
   scalar remainder.

       [ ] <-- loop iteration number check.
      /   |
     /    v
    |    [ ]     <-- vector loop bypass (may consist of multiple blocks).
    |  /  |
    | /   v
    ||   [ ]     <-- vector pre header.
    |/    |
    |     v
    |    [  ] \
    |    [  ]_|   <-- vector loop.
    |     |
    |     v
    \   -[ ]   <--- middle-block.
     \/   |
     /\   v
     | ->[ ]     <--- new preheader.
     |    |
  (opt)   v      <-- edge from middle to exit iff epilogue is not required.
     |   [ ] \
     |   [ ]_|   <-- old scalar loop to handle remainder (scalar epilogue).
      \   |
       \  v
        >[ ]     <-- exit block(s).
      ...
   */

  // Get the metadata of the original loop before it gets modified.
  MDNode *OrigLoopID = OrigLoop->getLoopID();

  // Workaround! Compute the trip count of the original loop and cache it
  // before we start modifying the CFG. This code has a systemic problem
  // wherein it tries to run analysis over partially constructed IR; this is
  // wrong, and not simply for SCEV. The trip count of the original loop
  // simply happens to be prone to hitting this in practice. In theory, we
  // can hit the same issue for any SCEV, or ValueTracking query done during
  // mutation. See PR49900.
  getOrCreateTripCount(OrigLoop);

  // Create an empty vector loop, and prepare basic blocks for the runtime
  // checks.
  Loop *Lp = createVectorLoopSkeleton("");

  // Now, compare the new count to zero. If it is zero, skip the vector loop
  // and jump to the scalar loop. This check also covers the case where the
  // backedge-taken count is uint##_max: adding one to it will overflow,
  // leading to an incorrect trip count of zero. In this (rare) case we will
  // also jump to the scalar loop.
  emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);

  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
  emitSCEVChecks(Lp, LoopScalarPreHeader);

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  emitMemRuntimeChecks(Lp, LoopScalarPreHeader);

  createHeaderBranch(Lp);

  // Emit phis for the new starting index of the scalar loop.
  createInductionResumeValues(Lp);

  return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
}

// Fix up external users of the induction variable. At this point, we are
// in LCSSA form, with all external PHIs that use the IV having one input
// value, coming from the remainder loop. We need those PHIs to also have a
// correct value for the IV when arriving directly from the middle block.
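// E.g. (shorthand), given:
//   for (i = 0; i < n; ++i) { ... }
//   use(i); // LCSSA phi in the exit block
// the phi initially only has the incoming value from the scalar remainder
// loop; below we add an incoming value for the edge from the middle block,
// computed from the vector trip count.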
void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
                                       const InductionDescriptor &II,
                                       Value *CountRoundDown, Value *EndValue,
                                       BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the
  // penultimate value (the value that feeds into the phi from the loop
  // latch). We allow both, but they, obviously, have different values.

  assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());

      // Fast-math-flags propagate from the original induction instruction.
      if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
        B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());

      Value *CountMinusOne = B.CreateSub(
          CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
      Value *CMO =
          !II.getStep()->getType()->isIntegerTy()
              ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
                             II.getStep()->getType())
              : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
      CMO->setName("cast.cmo");
      Value *Escape =
          emitTransformedIndex(B, CMO, PSE.getSE(), DL, II, LoopVectorBody);
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;
    }
  }

  for (auto &I : MissingVals) {
    PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is %IV2 = phi [...], [ %IV1, %latch ]
    // In this case, if IV1 has an external use, we need to avoid adding both
    // "last value of IV1" and "penultimate value of IV2". So, verify that we
    // don't already have an incoming value for the middle block.
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
      PHI->addIncoming(I.second, MiddleBlock);
  }
}

namespace {

struct CSEDenseMapInfo {
  static bool canHandle(const Instruction *I) {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  }

  static inline Instruction *getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline Instruction *getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(const Instruction *I) {
    assert(canHandle(I) && "Unknown instruction!");
    return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
                                                           I->value_op_end()));
  }

  static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return LHS == RHS;
    return LHS->isIdenticalTo(RHS);
  }
};

} // end anonymous namespace

/// Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
  SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
  for (Instruction &In : llvm::make_early_inc_range(*BB)) {
    if (!CSEDenseMapInfo::canHandle(&In))
      continue;

    // Check if we can replace this instruction with any of the
    // visited instructions.
    if (Instruction *V = CSEMap.lookup(&In)) {
      In.replaceAllUsesWith(V);
      In.eraseFromParent();
      continue;
    }

    CSEMap[&In] = &In;
  }
}

InstructionCost
LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
                                              bool &NeedToScalarize) const {
  Function *F = CI->getCalledFunction();
  Type *ScalarRetTy = CI->getType();
  SmallVector<Type *, 4> Tys, ScalarTys;
  for (auto &ArgOp : CI->args())
    ScalarTys.push_back(ArgOp->getType());

  // Estimate the cost of a scalarized vector call. The source operands are
  // assumed to be vectors, so we need to extract individual elements from
  // there, execute VF scalar calls, and then gather the result into the
  // vector return value.
  InstructionCost ScalarCallCost =
      TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
  if (VF.isScalar())
    return ScalarCallCost;

  // Compute the corresponding vector type for the return value and arguments.
  Type *RetTy = ToVectorTy(ScalarRetTy, VF);
  for (Type *ScalarTy : ScalarTys)
    Tys.push_back(ToVectorTy(ScalarTy, VF));

  // Compute the cost of unpacking the argument values for the scalar calls
  // and packing the return values into a vector.
  InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);

  InstructionCost Cost =
      ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;

  // If we can't emit a vector call for this function, then the currently
  // found cost is the cost we need to return.
  NeedToScalarize = true;
  VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
  Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);

  if (!TLI || CI->isNoBuiltin() || !VecFunc)
    return Cost;

  // If the corresponding vector cost is cheaper, return its cost.
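  // E.g., with VF = 4, a ScalarCallCost of 10 and a ScalarizationCost of 12
  // (illustrative numbers), the scalarized cost is 4 * 10 + 12 = 52; a vector
  // library call costing 20 would be chosen and NeedToScalarize cleared.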
  InstructionCost VectorCallCost =
      TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
  if (VectorCallCost < Cost) {
    NeedToScalarize = false;
    Cost = VectorCallCost;
  }
  return Cost;
}

static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
  if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
    return Elt;
  return VectorType::get(Elt, VF);
}

InstructionCost
LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
                                                   ElementCount VF) const {
  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  assert(ID && "Expected intrinsic call!");
  Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
  FastMathFlags FMF;
  if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
    FMF = FPMO->getFastMathFlags();

  SmallVector<const Value *> Arguments(CI->args());
  FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
  SmallVector<Type *> ParamTys;
  std::transform(FTy->param_begin(), FTy->param_end(),
                 std::back_inserter(ParamTys),
                 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });

  IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
                                    dyn_cast<IntrinsicInst>(CI));
  return TTI.getIntrinsicInstrCost(CostAttrs,
                                   TargetTransformInfo::TCK_RecipThroughput);
}

static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
}

static Type *largestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
}

void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
  // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I` and reextend its result. InstCombine runs
  // later and will remove any ext/trunc pairs.
  SmallPtrSet<Value *, 4> Erased;
  for (const auto &KV : Cost->getMinimalBitwidths()) {
    // If the value wasn't vectorized, we must maintain the original scalar
    // type. The absence of the value from State indicates that it
    // wasn't vectorized.
    // FIXME: Should not rely on getVPValue at this point.
    VPValue *Def = State.Plan->getVPValue(KV.first, true);
    if (!State.hasAnyVectorValue(Def))
      continue;
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *I = State.get(Def, Part);
      if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
        continue;
      Type *OriginalTy = I->getType();
      Type *ScalarTruncatedTy =
          IntegerType::get(OriginalTy->getContext(), KV.second);
      auto *TruncatedTy = VectorType::get(
          ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
      if (TruncatedTy == OriginalTy)
        continue;

      IRBuilder<> B(cast<Instruction>(I));
      auto ShrinkOperand = [&](Value *V) -> Value * {
        if (auto *ZI = dyn_cast<ZExtInst>(V))
          if (ZI->getSrcTy() == TruncatedTy)
            return ZI->getOperand(0);
        return B.CreateZExtOrTrunc(V, TruncatedTy);
      };

      // The actual instruction modification depends on the instruction type,
      // unfortunately.
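      // E.g., an i32 add whose result is known to need only 8 bits becomes a
      // trunc of each operand to <VF x i8>, an i8 add, and a zext back to the
      // original type; InstCombine later collapses redundant ext/trunc pairs.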
      Value *NewI = nullptr;
      if (auto *BO = dyn_cast<BinaryOperator>(I)) {
        NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
                             ShrinkOperand(BO->getOperand(1)));

        // Any wrapping introduced by shrinking this operation shouldn't be
        // considered undefined behavior. So, we can't unconditionally copy
        // arithmetic wrapping flags to NewI.
        cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
      } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
        NewI =
            B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
                         ShrinkOperand(CI->getOperand(1)));
      } else if (auto *SI = dyn_cast<SelectInst>(I)) {
        NewI = B.CreateSelect(SI->getCondition(),
                              ShrinkOperand(SI->getTrueValue()),
                              ShrinkOperand(SI->getFalseValue()));
      } else if (auto *CI = dyn_cast<CastInst>(I)) {
        switch (CI->getOpcode()) {
        default:
          llvm_unreachable("Unhandled cast!");
        case Instruction::Trunc:
          NewI = ShrinkOperand(CI->getOperand(0));
          break;
        case Instruction::SExt:
          NewI = B.CreateSExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        case Instruction::ZExt:
          NewI = B.CreateZExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        }
      } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
        auto Elements0 =
            cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
        auto *O0 = B.CreateZExtOrTrunc(
            SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
        auto Elements1 =
            cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
        auto *O1 = B.CreateZExtOrTrunc(
            SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));

        NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
      } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
        // Don't do anything with the operands, just extend the result.
        continue;
      } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
        auto Elements =
            cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
        auto *O0 = B.CreateZExtOrTrunc(
            IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
        NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
      } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
        auto Elements =
            cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
        auto *O0 = B.CreateZExtOrTrunc(
            EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        NewI = B.CreateExtractElement(O0, EE->getOperand(2));
      } else {
        // If we don't know what to do, be conservative and don't do anything.
        continue;
      }

      // Lastly, extend the result.
      NewI->takeName(cast<Instruction>(I));
      Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
      I->replaceAllUsesWith(Res);
      cast<Instruction>(I)->eraseFromParent();
      Erased.insert(I);
      State.reset(Def, Res, Part);
    }
  }

  // We'll have created a bunch of ZExts that are now parentless. Clean up.
  for (const auto &KV : Cost->getMinimalBitwidths()) {
    // If the value wasn't vectorized, we must maintain the original scalar
    // type. The absence of the value from State indicates that it
    // wasn't vectorized.
    // FIXME: Should not rely on getVPValue at this point.
    VPValue *Def = State.Plan->getVPValue(KV.first, true);
    if (!State.hasAnyVectorValue(Def))
      continue;
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *I = State.get(Def, Part);
      ZExtInst *Inst = dyn_cast<ZExtInst>(I);
      if (Inst && Inst->use_empty()) {
        Value *NewI = Inst->getOperand(0);
        Inst->eraseFromParent();
        State.reset(Def, NewI, Part);
      }
    }
  }
}

void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
  // Insert truncates and extends for any truncated instructions as hints to
  // InstCombine.
  if (VF.isVector())
    truncateToMinimalBitwidths(State);

  // Fix widened non-induction PHIs by setting up the PHI operands.
  if (OrigPHIsToFix.size()) {
    assert(EnableVPlanNativePath &&
           "Unexpected non-induction PHIs for fixup in non VPlan-native path");
    fixNonInductionPHIs(State);
  }

  // At this point every instruction in the original loop is widened to a
  // vector form. Now we need to fix the recurrences in the loop. These PHI
  // nodes are currently empty because we did not want to introduce cycles.
  // This is the second stage of vectorizing recurrences.
  fixCrossIterationPHIs(State);

  // Forget the original basic block.
  PSE.getSE()->forgetLoop(OrigLoop);

  // If we inserted an edge from the middle block to the unique exit block,
  // update uses outside the loop (phis) to account for the newly inserted
  // edge.
  if (!Cost->requiresScalarEpilogue(VF)) {
    // Fix up external users of the induction variables.
    for (auto &Entry : Legal->getInductionVars())
      fixupIVUsers(Entry.first, Entry.second,
                   getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
                   IVEndValues[Entry.first], LoopMiddleBlock);

    fixLCSSAPHIs(State);
  }

  for (Instruction *PI : PredicatedInstructions)
    sinkScalarOperands(&*PI);

  // Remove redundant induction instructions.
  cse(LoopVectorBody);

  // Set/update profile weights for the vector and remainder loops as the
  // original loop iterations are now distributed among them. Note that the
  // original loop, represented by LoopScalarBody, becomes the remainder loop
  // after vectorization.
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly roughened result, but that should be OK since the
  // profile is not inherently precise anyway. Note also that any possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
  //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // we assume a pessimistic vscale of '1'.
  setProfileInfoAfterUnrolling(
      LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
      LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
}

void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #2: We now need to fix the recurrences by adding incoming edges to
  // the currently empty PHI nodes.
  // At this point every instruction in the original loop is widened to a
  // vector form, so we can use the widened values to construct the incoming
  // edges.
  VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
  for (VPRecipeBase &R : Header->phis()) {
    if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
      fixReduction(ReductionPhi, State);
    else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
      fixFirstOrderRecurrence(FOR, State);
  }
}

void InnerLoopVectorizer::fixFirstOrderRecurrence(
    VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
  // This is the second phase of vectorizing first-order recurrences. An
  // overview of the transformation is described below. Suppose we have the
  // following loop.
  //
  //   for (int i = 0; i < n; ++i)
  //     b[i] = a[i] - a[i - 1];
  //
  // There is a first-order recurrence on "a". For this loop, the shorthand
  // scalar IR looks like:
  //
  //   scalar.ph:
  //     s_init = a[-1]
  //     br scalar.body
  //
  //   scalar.body:
  //     i = phi [0, scalar.ph], [i+1, scalar.body]
  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //     s2 = a[i]
  //     b[i] = s2 - s1
  //     br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // vector phi v1 for s1. We now complete the vectorization and produce the
  // shorthand vector IR shown below (for VF = 4, UF = 1).
  //
  //   vector.ph:
  //     v_init = vector(..., ..., ..., a[-1])
  //     br vector.body
  //
  //   vector.body
  //     i = phi [0, vector.ph], [i+4, vector.body]
  //     v1 = phi [v_init, vector.ph], [v2, vector.body]
  //     v2 = a[i, i+1, i+2, i+3];
  //     v3 = vector(v1(3), v2(0, 1, 2))
  //     b[i, i+1, i+2, i+3] = v2 - v3
  //     br cond, vector.body, middle.block
  //
  //   middle.block:
  //     x = v2(3)
  //     br scalar.ph
  //
  //   scalar.ph:
  //     s_init = phi [x, middle.block], [a[-1], otherwise]
  //     br scalar.body
  //
  // After the vector loop finishes executing, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.

  // Extract the last vector element in the middle block. This will be the
  // initial value for the recurrence when jumping to the scalar loop.
  VPValue *PreviousDef = PhiR->getBackedgeValue();
  Value *Incoming = State.get(PreviousDef, UF - 1);
  auto *ExtractForScalar = Incoming;
  auto *IdxTy = Builder.getInt32Ty();
  if (VF.isVector()) {
    auto *One = ConstantInt::get(IdxTy, 1);
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
    auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
    ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
                                                    "vector.recur.extract");
  }
  // Extract the second-to-last element in the middle block if the phi is
  // used outside the loop. We need to extract the phi itself and not the
  // last element (the phi update in the current iteration). This will be the
  // value when jumping to the exit block from the middle block, in the case
  // where the scalar loop is not run at all.
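  // E.g., for VF = 4 the scalar resume value is lane 3 of the last unrolled
  // part, while a phi user outside the loop needs lane 2: the value the
  // scalar phi would have held in the final iteration.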
4106   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4107   if (VF.isVector()) {
4108     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4109     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
4110     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4111         Incoming, Idx, "vector.recur.extract.for.phi");
4112   } else if (UF > 1)
4113     // When the loop is unrolled without vectorizing, initialize
4114     // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
4115     // value of `Incoming`. This is analogous to the vectorized case above:
4116     // extracting the second-to-last element when VF > 1.
4117     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4118
4119   // Fix the initial value of the original recurrence in the scalar loop.
4120   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4121   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
4122   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4123   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
4124   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4125     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4126     Start->addIncoming(Incoming, BB);
4127   }
4128
4129   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4130   Phi->setName("scalar.recur");
4131
4132   // Finally, fix users of the recurrence outside the loop. The users will need
4133   // either the last value of the scalar recurrence or the last value of the
4134   // vector recurrence we extracted in the middle block. Since the loop is in
4135   // LCSSA form, we just need to find all the phi nodes for the original scalar
4136   // recurrence in the exit block, and then add an edge for the middle block.
4137   // Note that LCSSA does not imply single entry when the original scalar loop
4138   // had multiple exiting edges (as we always run the last iteration in the
4139   // scalar epilogue); in that case, there is no edge from the middle block to
4140   // the exit block, and thus no phis need to be updated.
4141   if (!Cost->requiresScalarEpilogue(VF))
4142     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4143       if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
4144         LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4145 }
4146
4147 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
4148                                        VPTransformState &State) {
4149   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
4150   // Get its reduction variable descriptor.
4151   assert(Legal->isReductionVariable(OrigPhi) &&
4152          "Unable to find the reduction variable");
4153   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
4154
4155   RecurKind RK = RdxDesc.getRecurrenceKind();
4156   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4157   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4158   setDebugLocFromInst(ReductionStartValue);
4159
4160   VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
4161   // This is the vector-clone of the value that leaves the loop.
4162   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4163
4164   // Wrap flags are in general invalid after vectorization, clear them.
4165   clearReductionWrapFlags(RdxDesc, State);
4166
4167   // Before each round, move the insertion point right between
4168   // the PHIs and the values we are going to write.
4169   // This allows us to write both PHINodes and the extractelement
4170   // instructions.
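  // For example (a shorthand sketch, assuming an integer add reduction with
  // VF = 4 and UF = 1; the exact IR emitted may differ):
  //
  //   middle.block:
  //     rdx = reduce.add(v2)   ; horizontal reduction of the vector部分 -> one
  //     br scalar.ph           ; scalar
  //
  //   scalar.ph:
  //     bc.merge.rdx = phi [rdx, middle.block], [start, otherwise]
  //
  // with the scalar loop's reduction phi rewired to start from bc.merge.rdx.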
4171   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4172
4173   setDebugLocFromInst(LoopExitInst);
4174
4175   Type *PhiTy = OrigPhi->getType();
4176   // If the tail is folded by masking, the vector value to leave the loop should
4177   // be a Select choosing between the vectorized LoopExitInst and the vectorized
4178   // Phi, instead of the former. For an in-loop reduction the reduction will
4179   // already be predicated, and does not need to be handled here.
4180   if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
4181     for (unsigned Part = 0; Part < UF; ++Part) {
4182       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4183       Value *Sel = nullptr;
4184       for (User *U : VecLoopExitInst->users()) {
4185         if (isa<SelectInst>(U)) {
4186           assert(!Sel && "Reduction exit feeding two selects");
4187           Sel = U;
4188         } else
4189           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4190       }
4191       assert(Sel && "Reduction exit feeds no select");
4192       State.reset(LoopExitInstDef, Sel, Part);
4193
4194       // If the target can create a predicated operator for the reduction at no
4195       // extra cost in the loop (for example a predicated vadd), it can be
4196       // cheaper for the select to remain in the loop than to be sunk out of
4197       // it; in that case, use the select value for the phi instead of the old
4198       // LoopExitValue.
4199       if (PreferPredicatedReductionSelect ||
4200           TTI->preferPredicatedReductionSelect(
4201               RdxDesc.getOpcode(), PhiTy,
4202               TargetTransformInfo::ReductionFlags())) {
4203         auto *VecRdxPhi =
4204             cast<PHINode>(State.get(PhiR, Part));
4205         VecRdxPhi->setIncomingValueForBlock(
4206             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4207       }
4208     }
4209   }
4210
4211   // If the vector reduction can be performed in a smaller type, we truncate
4212   // then extend the loop exit value to enable InstCombine to evaluate the
4213   // entire expression in the smaller type.
4214   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
4215     assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
4216     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4217     Builder.SetInsertPoint(
4218         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4219     VectorParts RdxParts(UF);
4220     for (unsigned Part = 0; Part < UF; ++Part) {
4221       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4222       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4223       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4224                                         : Builder.CreateZExt(Trunc, VecTy);
4225       for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
4226         if (U != Trunc) {
4227           U->replaceUsesOfWith(RdxParts[Part], Extnd);
4228           RdxParts[Part] = Extnd;
4229         }
4230     }
4231     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4232     for (unsigned Part = 0; Part < UF; ++Part) {
4233       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4234       State.reset(LoopExitInstDef, RdxParts[Part], Part);
4235     }
4236   }
4237
4238   // Reduce all of the unrolled parts into a single vector.
4239   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4240   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4241
4242   // The middle block terminator has already been assigned a DebugLoc here (that
4243   // of the OrigLoop's single latch terminator). We want the whole middle block
4244   // to appear to execute on this line because: (a) it is all compiler
4245   // generated, (b) these instructions are always executed after evaluating the
4246   // latch conditional branch, and (c) other passes may add new predecessors
4247   // which terminate on this line. This is the easiest way to ensure we don't
4248   // accidentally cause an extra step back into the loop while debugging.
4249   setDebugLocFromInst(LoopMiddleBlock->getTerminator());
4250   if (PhiR->isOrdered())
4251     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
4252   else {
4253     // Floating-point operations should have some FMF to enable the reduction.
4254     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4255     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4256     for (unsigned Part = 1; Part < UF; ++Part) {
4257       Value *RdxPart = State.get(LoopExitInstDef, Part);
4258       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4259         ReducedPartRdx = Builder.CreateBinOp(
4260             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4261       } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
4262         ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
4263                                            ReducedPartRdx, RdxPart);
4264       else
4265         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4266     }
4267   }
4268
4269   // Create the reduction after the loop. Note that inloop reductions create
4270   // the target reduction in the loop using a Reduction recipe.
4271   if (VF.isVector() && !PhiR->isInLoop()) {
4272     ReducedPartRdx =
4273         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
4274     // If the reduction can be performed in a smaller type, we need to extend
4275     // the reduction to the wider type before we branch to the original loop.
4276     if (PhiTy != RdxDesc.getRecurrenceType())
4277       ReducedPartRdx = RdxDesc.isSigned()
4278                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4279                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4280   }
4281
4282   PHINode *ResumePhi =
4283       dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());
4284
4285   // Create a phi node that merges control-flow from the backedge-taken check
4286   // block and the middle block.
4287   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4288                                         LoopScalarPreHeader->getTerminator());
4289
4290   // If we are fixing reductions in the epilogue loop then we should already
4291   // have created a bc.merge.rdx Phi after the main vector body. Ensure that
4292   // we carry over the incoming values correctly.
4293   for (auto *Incoming : predecessors(LoopScalarPreHeader)) {
4294     if (Incoming == LoopMiddleBlock)
4295       BCBlockPhi->addIncoming(ReducedPartRdx, Incoming);
4296     else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming))
4297       BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
4298                               Incoming);
4299     else
4300       BCBlockPhi->addIncoming(ReductionStartValue, Incoming);
4301   }
4302
4303   // Set the resume value for this reduction.
4304   ReductionResumeValues.insert({&RdxDesc, BCBlockPhi});
4305
4306   // Now, we need to fix the users of the reduction variable
4307   // inside and outside of the scalar remainder loop.
4308
4309   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4310   // in the exit blocks. See the comment on the analogous loop in
4311   // fixFirstOrderRecurrence for a more complete explanation of the logic.
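  // For example (a shorthand sketch): an exit-block phi such as
  //
  //   res.lcssa = phi [sum.next, loop.body]
  //
  // gains an incoming edge from the middle block,
  //
  //   res.lcssa = phi [sum.next, loop.body], [rdx, middle.block]
  //
  // so users past the loop see the reduced value when the vector loop exits
  // directly through the middle block.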
4312   if (!Cost->requiresScalarEpilogue(VF))
4313     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4314       if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
4315         LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4316
4317   // Fix the scalar loop reduction variable with the incoming reduction sum
4318   // from the vector body and from the backedge value.
4319   int IncomingEdgeBlockIdx =
4320       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4321   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4322   // Pick the other block.
4323   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4324   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4325   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4326 }
4327
4328 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
4329                                                   VPTransformState &State) {
4330   RecurKind RK = RdxDesc.getRecurrenceKind();
4331   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4332     return;
4333
4334   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4335   assert(LoopExitInstr && "null loop exit instruction");
4336   SmallVector<Instruction *, 8> Worklist;
4337   SmallPtrSet<Instruction *, 8> Visited;
4338   Worklist.push_back(LoopExitInstr);
4339   Visited.insert(LoopExitInstr);
4340
4341   while (!Worklist.empty()) {
4342     Instruction *Cur = Worklist.pop_back_val();
4343     if (isa<OverflowingBinaryOperator>(Cur))
4344       for (unsigned Part = 0; Part < UF; ++Part) {
4345         // FIXME: Should not rely on getVPValue at this point.
4346         Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
4347         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4348       }
4349
4350     for (User *U : Cur->users()) {
4351       Instruction *UI = cast<Instruction>(U);
4352       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4353           Visited.insert(UI).second)
4354         Worklist.push_back(UI);
4355     }
4356   }
4357 }
4358
4359 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4360   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4361     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4362       // Some phis were already hand-updated by the reduction and recurrence
4363       // code above; leave them alone.
4364       continue;
4365
4366     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4367     // Non-instruction incoming values will have only one value.
4368
4369     VPLane Lane = VPLane::getFirstLane();
4370     if (isa<Instruction>(IncomingValue) &&
4371         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4372                                            VF))
4373       Lane = VPLane::getLastLaneForVF(VF);
4374
4375     // Can be a loop-invariant incoming value or the last scalar value to be
4376     // extracted from the vectorized loop.
4377     // FIXME: Should not rely on getVPValue at this point.
4378     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4379     Value *lastIncomingValue =
4380         OrigLoop->isLoopInvariant(IncomingValue)
4381             ? IncomingValue
4382             : State.get(State.Plan->getVPValue(IncomingValue, true),
4383                         VPIteration(UF - 1, Lane));
4384     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4385   }
4386 }
4387
4388 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4389   // The basic block and loop containing the predicated instruction.
4390   auto *PredBB = PredInst->getParent();
4391   auto *VectorLoop = LI->getLoopFor(PredBB);
4392
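  // For example (a hypothetical sketch): if VPlan sank a scalarized
  //
  //   %d = sdiv i32 %x, %y
  //
  // into the predicated block, the loop below also tries to sink the
  // instructions defining %x and %y, provided all of their uses end up in
  // that block.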
4393   // Initialize a worklist with the operands of the predicated instruction.
4394   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4395
4396   // Holds instructions that we need to analyze again. An instruction may be
4397   // reanalyzed if we don't yet know whether we can sink it or not.
4398   SmallVector<Instruction *, 8> InstsToReanalyze;
4399
4400   // Returns true if a given use occurs in the predicated block. Phi nodes use
4401   // their operands in their corresponding predecessor blocks.
4402   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4403     auto *I = cast<Instruction>(U.getUser());
4404     BasicBlock *BB = I->getParent();
4405     if (auto *Phi = dyn_cast<PHINode>(I))
4406       BB = Phi->getIncomingBlock(
4407           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4408     return BB == PredBB;
4409   };
4410
4411   // Iteratively sink the scalarized operands of the predicated instruction
4412   // into the block we created for it. When an instruction is sunk, its
4413   // operands are then added to the worklist. The algorithm ends when one pass
4414   // through the worklist fails to sink a single instruction.
4415   bool Changed;
4416   do {
4417     // Add the instructions that need to be reanalyzed to the worklist, and
4418     // reset the changed indicator.
4419     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4420     InstsToReanalyze.clear();
4421     Changed = false;
4422
4423     while (!Worklist.empty()) {
4424       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4425
4426       // We can't sink an instruction if it is a phi node, is not in the loop,
4427       // or may have side effects.
4428       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4429           I->mayHaveSideEffects())
4430         continue;
4431
4432       // If the instruction is already in PredBB, check if we can sink its
4433       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4434       // sinking the scalar instruction I, hence it appears in PredBB; but it
4435       // may have failed to sink I's operands (recursively), which we try
4436       // (again) here.
4437       if (I->getParent() == PredBB) {
4438         Worklist.insert(I->op_begin(), I->op_end());
4439         continue;
4440       }
4441
4442       // It's legal to sink the instruction if all its uses occur in the
4443       // predicated block. Otherwise, there's nothing to do yet, and we may
4444       // need to reanalyze the instruction.
4445       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4446         InstsToReanalyze.push_back(I);
4447         continue;
4448       }
4449
4450       // Move the instruction to the beginning of the predicated block, and add
4451       // its operands to the worklist.
4452       I->moveBefore(&*PredBB->getFirstInsertionPt());
4453       Worklist.insert(I->op_begin(), I->op_end());
4454
4455       // The sinking may have enabled other instructions to be sunk, so we will
4456       // need to iterate.
4457       Changed = true;
4458     }
4459   } while (Changed);
4460 }
4461
4462 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4463   for (PHINode *OrigPhi : OrigPHIsToFix) {
4464     VPWidenPHIRecipe *VPPhi =
4465         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4466     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4467     // Make sure the builder has a valid insert point.
4468 Builder.SetInsertPoint(NewPhi); 4469 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4470 VPValue *Inc = VPPhi->getIncomingValue(i); 4471 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4472 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4473 } 4474 } 4475 } 4476 4477 bool InnerLoopVectorizer::useOrderedReductions( 4478 const RecurrenceDescriptor &RdxDesc) { 4479 return Cost->useOrderedReductions(RdxDesc); 4480 } 4481 4482 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4483 VPWidenPHIRecipe *PhiR, 4484 VPTransformState &State) { 4485 PHINode *P = cast<PHINode>(PN); 4486 if (EnableVPlanNativePath) { 4487 // Currently we enter here in the VPlan-native path for non-induction 4488 // PHIs where all control flow is uniform. We simply widen these PHIs. 4489 // Create a vector phi with no operands - the vector phi operands will be 4490 // set at the end of vector code generation. 4491 Type *VecTy = (State.VF.isScalar()) 4492 ? PN->getType() 4493 : VectorType::get(PN->getType(), State.VF); 4494 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4495 State.set(PhiR, VecPhi, 0); 4496 OrigPHIsToFix.push_back(P); 4497 4498 return; 4499 } 4500 4501 assert(PN->getParent() == OrigLoop->getHeader() && 4502 "Non-header phis should have been handled elsewhere"); 4503 4504 // In order to support recurrences we need to be able to vectorize Phi nodes. 4505 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4506 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4507 // this value when we vectorize all of the instructions that use the PHI. 4508 4509 assert(!Legal->isReductionVariable(P) && 4510 "reductions should be handled elsewhere"); 4511 4512 setDebugLocFromInst(P); 4513 4514 // This PHINode must be an induction variable. 4515 // Make sure that we know about it. 4516 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4517 4518 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4519 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4520 4521 auto *IVR = PhiR->getParent()->getPlan()->getCanonicalIV(); 4522 PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0)); 4523 4524 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4525 // which can be found from the original scalar operations. 4526 switch (II.getKind()) { 4527 case InductionDescriptor::IK_NoInduction: 4528 llvm_unreachable("Unknown induction"); 4529 case InductionDescriptor::IK_IntInduction: 4530 case InductionDescriptor::IK_FpInduction: 4531 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4532 case InductionDescriptor::IK_PtrInduction: { 4533 // Handle the pointer induction variable case. 4534 assert(P->getType()->isPointerTy() && "Unexpected type."); 4535 4536 if (Cost->isScalarAfterVectorization(P, State.VF)) { 4537 // This is the normalized GEP that starts counting at zero. 4538 Value *PtrInd = 4539 Builder.CreateSExtOrTrunc(CanonicalIV, II.getStep()->getType()); 4540 // Determine the number of scalars we need to generate for each unroll 4541 // iteration. If the instruction is uniform, we only need to generate the 4542 // first lane. Otherwise, we generate all VF values. 4543 bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF); 4544 assert((IsUniform || !State.VF.isScalable()) && 4545 "Cannot scalarize a scalable VF"); 4546 unsigned Lanes = IsUniform ? 
1 : State.VF.getFixedValue();
4547
4548       for (unsigned Part = 0; Part < UF; ++Part) {
4549         Value *PartStart =
4550             createStepForVF(Builder, PtrInd->getType(), VF, Part);
4551
4552         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4553           Value *Idx = Builder.CreateAdd(
4554               PartStart, ConstantInt::get(PtrInd->getType(), Lane));
4555           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4556           Value *SclrGep = emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(),
4557                                                 DL, II, State.CFG.PrevBB);
4558           SclrGep->setName("next.gep");
4559           State.set(PhiR, SclrGep, VPIteration(Part, Lane));
4560         }
4561       }
4562       return;
4563     }
4564     assert(isa<SCEVConstant>(II.getStep()) &&
4565            "Induction step not a SCEV constant!");
4566     Type *PhiType = II.getStep()->getType();
4567
4568     // Build a pointer phi.
4569     Value *ScalarStartValue = PhiR->getStartValue()->getLiveInIRValue();
4570     Type *ScStValueType = ScalarStartValue->getType();
4571     PHINode *NewPointerPhi =
4572         PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV);
4573     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4574
4575     // A pointer induction is performed by using a GEP.
4576     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4577     Instruction *InductionLoc = LoopLatch->getTerminator();
4578     const SCEV *ScalarStep = II.getStep();
4579     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4580     Value *ScalarStepValue =
4581         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4582     Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
4583     Value *NumUnrolledElems =
4584         Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
4585     Value *InductionGEP = GetElementPtrInst::Create(
4586         II.getElementType(), NewPointerPhi,
4587         Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4588         InductionLoc);
4589     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4590
4591     // Create UF many actual address geps that use the pointer
4592     // phi as base and a vectorized version of the step value
4593     // (<step*0, ..., step*N>) as offset.
4594     for (unsigned Part = 0; Part < State.UF; ++Part) {
4595       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4596       Value *StartOffsetScalar =
4597           Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4598       Value *StartOffset =
4599           Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
4600       // Create a vector of consecutive numbers from zero to VF.
4601       StartOffset =
4602           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4603
4604       Value *GEP = Builder.CreateGEP(
4605           II.getElementType(), NewPointerPhi,
4606           Builder.CreateMul(
4607               StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4608               "vector.gep"));
4609       State.set(PhiR, GEP, Part);
4610     }
4611   }
4612   }
4613 }
4614
4615 /// A helper function for checking whether an integer division-related
4616 /// instruction may divide by zero (in which case it must be predicated if
4617 /// executed conditionally in the scalar code).
4618 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4619 /// Non-zero divisors that are not compile-time constants will not be
4620 /// converted into multiplication, so we will still end up scalarizing
4621 /// the division, but can do so w/o predication.
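/// For example (a sketch):
///   for (int i = 0; i < n; ++i)
///     if (c[i])
///       out[i] = a[i] / b[i]; // b[i] may be zero; predicate when scalarized.
/// A constant non-zero divisor, e.g. 'a[i] / 7', needs no such predication.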
4622 static bool mayDivideByZero(Instruction &I) {
4623   assert((I.getOpcode() == Instruction::UDiv ||
4624           I.getOpcode() == Instruction::SDiv ||
4625           I.getOpcode() == Instruction::URem ||
4626           I.getOpcode() == Instruction::SRem) &&
4627          "Unexpected instruction");
4628   Value *Divisor = I.getOperand(1);
4629   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4630   return !CInt || CInt->isZero();
4631 }
4632
4633 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4634                                                VPUser &ArgOperands,
4635                                                VPTransformState &State) {
4636   assert(!isa<DbgInfoIntrinsic>(I) &&
4637          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4638   setDebugLocFromInst(&I);
4639
4640   Module *M = I.getParent()->getParent()->getParent();
4641   auto *CI = cast<CallInst>(&I);
4642
4643   SmallVector<Type *, 4> Tys;
4644   for (Value *ArgOperand : CI->args())
4645     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4646
4647   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4648
4649   // This flag shows whether we use an intrinsic or a plain call for the
4650   // vectorized version of the instruction, i.e., whether it is beneficial
4651   // to perform an intrinsic call rather than a library call.
4652   bool NeedToScalarize = false;
4653   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4654   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4655   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4656   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4657          "Instruction should be scalarized elsewhere.");
4658   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
4659          "Either the intrinsic cost or vector call cost must be valid");
4660
4661   for (unsigned Part = 0; Part < UF; ++Part) {
4662     SmallVector<Type *, 2> TysForDecl = {CI->getType()};
4663     SmallVector<Value *, 4> Args;
4664     for (auto &I : enumerate(ArgOperands.operands())) {
4665       // Some intrinsics have a scalar argument - don't replace it with a
4666       // vector.
4667       Value *Arg;
4668       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4669         Arg = State.get(I.value(), Part);
4670       else {
4671         Arg = State.get(I.value(), VPIteration(0, 0));
4672         if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
4673           TysForDecl.push_back(Arg->getType());
4674       }
4675       Args.push_back(Arg);
4676     }
4677
4678     Function *VectorF;
4679     if (UseVectorIntrinsic) {
4680       // Use vector version of the intrinsic.
4681       if (VF.isVector())
4682         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4683       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4684       assert(VectorF && "Can't retrieve vector intrinsic.");
4685     } else {
4686       // Use vector version of the function call.
4687       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4688 #ifndef NDEBUG
4689       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4690              "Can't create vector function.");
4691 #endif
4692       VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
4693     }
4694     SmallVector<OperandBundleDef, 1> OpBundles;
4695     CI->getOperandBundlesAsDefs(OpBundles);
4696     CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4697
4698     if (isa<FPMathOperator>(V))
4699       V->copyFastMathFlags(CI);
4700
4701     State.set(Def, V, Part);
4702     addMetadata(V, &I);
4703   }
4704 }
4705
4706 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
4707   // We should not collect Scalars more than once per VF. Right now, this
4708   // function is called from collectUniformsAndScalars(), which already does
4709   // this check. Collecting Scalars for VF=1 does not make any sense.
4710   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
4711          "This function should not be visited twice for the same VF");
4712
4713   SmallSetVector<Instruction *, 8> Worklist;
4714
4715   // These sets are used to seed the analysis with pointers used by memory
4716   // accesses that will remain scalar.
4717   SmallSetVector<Instruction *, 8> ScalarPtrs;
4718   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4719   auto *Latch = TheLoop->getLoopLatch();
4720
4721   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4722   // The pointer operands of loads and stores will be scalar as long as the
4723   // memory access is not a gather or scatter operation. The value operand of a
4724   // store will remain scalar if the store is scalarized.
4725   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4726     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4727     assert(WideningDecision != CM_Unknown &&
4728            "Widening decision should be ready at this moment");
4729     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4730       if (Ptr == Store->getValueOperand())
4731         return WideningDecision == CM_Scalarize;
4732     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4733            "Ptr is neither a value or pointer operand");
4734     return WideningDecision != CM_GatherScatter;
4735   };
4736
4737   // A helper that returns true if the given value is a bitcast or
4738   // getelementptr instruction contained in the loop.
4739   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4740     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4741             isa<GetElementPtrInst>(V)) &&
4742            !TheLoop->isLoopInvariant(V);
4743   };
4744
4745   // A helper that evaluates a memory access's use of a pointer. If the use will
4746   // be a scalar use and the pointer is only used by memory accesses, we place
4747   // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4748   // PossibleNonScalarPtrs.
4749   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4750     // We only care about bitcast and getelementptr instructions contained in
4751     // the loop.
4752     if (!isLoopVaryingBitCastOrGEP(Ptr))
4753       return;
4754
4755     // If the pointer has already been identified as scalar (e.g., if it was
4756     // also identified as uniform), there's nothing to do.
4757     auto *I = cast<Instruction>(Ptr);
4758     if (Worklist.count(I))
4759       return;
4760
4761     // If the use of the pointer will be a scalar use, and all users of the
4762     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4763     // place the pointer in PossibleNonScalarPtrs.
4764     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4765           return isa<LoadInst>(U) || isa<StoreInst>(U);
4766         }))
4767       ScalarPtrs.insert(I);
4768     else
4769       PossibleNonScalarPtrs.insert(I);
4770   };
4771
4772   // We seed the scalars analysis with two classes of instructions: (1)
4773   // instructions marked uniform-after-vectorization and (2) bitcast,
4774   // getelementptr and (pointer) phi instructions used by memory accesses
4775   // requiring a scalar use.
4776   //
4777   // (1) Add to the worklist all instructions that have been identified as
4778   // uniform-after-vectorization.
4779   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4780
4781   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4782   // memory accesses requiring a scalar use. The pointer operands of loads and
4783   // stores will be scalar as long as the memory access is not a gather or
4784   // scatter operation. The value operand of a store will remain scalar if the
4785   // store is scalarized.
4786   for (auto *BB : TheLoop->blocks())
4787     for (auto &I : *BB) {
4788       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4789         evaluatePtrUse(Load, Load->getPointerOperand());
4790       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4791         evaluatePtrUse(Store, Store->getPointerOperand());
4792         evaluatePtrUse(Store, Store->getValueOperand());
4793       }
4794     }
4795   for (auto *I : ScalarPtrs)
4796     if (!PossibleNonScalarPtrs.count(I)) {
4797       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4798       Worklist.insert(I);
4799     }
4800
4801   // Insert the forced scalars.
4802   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4803   // induction variable when the PHI user is scalarized.
4804   auto ForcedScalar = ForcedScalars.find(VF);
4805   if (ForcedScalar != ForcedScalars.end())
4806     for (auto *I : ForcedScalar->second)
4807       Worklist.insert(I);
4808
4809   // Expand the worklist by looking through any bitcasts and getelementptr
4810   // instructions we've already identified as scalar. This is similar to the
4811   // expansion step in collectLoopUniforms(); however, here we're only
4812   // expanding to include additional bitcasts and getelementptr instructions.
4813   unsigned Idx = 0;
4814   while (Idx != Worklist.size()) {
4815     Instruction *Dst = Worklist[Idx++];
4816     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4817       continue;
4818     auto *Src = cast<Instruction>(Dst->getOperand(0));
4819     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4820           auto *J = cast<Instruction>(U);
4821           return !TheLoop->contains(J) || Worklist.count(J) ||
4822                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4823                   isScalarUse(J, Src));
4824         })) {
4825       Worklist.insert(Src);
4826       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4827     }
4828   }
4829
4830   // An induction variable will remain scalar if all users of the induction
4831   // variable and induction variable update remain scalar.
4832   for (auto &Induction : Legal->getInductionVars()) {
4833     auto *Ind = Induction.first;
4834     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4835
4836     // If tail-folding is applied, the primary induction variable will be used
4837     // to feed a vector compare.
4838     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4839       continue;
4840
4841     // Returns true if \p Indvar is a pointer induction that is used directly
4842     // by load/store instruction \p I.
4843     auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
4844                                               Instruction *I) {
4845       return Induction.second.getKind() ==
4846                  InductionDescriptor::IK_PtrInduction &&
4847              (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
4848              Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
4849     };
4850
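    // For example (a sketch): in
    //
    //   for (i = 0; i < n; ++i) a[i] = b[i] + 1;
    //
    // the induction i feeds only the consecutive load/store addresses and its
    // own increment, so the checks below conclude that i and i.next can remain
    // scalar alongside the widened memory operations.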
4851     // Determine if all users of the induction variable are scalar after
4852     // vectorization.
4853     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4854       auto *I = cast<Instruction>(U);
4855       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4856              IsDirectLoadStoreFromPtrIndvar(Ind, I);
4857     });
4858     if (!ScalarInd)
4859       continue;
4860
4861     // Determine if all users of the induction variable update instruction are
4862     // scalar after vectorization.
4863     auto ScalarIndUpdate =
4864         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4865           auto *I = cast<Instruction>(U);
4866           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4867                  IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
4868         });
4869     if (!ScalarIndUpdate)
4870       continue;
4871
4872     // The induction variable and its update instruction will remain scalar.
4873     Worklist.insert(Ind);
4874     Worklist.insert(IndUpdate);
4875     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4876     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4877                       << "\n");
4878   }
4879
4880   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4881 }
4882
4883 bool LoopVectorizationCostModel::isScalarWithPredication(
4884     Instruction *I, ElementCount VF) const {
4885   if (!blockNeedsPredicationForAnyReason(I->getParent()))
4886     return false;
4887   switch (I->getOpcode()) {
4888   default:
4889     break;
4890   case Instruction::Load:
4891   case Instruction::Store: {
4892     if (!Legal->isMaskRequired(I))
4893       return false;
4894     auto *Ptr = getLoadStorePointerOperand(I);
4895     auto *Ty = getLoadStoreType(I);
4896     Type *VTy = Ty;
4897     if (VF.isVector())
4898       VTy = VectorType::get(Ty, VF);
4899     const Align Alignment = getLoadStoreAlignment(I);
4900     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4901                                 TTI.isLegalMaskedGather(VTy, Alignment))
4902                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4903                                 TTI.isLegalMaskedScatter(VTy, Alignment));
4904   }
4905   case Instruction::UDiv:
4906   case Instruction::SDiv:
4907   case Instruction::SRem:
4908   case Instruction::URem:
4909     return mayDivideByZero(*I);
4910   }
4911   return false;
4912 }
4913
4914 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
4915     Instruction *I, ElementCount VF) {
4916   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4917   assert(getWideningDecision(I, VF) == CM_Unknown &&
4918          "Decision should not be set yet.");
4919   auto *Group = getInterleavedAccessGroup(I);
4920   assert(Group && "Must have a group.");
4921
4922   // If the instruction's allocated size doesn't equal its type size, it
4923   // requires padding and will be scalarized.
4924   auto &DL = I->getModule()->getDataLayout();
4925   auto *ScalarTy = getLoadStoreType(I);
4926   if (hasIrregularType(ScalarTy, DL))
4927     return false;
4928
4929   // Check if masking is required.
4930   // A Group may need masking for one of two reasons: it resides in a block that
4931   // needs predication, or it was decided to use masking to deal with gaps
4932   // (either a gap at the end of a load-access that may result in a speculative
4933   // load, or any gaps in a store-access).
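  // For example (a sketch): the interleave group for
  //
  //   for (i = 0; i < n; ++i) sum += p[2 * i]; // factor 2, member 2*i+1 absent
  //
  // has a gap, so a wide load covering the last group would touch p[2n - 1],
  // which the scalar code never accesses; masking (or running the tail in a
  // scalar epilogue) avoids that speculative access.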
4934   bool PredicatedAccessRequiresMasking =
4935       blockNeedsPredicationForAnyReason(I->getParent()) &&
4936       Legal->isMaskRequired(I);
4937   bool LoadAccessWithGapsRequiresEpilogMasking =
4938       isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
4939       !isScalarEpilogueAllowed();
4940   bool StoreAccessWithGapsRequiresMasking =
4941       isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
4942   if (!PredicatedAccessRequiresMasking &&
4943       !LoadAccessWithGapsRequiresEpilogMasking &&
4944       !StoreAccessWithGapsRequiresMasking)
4945     return true;
4946
4947   // If masked interleaving is required, we expect that the user/target had
4948   // enabled it, because otherwise it either wouldn't have been created or
4949   // it should have been invalidated by the CostModel.
4950   assert(useMaskedInterleavedAccesses(TTI) &&
4951          "Masked interleave-groups for predicated accesses are not enabled.");
4952
4953   if (Group->isReverse())
4954     return false;
4955
4956   auto *Ty = getLoadStoreType(I);
4957   const Align Alignment = getLoadStoreAlignment(I);
4958   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4959                           : TTI.isLegalMaskedStore(Ty, Alignment);
4960 }
4961
4962 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
4963     Instruction *I, ElementCount VF) {
4964   // Get and ensure we have a valid memory instruction.
4965   assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
4966
4967   auto *Ptr = getLoadStorePointerOperand(I);
4968   auto *ScalarTy = getLoadStoreType(I);
4969
4970   // First of all, in order to be widened the pointer must be consecutive.
4971   if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
4972     return false;
4973
4974   // If the instruction is a store located in a predicated block, it will be
4975   // scalarized.
4976   if (isScalarWithPredication(I, VF))
4977     return false;
4978
4979   // If the instruction's allocated size doesn't equal its type size, it
4980   // requires padding and will be scalarized.
4981   auto &DL = I->getModule()->getDataLayout();
4982   if (hasIrregularType(ScalarTy, DL))
4983     return false;
4984
4985   return true;
4986 }
4987
4988 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
4989   // We should not collect Uniforms more than once per VF. Right now,
4990   // this function is called from collectUniformsAndScalars(), which
4991   // already does this check. Collecting Uniforms for VF=1 does not make any
4992   // sense.
4993
4994   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
4995          "This function should not be visited twice for the same VF");
4996
4997   // Visit the list of Uniforms. Even if we do not find any uniform value, we
4998   // won't analyze again; Uniforms.count(VF) will return 1.
4999   Uniforms[VF].clear();
5000
5001   // We now know that the loop is vectorizable!
5002   // Collect instructions inside the loop that will remain uniform after
5003   // vectorization.
5004
5005   // Global values, params and instructions outside of the current loop are
5006   // out of scope.
5007   auto isOutOfScope = [&](Value *V) -> bool {
5008     Instruction *I = dyn_cast<Instruction>(V);
5009     return (!I || !TheLoop->contains(I));
5010   };
5011
5012   // Worklist containing uniform instructions demanding lane 0.
5013   SetVector<Instruction *> Worklist;
5014   BasicBlock *Latch = TheLoop->getLoopLatch();
5015
5016   // Add uniform instructions demanding lane 0 to the worklist. Instructions
5017   // that are scalar with predication must not be considered uniform after
5018   // vectorization, because that would create an erroneous replicating region
5019   // where only a single instance out of VF should be formed.
5020   // TODO: optimize such seldom cases if found important, see PR40816.
5021   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5022     if (isOutOfScope(I)) {
5023       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5024                         << *I << "\n");
5025       return;
5026     }
5027     if (isScalarWithPredication(I, VF)) {
5028       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5029                         << *I << "\n");
5030       return;
5031     }
5032     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5033     Worklist.insert(I);
5034   };
5035
5036   // Start with the conditional branch. If the branch condition is an
5037   // instruction contained in the loop that is only used by the branch, it is
5038   // uniform.
5039   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5040   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5041     addToWorklistIfAllowed(Cmp);
5042
5043   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5044     InstWidening WideningDecision = getWideningDecision(I, VF);
5045     assert(WideningDecision != CM_Unknown &&
5046            "Widening decision should be ready at this moment");
5047
5048     // A uniform memory op is itself uniform. We exclude uniform stores
5049     // here as they demand the last lane, not the first one.
5050     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5051       assert(WideningDecision == CM_Scalarize);
5052       return true;
5053     }
5054
5055     return (WideningDecision == CM_Widen ||
5056             WideningDecision == CM_Widen_Reverse ||
5057             WideningDecision == CM_Interleave);
5058   };
5059
5060
5061   // Returns true if Ptr is the pointer operand of a memory access instruction
5062   // I, and I is known to not require scalarization.
5063   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5064     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5065   };
5066
5067   // Holds a list of values which are known to have at least one uniform use.
5068   // Note that there may be other uses which aren't uniform. A "uniform use"
5069   // here is something which only demands lane 0 of the unrolled iterations;
5070   // it does not imply that all lanes produce the same value (i.e. this is not
5071   // the usual meaning of uniform).
5072   SetVector<Value *> HasUniformUse;
5073
5074   // Scan the loop for instructions which are either a) known to have only
5075   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5076   for (auto *BB : TheLoop->blocks())
5077     for (auto &I : *BB) {
5078       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
5079         switch (II->getIntrinsicID()) {
5080         case Intrinsic::sideeffect:
5081         case Intrinsic::experimental_noalias_scope_decl:
5082         case Intrinsic::assume:
5083         case Intrinsic::lifetime_start:
5084         case Intrinsic::lifetime_end:
5085           if (TheLoop->hasLoopInvariantOperands(&I))
5086             addToWorklistIfAllowed(&I);
5087           break;
5088         default:
5089           break;
5090         }
5091       }
5092
5093       // ExtractValue instructions must be uniform, because the operands are
5094       // known to be loop-invariant.
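      // For example (a sketch): in
      //
      //   %r = extractvalue { i32, i32 } %pair, 0
      //
      // with %pair defined before the loop, %r is the same scalar on every
      // iteration, so only lane 0 of it is ever demanded.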
5095       if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
5096         assert(isOutOfScope(EVI->getAggregateOperand()) &&
5097                "Expected aggregate value to be loop invariant");
5098         addToWorklistIfAllowed(EVI);
5099         continue;
5100       }
5101
5102       // If there's no pointer operand, there's nothing to do.
5103       auto *Ptr = getLoadStorePointerOperand(&I);
5104       if (!Ptr)
5105         continue;
5106
5107       // A uniform memory op is itself uniform. We exclude uniform stores
5108       // here as they demand the last lane, not the first one.
5109       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5110         addToWorklistIfAllowed(&I);
5111
5112       if (isUniformDecision(&I, VF)) {
5113         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5114         HasUniformUse.insert(Ptr);
5115       }
5116     }
5117
5118   // Add to the worklist any operands which have *only* uniform (i.e. lane 0
5119   // demanding) users. Since loops are assumed to be in LCSSA form, this
5120   // disallows uses outside the loop as well.
5121   for (auto *V : HasUniformUse) {
5122     if (isOutOfScope(V))
5123       continue;
5124     auto *I = cast<Instruction>(V);
5125     auto UsersAreMemAccesses =
5126         llvm::all_of(I->users(), [&](User *U) -> bool {
5127           return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5128         });
5129     if (UsersAreMemAccesses)
5130       addToWorklistIfAllowed(I);
5131   }
5132
5133   // Expand Worklist in topological order: whenever a new instruction
5134   // is added, its users should already be inside Worklist. This ensures
5135   // that a uniform instruction will only be used by uniform instructions.
5136   unsigned idx = 0;
5137   while (idx != Worklist.size()) {
5138     Instruction *I = Worklist[idx++];
5139
5140     for (auto OV : I->operand_values()) {
5141       // isOutOfScope operands cannot be uniform instructions.
5142       if (isOutOfScope(OV))
5143         continue;
5144       // First-order recurrence phis should typically be considered
5145       // non-uniform.
5146       auto *OP = dyn_cast<PHINode>(OV);
5147       if (OP && Legal->isFirstOrderRecurrence(OP))
5148         continue;
5149       // If all the users of the operand are uniform, then add the
5150       // operand into the uniform worklist.
5151       auto *OI = cast<Instruction>(OV);
5152       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5153             auto *J = cast<Instruction>(U);
5154             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5155           }))
5156         addToWorklistIfAllowed(OI);
5157     }
5158   }
5159
5160   // For an instruction to be added into Worklist above, all its users inside
5161   // the loop should also be in Worklist. However, this condition cannot be
5162   // true for phi nodes that form a cyclic dependence. We must process phi
5163   // nodes separately. An induction variable will remain uniform if all users
5164   // of the induction variable and induction variable update remain uniform.
5165   // The code below handles both pointer and non-pointer induction variables.
5166   for (auto &Induction : Legal->getInductionVars()) {
5167     auto *Ind = Induction.first;
5168     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5169
5170     // Determine if all users of the induction variable are uniform after
5171     // vectorization.
5172     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5173       auto *I = cast<Instruction>(U);
5174       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5175              isVectorizedMemAccessUse(I, Ind);
5176     });
5177     if (!UniformInd)
5178       continue;
5179
5180     // Determine if all users of the induction variable update instruction are
5181     // uniform after vectorization.
5182 auto UniformIndUpdate = 5183 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5184 auto *I = cast<Instruction>(U); 5185 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 5186 isVectorizedMemAccessUse(I, IndUpdate); 5187 }); 5188 if (!UniformIndUpdate) 5189 continue; 5190 5191 // The induction variable and its update instruction will remain uniform. 5192 addToWorklistIfAllowed(Ind); 5193 addToWorklistIfAllowed(IndUpdate); 5194 } 5195 5196 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 5197 } 5198 5199 bool LoopVectorizationCostModel::runtimeChecksRequired() { 5200 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 5201 5202 if (Legal->getRuntimePointerChecking()->Need) { 5203 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 5204 "runtime pointer checks needed. Enable vectorization of this " 5205 "loop with '#pragma clang loop vectorize(enable)' when " 5206 "compiling with -Os/-Oz", 5207 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5208 return true; 5209 } 5210 5211 if (!PSE.getUnionPredicate().getPredicates().empty()) { 5212 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 5213 "runtime SCEV checks needed. Enable vectorization of this " 5214 "loop with '#pragma clang loop vectorize(enable)' when " 5215 "compiling with -Os/-Oz", 5216 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5217 return true; 5218 } 5219 5220 // FIXME: Avoid specializing for stride==1 instead of bailing out. 5221 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 5222 reportVectorizationFailure("Runtime stride check for small trip count", 5223 "runtime stride == 1 checks needed. Enable vectorization of " 5224 "this loop without such check by compiling with -Os/-Oz", 5225 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5226 return true; 5227 } 5228 5229 return false; 5230 } 5231 5232 ElementCount 5233 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 5234 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) 5235 return ElementCount::getScalable(0); 5236 5237 if (Hints->isScalableVectorizationDisabled()) { 5238 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 5239 "ScalableVectorizationDisabled", ORE, TheLoop); 5240 return ElementCount::getScalable(0); 5241 } 5242 5243 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n"); 5244 5245 auto MaxScalableVF = ElementCount::getScalable( 5246 std::numeric_limits<ElementCount::ScalarTy>::max()); 5247 5248 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 5249 // FIXME: While for scalable vectors this is currently sufficient, this should 5250 // be replaced by a more detailed mechanism that filters out specific VFs, 5251 // instead of invalidating vectorization for a whole set of VFs based on the 5252 // MaxVF. 5253 5254 // Disable scalable vectorization if the loop contains unsupported reductions. 5255 if (!canVectorizeReductions(MaxScalableVF)) { 5256 reportVectorizationInfo( 5257 "Scalable vectorization not supported for the reduction " 5258 "operations found in this loop.", 5259 "ScalableVFUnfeasible", ORE, TheLoop); 5260 return ElementCount::getScalable(0); 5261 } 5262 5263 // Disable scalable vectorization if the loop contains any instructions 5264 // with element types not supported for scalable vectors. 
5265   if (any_of(ElementTypesInLoop, [&](Type *Ty) {
5266         return !Ty->isVoidTy() &&
5267                !this->TTI.isElementTypeLegalForScalableVector(Ty);
5268       })) {
5269     reportVectorizationInfo("Scalable vectorization is not supported "
5270                             "for all element types found in this loop.",
5271                             "ScalableVFUnfeasible", ORE, TheLoop);
5272     return ElementCount::getScalable(0);
5273   }
5274
5275   if (Legal->isSafeForAnyVectorWidth())
5276     return MaxScalableVF;
5277
5278   // Limit MaxScalableVF by the maximum safe dependence distance.
5279   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5280   if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
5281     MaxVScale =
5282         TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
5283   MaxScalableVF = ElementCount::getScalable(
5284       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5285   if (!MaxScalableVF)
5286     reportVectorizationInfo(
5287         "Max legal vector width too small, scalable vectorization "
5288         "unfeasible.",
5289         "ScalableVFUnfeasible", ORE, TheLoop);
5290
5291   return MaxScalableVF;
5292 }
5293
5294 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
5295     unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) {
5296   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5297   unsigned SmallestType, WidestType;
5298   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5299
5300   // Get the maximum safe dependence distance in bits computed by LAA.
5301   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
5302   // the memory access that is most restrictive (involved in the smallest
5303   // dependence distance).
5304   unsigned MaxSafeElements =
5305       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
5306
5307   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
5308   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
5309
5310   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
5311                     << ".\n");
5312   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
5313                     << ".\n");
5314
5315   // First analyze the UserVF; fall back if the UserVF should be ignored.
5316   if (UserVF) {
5317     auto MaxSafeUserVF =
5318         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
5319
5320     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
5321       // If `VF=vscale x N` is safe, then so is `VF=N`.
5322       if (UserVF.isScalable())
5323         return FixedScalableVFPair(
5324             ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
5325       else
5326         return UserVF;
5327     }
5328
5329     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
5330
5331     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
5332     // is better to ignore the hint and let the compiler choose a suitable VF.
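    // For example (a hypothetical sketch): with MaxSafeFixedVF = 8, a fixed
    // UserVF of 16 is clamped to 8 below, whereas a scalable UserVF such as
    // 'vscale x 16' is ignored entirely and the compiler picks its own VF.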
5333     if (!UserVF.isScalable()) {
5334       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5335                         << " is unsafe, clamping to max safe VF="
5336                         << MaxSafeFixedVF << ".\n");
5337       ORE->emit([&]() {
5338         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5339                                           TheLoop->getStartLoc(),
5340                                           TheLoop->getHeader())
5341                << "User-specified vectorization factor "
5342                << ore::NV("UserVectorizationFactor", UserVF)
5343                << " is unsafe, clamping to maximum safe vectorization factor "
5344                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
5345       });
5346       return MaxSafeFixedVF;
5347     }
5348
5349     if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
5350       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5351                         << " is ignored because scalable vectors are not "
5352                            "available.\n");
5353       ORE->emit([&]() {
5354         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5355                                           TheLoop->getStartLoc(),
5356                                           TheLoop->getHeader())
5357                << "User-specified vectorization factor "
5358                << ore::NV("UserVectorizationFactor", UserVF)
5359                << " is ignored because the target does not support scalable "
5360                   "vectors. The compiler will pick a more suitable value.";
5361       });
5362     } else {
5363       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5364                         << " is unsafe. Ignoring scalable UserVF.\n");
5365       ORE->emit([&]() {
5366         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5367                                           TheLoop->getStartLoc(),
5368                                           TheLoop->getHeader())
5369                << "User-specified vectorization factor "
5370                << ore::NV("UserVectorizationFactor", UserVF)
5371                << " is unsafe. Ignoring the hint to let the compiler pick a "
5372                   "more suitable value.";
5373       });
5374     }
5375   }
5376
5377   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5378                     << " / " << WidestType << " bits.\n");
5379
5380   FixedScalableVFPair Result(ElementCount::getFixed(1),
5381                              ElementCount::getScalable(0));
5382   if (auto MaxVF =
5383           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5384                                   MaxSafeFixedVF, FoldTailByMasking))
5385     Result.FixedVF = MaxVF;
5386
5387   if (auto MaxVF =
5388           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5389                                   MaxSafeScalableVF, FoldTailByMasking))
5390     if (MaxVF.isScalable()) {
5391       Result.ScalableVF = MaxVF;
5392       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5393                         << "\n");
5394     }
5395
5396   return Result;
5397 }
5398
5399 FixedScalableVFPair
5400 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5401   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
5402     // TODO: It may be useful to do this, since the check is still likely to be
5403     // dynamically uniform if the target can skip it.
5404     reportVectorizationFailure(
5405         "Not inserting runtime ptr check for divergent target",
Not enabled for divergent target", 5407 "CantVersionLoopWithDivergentTarget", ORE, TheLoop); 5408 return FixedScalableVFPair::getNone(); 5409 } 5410 5411 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5412 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 5413 if (TC == 1) { 5414 reportVectorizationFailure("Single iteration (non) loop", 5415 "loop trip count is one, irrelevant for vectorization", 5416 "SingleIterationLoop", ORE, TheLoop); 5417 return FixedScalableVFPair::getNone(); 5418 } 5419 5420 switch (ScalarEpilogueStatus) { 5421 case CM_ScalarEpilogueAllowed: 5422 return computeFeasibleMaxVF(TC, UserVF, false); 5423 case CM_ScalarEpilogueNotAllowedUsePredicate: 5424 LLVM_FALLTHROUGH; 5425 case CM_ScalarEpilogueNotNeededUsePredicate: 5426 LLVM_DEBUG( 5427 dbgs() << "LV: vector predicate hint/switch found.\n" 5428 << "LV: Not allowing scalar epilogue, creating predicated " 5429 << "vector loop.\n"); 5430 break; 5431 case CM_ScalarEpilogueNotAllowedLowTripLoop: 5432 // fallthrough as a special case of OptForSize 5433 case CM_ScalarEpilogueNotAllowedOptSize: 5434 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize) 5435 LLVM_DEBUG( 5436 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 5437 else 5438 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip " 5439 << "count.\n"); 5440 5441 // Bail if runtime checks are required, which are not good when optimising 5442 // for size. 5443 if (runtimeChecksRequired()) 5444 return FixedScalableVFPair::getNone(); 5445 5446 break; 5447 } 5448 5449 // The only loops we can vectorize without a scalar epilogue, are loops with 5450 // a bottom-test and a single exiting block. We'd have to handle the fact 5451 // that not every instruction executes on the last iteration. This will 5452 // require a lane mask which varies through the vector loop body. (TODO) 5453 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 5454 // If there was a tail-folding hint/switch, but we can't fold the tail by 5455 // masking, fallback to a vectorization with a scalar epilogue. 5456 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5457 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5458 "scalar epilogue instead.\n"); 5459 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5460 return computeFeasibleMaxVF(TC, UserVF, false); 5461 } 5462 return FixedScalableVFPair::getNone(); 5463 } 5464 5465 // Now try the tail folding 5466 5467 // Invalidate interleave groups that require an epilogue if we can't mask 5468 // the interleave-group. 5469 if (!useMaskedInterleavedAccesses(TTI)) { 5470 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() && 5471 "No decisions should have been taken at this point"); 5472 // Note: There is no need to invalidate any cost modeling decisions here, as 5473 // non where taken so far. 5474 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 5475 } 5476 5477 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true); 5478 // Avoid tail folding if the trip count is known to be a multiple of any VF 5479 // we chose. 5480 // FIXME: The condition below pessimises the case for fixed-width vectors, 5481 // when scalable VFs are also candidates for vectorization. 
  if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
    ElementCount MaxFixedVF = MaxFactors.FixedVF;
    assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
           "MaxFixedVF must be a power of 2");
    unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
                                   : MaxFixedVF.getFixedValue();
    ScalarEvolution *SE = PSE.getSE();
    const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
    const SCEV *ExitCount = SE->getAddExpr(
        BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
    const SCEV *Rem = SE->getURemExpr(
        SE->applyLoopGuards(ExitCount, TheLoop),
        SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
    if (Rem->isZero()) {
      // Accept MaxFixedVF if we do not have a tail.
      LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
      return MaxFactors;
    }
  }

  // For scalable vectors, don't use tail folding for low trip counts or when
  // optimizing for code size. We only permit this if the user has explicitly
  // requested it.
  if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate &&
      ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate &&
      MaxFactors.ScalableVF.isVector())
    MaxFactors.ScalableVF = ElementCount::getScalable(0);

  // If we don't know the precise trip count, or if the trip count that we
  // found modulo the vectorization factor is not zero, try to fold the tail
  // by masking.
  // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
  if (Legal->prepareToFoldTailByMasking()) {
    FoldTailByMasking = true;
    return MaxFactors;
  }

  // If there was a tail-folding hint/switch, but we can't fold the tail by
  // masking, fall back to a vectorization with a scalar epilogue.
  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                         "scalar epilogue instead.\n");
    ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
    return MaxFactors;
  }

  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
    return FixedScalableVFPair::getNone();
  }

  if (TC == 0) {
    reportVectorizationFailure(
        "Unable to calculate the loop count due to complex control flow",
        "unable to calculate the loop count due to complex control flow",
        "UnknownLoopCountComplexCFG", ORE, TheLoop);
    return FixedScalableVFPair::getNone();
  }

  reportVectorizationFailure(
      "Cannot optimize for size and vectorize at the same time.",
      "cannot optimize for size and vectorize at the same time. "
      "Enable vectorization of this loop with '#pragma clang loop "
      "vectorize(enable)' when compiling with -Os/-Oz",
      "NoTailLoopWithOptForSize", ORE, TheLoop);
  return FixedScalableVFPair::getNone();
}
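
// Illustrative example for the clamping performed below (numbers assumed for
// exposition): on a target with 128-bit vector registers and a widest element
// type of 32 bits, MaxVectorElementCount is PowerOf2Floor(128 / 32) = 4
// lanes, which is then further clamped to the dependence-safe MaxSafeVF.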
ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
    unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
    const ElementCount &MaxSafeVF, bool FoldTailByMasking) {
  bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
  TypeSize WidestRegister = TTI.getRegisterBitWidth(
      ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
                           : TargetTransformInfo::RGK_FixedWidthVector);

  // Convenience function to return the minimum of two ElementCounts.
  auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
    assert((LHS.isScalable() == RHS.isScalable()) &&
           "Scalable flags must match");
    return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
  };

  // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
  auto MaxVectorElementCount = ElementCount::get(
      PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
      ComputeScalableMaxVF);
  MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
  LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
                    << (MaxVectorElementCount * WidestType) << " bits.\n");

  if (!MaxVectorElementCount) {
    LLVM_DEBUG(dbgs() << "LV: The target has no "
                      << (ComputeScalableMaxVF ? "scalable" : "fixed")
                      << " vector registers.\n");
    return ElementCount::getFixed(1);
  }

  const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
  if (ConstTripCount &&
      ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
      (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) {
    // If the loop trip count (TC) is known at compile time, there is no point
    // in choosing a VF greater than TC (as done in the loop below). Select
    // the maximum power of two which doesn't exceed TC.
    // If MaxVectorElementCount is scalable, we only fall back on a fixed VF
    // when the TC is less than or equal to the known number of lanes.
    auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount);
    LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
                         "exceeding the constant trip count: "
                      << ClampedConstTripCount << "\n");
    return ElementCount::getFixed(ClampedConstTripCount);
  }

  ElementCount MaxVF = MaxVectorElementCount;
  if (TTI.shouldMaximizeVectorBandwidth() ||
      (MaximizeBandwidth && isScalarEpilogueAllowed())) {
    auto MaxVectorElementCountMaxBW = ElementCount::get(
        PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
        ComputeScalableMaxVF);
    MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);

    // Collect all viable vectorization factors larger than the default MaxVF
    // (i.e. MaxVectorElementCount).
    SmallVector<ElementCount, 8> VFs;
    for (ElementCount VS = MaxVectorElementCount * 2;
         ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
      VFs.push_back(VS);

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than
    // existing ones.
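    // Illustrative example (numbers assumed for exposition): if VFs holds
    // {8, 16} and some register class provides 32 registers, but VF=16 would
    // keep 40 values of that class live at one point, the search below
    // rejects 16 and settles on 8.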
    for (int i = RUs.size() - 1; i >= 0; --i) {
      bool Selected = true;
      for (auto &pair : RUs[i].MaxLocalUsers) {
        unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
        if (pair.second > TargetNumRegisters)
          Selected = false;
      }
      if (Selected) {
        MaxVF = VFs[i];
        break;
      }
    }
    if (ElementCount MinVF =
            TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
      if (ElementCount::isKnownLT(MaxVF, MinVF)) {
        LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
                          << ") with target's minimum: " << MinVF << '\n');
        MaxVF = MinVF;
      }
    }
  }
  return MaxVF;
}

bool LoopVectorizationCostModel::isMoreProfitable(
    const VectorizationFactor &A, const VectorizationFactor &B) const {
  InstructionCost CostA = A.Cost;
  InstructionCost CostB = B.Cost;

  unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);

  if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
      MaxTripCount) {
    // If we are folding the tail and the trip count is a known (possibly
    // small) constant, the trip count will be rounded up to an integer number
    // of iterations. The total cost will be
    // PerIterationCost * ceil(TripCount / VF), which we compare directly.
    // When not folding the tail, the total cost will be
    // PerIterationCost * floor(TC / VF) + scalar remainder cost, and so is
    // approximated with the per-lane cost below instead of using the trip
    // count as here.
    auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
    auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
    return RTCostA < RTCostB;
  }

  // Improve estimate for the vector width if it is scalable.
  unsigned EstimatedWidthA = A.Width.getKnownMinValue();
  unsigned EstimatedWidthB = B.Width.getKnownMinValue();
  if (Optional<unsigned> VScale = TTI.getVScaleForTuning()) {
    if (A.Width.isScalable())
      EstimatedWidthA *= VScale.getValue();
    if (B.Width.isScalable())
      EstimatedWidthB *= VScale.getValue();
  }

  // Assume vscale may be larger than 1 (or the value being tuned for),
  // so that scalable vectorization is slightly favorable over fixed-width
  // vectorization.
  if (A.Width.isScalable() && !B.Width.isScalable())
    return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);

  // To avoid the need for FP division:
  //   (CostA / A.Width) < (CostB / B.Width)
  //   <=> (CostA * B.Width) < (CostB * A.Width)
  return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
}

VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
    const ElementCountSet &VFCandidates) {
  InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
  LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
  assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
  assert(VFCandidates.count(ElementCount::getFixed(1)) &&
         "Expected Scalar VF to be a candidate");

  const VectorizationFactor ScalarCost(ElementCount::getFixed(1),
                                       ExpectedCost);
  VectorizationFactor ChosenFactor = ScalarCost;

  bool ForceVectorization =
      Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  if (ForceVectorization && VFCandidates.size() > 1) {
    // Ignore scalar width, because the user explicitly wants vectorization.
    // Initialize cost to max so that VF = 2 is, at least, chosen during cost
    // evaluation.
    ChosenFactor.Cost = InstructionCost::getMax();
  }

  SmallVector<InstructionVFPair> InvalidCosts;
  for (const auto &i : VFCandidates) {
    // The cost for scalar VF=1 is already calculated, so ignore it.
    if (i.isScalar())
      continue;

    VectorizationCostTy C = expectedCost(i, &InvalidCosts);
    VectorizationFactor Candidate(i, C.first);

#ifndef NDEBUG
    unsigned AssumedMinimumVscale = 1;
    if (Optional<unsigned> VScale = TTI.getVScaleForTuning())
      AssumedMinimumVscale = VScale.getValue();
    unsigned Width =
        Candidate.Width.isScalable()
            ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
            : Candidate.Width.getFixedValue();
    LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
                      << " costs: " << (Candidate.Cost / Width));
    if (i.isScalable())
      LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
                        << AssumedMinimumVscale << ")");
    LLVM_DEBUG(dbgs() << ".\n");
#endif

    if (!C.second && !ForceVectorization) {
      LLVM_DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }

    // If profitable, add it to the ProfitableVFs list.
    if (isMoreProfitable(Candidate, ScalarCost))
      ProfitableVFs.push_back(Candidate);

    if (isMoreProfitable(Candidate, ChosenFactor))
      ChosenFactor = Candidate;
  }

  // Emit a report of VFs with invalid costs in the loop.
  if (!InvalidCosts.empty()) {
    // Group the remarks per instruction, keeping the instruction order from
    // InvalidCosts.
    std::map<Instruction *, unsigned> Numbering;
    unsigned I = 0;
    for (auto &Pair : InvalidCosts)
      if (!Numbering.count(Pair.first))
        Numbering[Pair.first] = I++;

    // Sort the list, first on instruction(number) then on VF.
    llvm::sort(InvalidCosts,
               [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
                 if (Numbering[A.first] != Numbering[B.first])
                   return Numbering[A.first] < Numbering[B.first];
                 ElementCountComparator ECC;
                 return ECC(A.second, B.second);
               });

    // For a list of ordered instruction-vf pairs:
    //   [(load, vf1), (load, vf2), (store, vf1)]
    // group the instructions together to emit separate remarks for:
    //   load  (vf1, vf2)
    //   store (vf1)
    auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
    auto Subset = ArrayRef<InstructionVFPair>();
    do {
      if (Subset.empty())
        Subset = Tail.take_front(1);

      Instruction *I = Subset.front().first;

      // If the next instruction is different, or if there are no other
      // pairs, emit a remark for the collated subset. e.g.
      //   [(load, vf1), (load, vf2)]
      // to emit:
      //   remark: invalid costs for 'load' at VF=(vf1, vf2)
      if (Subset == Tail || Tail[Subset.size()].first != I) {
        std::string OutString;
        raw_string_ostream OS(OutString);
        assert(!Subset.empty() && "Unexpected empty range");
        OS << "Instruction with invalid costs prevented vectorization at VF=(";
        for (auto &Pair : Subset)
          OS << (Pair.second == Subset.front().second ? "" : ", ")
             << Pair.second;
        OS << "):";
        if (auto *CI = dyn_cast<CallInst>(I))
          OS << " call to " << CI->getCalledFunction()->getName();
        else
          OS << " " << I->getOpcodeName();
        OS.flush();
        reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
        Tail = Tail.drop_front(Subset.size());
        Subset = {};
      } else
        // Grow the subset by one element.
        Subset = Tail.take_front(Subset.size() + 1);
    } while (!Tail.empty());
  }

  if (!EnableCondStoresVectorization && NumPredStores) {
    reportVectorizationFailure("There are conditional stores.",
        "store that is conditionally executed prevents vectorization",
        "ConditionalStore", ORE, TheLoop);
    ChosenFactor = ScalarCost;
  }

  LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
                 ChosenFactor.Cost >= ScalarCost.Cost) dbgs()
             << "LV: Vectorization seems to be not beneficial, "
             << "but was forced by a user.\n");
  LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
  return ChosenFactor;
}

bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
    const Loop &L, ElementCount VF) const {
  // Cross-iteration phis such as reductions need special handling and are
  // currently unsupported.
  if (any_of(L.getHeader()->phis(),
             [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); }))
    return false;

  // Phis with uses outside of the loop require special handling and are
  // currently unsupported.
  for (auto &Entry : Legal->getInductionVars()) {
    // Look for uses of the value of the induction at the last iteration.
    Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
    for (User *U : PostInc->users())
      if (!L.contains(cast<Instruction>(U)))
        return false;
    // Look for uses of the penultimate value of the induction.
    for (User *U : Entry.first->users())
      if (!L.contains(cast<Instruction>(U)))
        return false;
  }

  // Induction variables that are widened require special handling that is
  // currently not supported.
  if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
        return !(this->isScalarAfterVectorization(Entry.first, VF) ||
                 this->isProfitableToScalarize(Entry.first, VF));
      }))
    return false;

  // Epilogue vectorization code has not been audited to ensure it handles
  // non-latch exits properly. It may be fine, but it needs to be audited and
  // tested.
  if (L.getExitingBlock() != L.getLoopLatch())
    return false;

  return true;
}

bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
    const ElementCount VF) const {
  // FIXME: We need a much better cost-model to take different parameters such
  // as register pressure, code size increase and cost of extra branches into
  // account. For now we apply a very crude heuristic and only consider loops
  // with vectorization factors larger than a certain value.
  // We also consider epilogue vectorization unprofitable for targets that
  // don't consider interleaving beneficial (e.g. MVE).
  if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
    return false;
  if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
    return true;
  return false;
}
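
// Illustrative example for the heuristic above: assuming the
// EpilogueVectorizationMinVF option keeps its default of 16, a main loop VF
// of 16 on a target with a max interleave factor greater than 1 is considered
// profitable to epilogue-vectorize, while a main loop VF of 8 is not.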

VectorizationFactor
LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
    const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
  VectorizationFactor Result = VectorizationFactor::Disabled();
  if (!EnableEpilogueVectorization) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
    return Result;
  }

  if (!isScalarEpilogueAllowed()) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
                  "allowed.\n";);
    return Result;
  }

  // Not really a cost consideration, but check for unsupported cases here to
  // simplify the logic.
  if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
                  "not a supported candidate.\n";);
    return Result;
  }

  if (EpilogueVectorizationForceVF > 1) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
    ElementCount ForcedEC =
        ElementCount::getFixed(EpilogueVectorizationForceVF);
    if (LVP.hasPlanWithVF(ForcedEC))
      return {ForcedEC, 0};
    else {
      LLVM_DEBUG(
          dbgs()
          << "LEV: Epilogue vectorization forced factor is not viable.\n";);
      return Result;
    }
  }

  if (TheLoop->getHeader()->getParent()->hasOptSize() ||
      TheLoop->getHeader()->getParent()->hasMinSize()) {
    LLVM_DEBUG(
        dbgs()
        << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
    return Result;
  }

  auto FixedMainLoopVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
  if (MainLoopVF.isScalable())
    LLVM_DEBUG(
        dbgs() << "LEV: Epilogue vectorization using scalable vectors not "
                  "yet supported. Converting to fixed-width (VF="
               << FixedMainLoopVF << ") instead\n");

  if (!isEpilogueVectorizationProfitable(FixedMainLoopVF)) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
                         "this loop\n");
    return Result;
  }

  for (auto &NextVF : ProfitableVFs)
    if (ElementCount::isKnownLT(NextVF.Width, FixedMainLoopVF) &&
        (Result.Width.getFixedValue() == 1 ||
         isMoreProfitable(NextVF, Result)) &&
        LVP.hasPlanWithVF(NextVF.Width))
      Result = NextVF;

  if (Result != VectorizationFactor::Disabled())
    LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
                      << Result.Width.getFixedValue() << "\n";);
  return Result;
}

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();
  // For in-loop reductions, no element types are added to ElementTypesInLoop
  // if there are no loads/stores in the loop. In this case, check through the
  // reduction variables to determine the maximum width.
  if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
    // Reset MaxWidth so that we can find the smallest type used by
    // recurrences in the loop.
    MaxWidth = -1U;
    for (auto &PhiDescriptorPair : Legal->getReductionVars()) {
      const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
      // When finding the min width used by the recurrence we need to account
      // for casts on the input operands of the recurrence.
      MaxWidth = std::min<unsigned>(
          MaxWidth, std::min<unsigned>(
                        RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
                        RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
    }
  } else {
    for (Type *T : ElementTypesInLoop) {
      MinWidth = std::min<unsigned>(
          MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
      MaxWidth = std::max<unsigned>(
          MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
    }
  }
  return {MinWidth, MaxWidth};
}

void LoopVectorizationCostModel::collectElementTypesForWidening() {
  ElementTypesInLoop.clear();
  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the loop.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Type *T = I.getType();

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (auto *PN = dyn_cast<PHINode>(&I)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        const RecurrenceDescriptor &RdxDesc =
            Legal->getReductionVars().find(PN)->second;
        if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
            TTI.preferInLoopReduction(RdxDesc.getOpcode(),
                                      RdxDesc.getRecurrenceType(),
                                      TargetTransformInfo::ReductionFlags()))
          continue;
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      assert(T->isSized() &&
             "Expected the load/store/recurrence type to be sized");

      ElementTypesInLoop.insert(T);
    }
  }
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
                                                           unsigned LoopCost) {
  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop
  // overhead. There are many micro-architectural considerations that we
  // can't predict at this level. For example, frontend pressure (on decode
  // or fetch) due to code size, or the number and capabilities of the
  // execution ports.
  //
  // We use the following heuristics to select the interleave count:
  // 1. If the code has reductions, then we interleave to break the cross
  //    iteration dependency.
  // 2. If the loop is really small, then we interleave to reduce the loop
  //    overhead.
  // 3. We don't interleave if we think that we will spill registers to
  //    memory due to the increased register pressure.

  if (!isScalarEpilogueAllowed())
    return 1;

  // The maximum safe dependence distance was already used to cap the VF, so
  // do not interleave on top of that.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
  const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
  // because with the above conditions interleaving can expose ILP and break
  // cross iteration dependences for reductions.
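  // Illustrative example: assuming TinyTripCountInterleaveThreshold keeps its
  // default of 128, a loop with an estimated trip count of 100 is not
  // interleaved below unless the scalar-reduction exception applies.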
  if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
      !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
    return 1;

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants so assume that we have at least one
  // instruction that uses at least one register.
  for (auto &pair : R.MaxLocalUsers) {
    pair.second = std::max(pair.second, 1U);
  }

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to
  // be a power of two. We want the interleave count to be a power of two to
  // simplify any addressing operations or alignment considerations.
  // We also want power-of-two interleave counts to ensure that the induction
  // variable of the vector loop wraps to zero when the tail is folded by
  // masking; this currently happens when OptForSize, in which case IC is set
  // to 1 above.
  unsigned IC = UINT_MAX;

  for (auto &pair : R.MaxLocalUsers) {
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
    if (VF.isScalar()) {
      if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumScalarRegs;
    } else {
      if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumVectorRegs;
    }
    unsigned MaxLocalUsers = pair.second;
    unsigned LoopInvariantRegs = 0;
    if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
      LoopInvariantRegs = R.LoopInvariantRegs[pair.first];

    unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) /
                                   MaxLocalUsers);
    // Don't count the induction variable as interleaved.
    if (EnableIndVarRegisterHeur) {
      TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
                            std::max(1U, (MaxLocalUsers - 1)));
    }

    IC = std::min(IC, TmpIC);
  }

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount =
      TTI.getMaxInterleaveFactor(VF.getKnownMinValue());

  // Check if the user has overridden the max.
  if (VF.isScalar()) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }
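
  // Worked example for the formula above (numbers assumed for exposition):
  // with 32 target registers, 2 loop-invariant values and 5 live values at
  // the widest point, IC becomes PowerOf2Floor((32 - 2) / 5) =
  // PowerOf2Floor(6) = 4 interleaved instances.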

  // If the trip count is a known or estimated compile-time constant, limit
  // the interleave count to be less than the trip count divided by VF,
  // provided it is at least 1.
  //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second
  // vector iteration are enabled. However, for larger loops, there is likely
  // to be a similar benefit as for fixed-width vectors. For now, we choose to
  // leave the InterleaveCount as if vscale is '1', although if some
  // information about the vector is known (e.g. min vector size), we can make
  // a better decision.
  if (BestKnownTC) {
    MaxInterleaveCount =
        std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
    // Make sure MaxInterleaveCount is greater than 0.
    MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
  }

  assert(MaxInterleaveCount > 0 &&
         "Maximum interleave count must be greater than 0");

  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target and trip count allow.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else
    // Make sure IC is greater than 0.
    IC = std::max(1u, IC);

  assert(IC > 0 && "Interleave count must be greater than 0.");

  // If we did not calculate the cost for VF (because the user selected the
  // VF) then we calculate the cost of VF here.
  if (LoopCost == 0) {
    InstructionCost C = expectedCost(VF).first;
    assert(C.isValid() && "Expected to have chosen a VF with valid cost");
    LoopCost = *C.getValue();
  }

  assert(LoopCost && "Non-zero loop cost expected");

  // Interleave if we vectorized this loop and there is a reduction that could
  // benefit from interleaving.
  if (VF.isVector() && HasReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // Note that if we've already vectorized the loop we will have done the
  // runtime check and so interleaving won't require further checks.
  bool InterleavingRequiresRuntimePointerCheck =
      (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead
  // and potentially expose ILP opportunities.
  LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
                    << "LV: IC is " << IC << '\n'
                    << "LV: VF is " << VF << '\n');
  const bool AggressivelyInterleaveReductions =
      TTI.enableAggressiveInterleaving(HasReductions);
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the cost overhead is 1 and we use the cost model
    // to estimate the cost of the loop and interleave until the cost of the
    // loop overhead is about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by max interleave count)
    // are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // There is little point in interleaving for reductions containing selects
    // and compares when VF=1, since it may just create more overhead than
    // it's worth for loops with small trip counts. This is because we still
    // have to do the final reduction after the loop.
    bool HasSelectCmpReductions =
        HasReductions &&
        any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
          const RecurrenceDescriptor &RdxDesc = Reduction.second;
          return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
              RdxDesc.getRecurrenceKind());
        });
    if (HasSelectCmpReductions) {
      LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
      return 1;
    }

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. For tree-wise reductions
    // set the limit to 2, and for ordered reductions it's best to disable
    // interleaving entirely.
    if (HasReductions && TheLoop->getLoopDepth() > 1) {
      bool HasOrderedReductions =
          any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
            const RecurrenceDescriptor &RdxDesc = Reduction.second;
            return RdxDesc.isOrdered();
          });
      if (HasOrderedReductions) {
        LLVM_DEBUG(
            dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
        return 1;
      }

      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      LLVM_DEBUG(
          dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    // If there are scalar reductions and TTI has enabled aggressive
    // interleaving for reductions, we will interleave to expose ILP.
    if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
        AggressivelyInterleaveReductions) {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressive as the normal
      // IC to satisfy the rare situation when resources are too limited.
      return std::max(IC / 2, SmallIC);
    } else {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
      return SmallIC;
    }
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  if (AggressivelyInterleaveReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi map that
  // holds the list of intervals that *end* at a specific location. This multi
  // map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but
  // are used inside the loop. We need this number separately from the
  // max-interval usage number because, when we unroll, loop-invariant values
  // do not take more registers.
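  // Illustrative example: for the straight-line sequence %a = ...; %b = ...;
  // %c = use %a; %d = use %b, the intervals are [%a, %c] and [%b, %d]. Both
  // are open between %b and %c, so the maximum number of simultaneously live
  // values (and thus the register estimate) is 2.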
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  using IntervalMap = DenseMap<Instruction *, unsigned>;

  // Maps instruction to its index.
  SmallVector<Instruction *, 64> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the list of instruction indices that are used in the loop.
  SmallPtrSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      IdxToInstr.push_back(&I);

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and
        // continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = IdxToInstr.size();
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  using InstrList = SmallVector<Instruction *, 2>;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallPtrSet<Instruction *, 8> OpenIntervals;
  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());

  LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
  const auto &TTICapture = TTI;
  auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
    if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
      return 0;
    InstructionCost::CostType RegUsage =
        *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
    assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() &&
           "Nonsensical values for register usage.");
    return RegUsage;
  };
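
  // Illustrative example for GetRegUsage above (target behaviour assumed for
  // exposition): on a target with 128-bit vector registers,
  // GetRegUsage(i32, VF=8) queries TTI for <8 x i32>, i.e. 256 bits, which
  // such a target would typically report as 2 registers.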
  for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
    Instruction *I = IdxToInstr[i];

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I))
      continue;

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;

    // For each VF find the maximum usage of registers.
    for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
      // Count the number of live intervals.
      SmallMapVector<unsigned, unsigned, 4> RegUsage;

      if (VFs[j].isScalar()) {
        for (auto Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          if (RegUsage.find(ClassID) == RegUsage.end())
            RegUsage[ClassID] = 1;
          else
            RegUsage[ClassID] += 1;
        }
      } else {
        collectUniformsAndScalars(VFs[j]);
        for (auto Inst : OpenIntervals) {
          // Skip ignored values for VF > 1.
          if (VecValuesToIgnore.count(Inst))
            continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            if (RegUsage.find(ClassID) == RegUsage.end())
              RegUsage[ClassID] = 1;
            else
              RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            if (RegUsage.find(ClassID) == RegUsage.end())
              RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
            else
              RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
        }
      }

      for (auto &pair : RegUsage) {
        if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
          MaxUsages[j][pair.first] =
              std::max(MaxUsages[j][pair.first], pair.second);
        else
          MaxUsages[j][pair.first] = pair.second;
      }
    }

    LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
                      << OpenIntervals.size() << '\n');

    // Add the current instruction to the list of open intervals.
    OpenIntervals.insert(I);
  }

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    SmallMapVector<unsigned, unsigned, 4> Invariant;

    for (auto Inst : LoopInvariants) {
      unsigned Usage =
          VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
      unsigned ClassID =
          TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      if (Invariant.find(ClassID) == Invariant.end())
        Invariant[ClassID] = Usage;
      else
        Invariant[ClassID] += Usage;
    }

    LLVM_DEBUG({
      dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
      dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
             << " item\n";
      for (const auto &pair : MaxUsages[i]) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
      dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
             << " item\n";
      for (const auto &pair : Invariant) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
    });

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[i];
    RUs[i] = RU;
  }

  return RUs;
}

bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
                                                           ElementCount VF) {
  // TODO: Cost model for emulated masked load/store is completely
  // broken. This hack guides the cost model to use an artificially
  // high enough value to practically disable vectorization with such
  // operations, except where previously deployed legality hack allowed
  // using very low cost values. This is to avoid regressions coming simply
  // from moving the "masked load/store" check from legality to the cost
  // model.
  // Masked Load/Gather emulation was previously never allowed.
  // Limited number of Masked Store/Scatter emulation was allowed.
  assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction");
  return isa<LoadInst>(I) ||
         (isa<StoreInst>(I) &&
          NumPredStores > NumberOfStoresToPredicate);
}

void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
  // If we aren't vectorizing the loop, or if we've already collected the
  // instructions to scalarize, there's nothing to do. Collection may already
  // have occurred if we have a user-selected VF and are now computing the
  // expected cost for interleaving.
  if (VF.isScalar() || VF.isZero() ||
      InstsToScalarize.find(VF) != InstsToScalarize.end())
    return;

  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
  // not profitable to scalarize any instructions, the presence of VF in the
  // map will indicate that we've analyzed it already.
  ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];

  // Find all the instructions that are scalar with predication in the loop
  // and determine if it would be better to not if-convert the blocks they
  // are in. If so, we also record the instructions to scalarize.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!blockNeedsPredicationForAnyReason(BB))
      continue;
    for (Instruction &I : *BB)
      if (isScalarWithPredication(&I, VF)) {
        ScalarCostsTy ScalarCosts;
        // Do not apply discount if scalable, because that would lead to
        // invalid scalarization costs.
        // Do not apply discount logic if hacked cost is needed
        // for emulated masked memrefs.
        if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) &&
            computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
          ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
        // Remember that BB will remain after vectorization.
        PredicatedBBsAfterVectorization.insert(BB);
      }
  }
}

int LoopVectorizationCostModel::computePredInstDiscount(
    Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
  assert(!isUniformAfterVectorization(PredInst, VF) &&
         "Instruction marked uniform-after-vectorization will be predicated");

  // Initialize the discount to zero, meaning that the scalar version and the
  // vector version cost the same.
  InstructionCost Discount = 0;

  // Holds instructions to analyze. The instructions we visit are mapped in
  // ScalarCosts. Those instructions are the ones that would be scalarized if
  // we find that the scalar version costs less.
  SmallVector<Instruction *, 8> Worklist;
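
  // Sign convention used below (values assumed for exposition): if the vector
  // form of an instruction costs 10 and its scaled scalar form costs 6, the
  // discount grows by 4. A non-negative final discount means scalarizing the
  // whole chain is at least as cheap as vectorizing it.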

  // Returns true if the given instruction can be scalarized.
  auto canBeScalarized = [&](Instruction *I) -> bool {
    // We only attempt to scalarize instructions forming a single-use chain
    // from the original predicated block that would otherwise be vectorized.
    // Although not strictly necessary, we give up on instructions we know
    // will already be scalar to avoid traversing chains that are unlikely to
    // be beneficial.
    if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
        isScalarAfterVectorization(I, VF))
      return false;

    // If the instruction is scalar with predication, it will be analyzed
    // separately. We ignore it within the context of PredInst.
    if (isScalarWithPredication(I, VF))
      return false;

    // If any of the instruction's operands are uniform after vectorization,
    // the instruction cannot be scalarized. This prevents, for example, a
    // masked load from being scalarized.
    //
    // We assume we will only emit a value for lane zero of an instruction
    // marked uniform after vectorization, rather than VF identical values.
    // Thus, if we scalarize an instruction that uses a uniform, we would
    // create uses of values corresponding to the lanes we aren't emitting
    // code for. This behavior can be changed by allowing getScalarValue to
    // clone the lane zero values for uniforms rather than asserting.
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get()))
        if (isUniformAfterVectorization(J, VF))
          return false;

    // Otherwise, we can scalarize the instruction.
    return true;
  };

  // Compute the expected cost discount from scalarizing the entire expression
  // feeding the predicated instruction. We currently only consider
  // expressions that are single-use instruction chains.
  Worklist.push_back(PredInst);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();

    // If we've already analyzed the instruction, there's nothing to do.
    if (ScalarCosts.find(I) != ScalarCosts.end())
      continue;

    // Compute the cost of the vector instruction. Note that this cost
    // already includes the scalarization overhead of the predicated
    // instruction.
    InstructionCost VectorCost = getInstructionCost(I, VF).first;

    // Compute the cost of the scalarized instruction. This cost is the cost
    // of the instruction as if it wasn't if-converted and instead remained in
    // the predicated block. We will scale this cost by block probability
    // after computing the scalarization overhead.
    InstructionCost ScalarCost =
        VF.getFixedValue() *
        getInstructionCost(I, ElementCount::getFixed(1)).first;

    // Compute the scalarization overhead of needed insertelement instructions
    // and phi nodes.
    if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
      ScalarCost += TTI.getScalarizationOverhead(
          cast<VectorType>(ToVectorTy(I->getType(), VF)),
          APInt::getAllOnes(VF.getFixedValue()), true, false);
      ScalarCost +=
          VF.getFixedValue() *
          TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
    }

    // Compute the scalarization overhead of needed extractelement
    // instructions. For each of the instruction's operands, if the operand
    // can be scalarized, add it to the worklist; otherwise, account for the
    // overhead.
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get())) {
        assert(VectorType::isValidElementType(J->getType()) &&
               "Instruction has non-scalar type");
        if (canBeScalarized(J))
          Worklist.push_back(J);
        else if (needsExtract(J, VF)) {
          ScalarCost += TTI.getScalarizationOverhead(
              cast<VectorType>(ToVectorTy(J->getType(), VF)),
              APInt::getAllOnes(VF.getFixedValue()), false, true);
        }
      }

    // Scale the total scalar cost by block probability.
    ScalarCost /= getReciprocalPredBlockProb();

    // Compute the discount. A non-negative discount means the vector version
    // of the instruction costs more, and scalarizing would be beneficial.
    Discount += VectorCost - ScalarCost;
    ScalarCosts[I] = ScalarCost;
  }

  return *Discount.getValue();
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::expectedCost(
    ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
  VectorizationCostTy Cost;

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    VectorizationCostTy BlockCost;

    // For each instruction in the old loop.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      // Skip ignored values.
      if (ValuesToIgnore.count(&I) ||
          (VF.isVector() && VecValuesToIgnore.count(&I)))
        continue;

      VectorizationCostTy C = getInstructionCost(&I, VF);

      // Check if we should override the cost.
      if (C.first.isValid() &&
          ForceTargetInstructionCost.getNumOccurrences() > 0)
        C.first = InstructionCost(ForceTargetInstructionCost);

      // Keep a list of instructions with invalid costs.
      if (Invalid && !C.first.isValid())
        Invalid->emplace_back(&I, VF);

      BlockCost.first += C.first;
      BlockCost.second |= C.second;
      LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
                        << " for VF " << VF << " For instruction: " << I
                        << '\n');
    }

    // If we are vectorizing a predicated block, it will have been
    // if-converted. This means that the block's instructions (aside from
    // stores and instructions that may divide by zero) will now be
    // unconditionally executed. For the scalar case, we may not always
    // execute the predicated block, if it is an if-else block. Thus, scale
    // the block's cost by the probability of executing it.
    // blockNeedsPredication from Legal is used so as to not include all
    // blocks in tail-folded loops.
    if (VF.isScalar() && Legal->blockNeedsPredication(BB))
      BlockCost.first /= getReciprocalPredBlockProb();

    Cost.first += BlockCost.first;
    Cost.second |= BlockCost.second;
  }

  return Cost;
}

/// Gets the address access SCEV after verifying that the access pattern is
/// loop invariant except for the induction variable dependence.
///
/// This SCEV can be sent to the Target in order to estimate the address
/// calculation cost.
static const SCEV *getAddressAccessSCEV(
    Value *Ptr,
    LoopVectorizationLegality *Legal,
    PredicatedScalarEvolution &PSE,
    const Loop *TheLoop) {

  auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  if (!Gep)
    return nullptr;

  // We are looking for a GEP with all loop-invariant indices except for one
  // which should be an induction variable.
  auto SE = PSE.getSE();
  unsigned NumOperands = Gep->getNumOperands();
  for (unsigned i = 1; i < NumOperands; ++i) {
    Value *Opd = Gep->getOperand(i);
    if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
        !Legal->isInductionVariable(Opd))
      return nullptr;
  }

  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
  return PSE.getSCEV(Ptr);
}
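
// Illustrative example for the helper above: for a GEP such as
//   %p = getelementptr inbounds double, double* %base, i64 %ind
// where %ind is an induction variable, the pointer's SCEV is returned; if any
// index is neither loop-invariant nor an induction variable, nullptr is
// returned and the target sees no stride information.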

static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
  return Legal->hasStride(I->getOperand(0)) ||
         Legal->hasStride(I->getOperand(1));
}

InstructionCost
LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
                                                        ElementCount VF) {
  assert(VF.isVector() &&
         "Scalarization cost of instruction implies vectorization.");
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  Type *ValTy = getLoadStoreType(I);
  auto SE = PSE.getSE();

  unsigned AS = getLoadStoreAddressSpace(I);
  Value *Ptr = getLoadStorePointerOperand(I);
  Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
  // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
  // that it is being called from this specific place.

  // Figure out whether the access is strided, and get the stride value if
  // it's known at compile time.
  const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);

  // Get the cost of the scalar memory instruction and address computation.
  InstructionCost Cost =
      VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);

  // Don't pass *I here, since it is scalar but will actually be part of a
  // vectorized loop where the user of it is a vectorized instruction.
  const Align Alignment = getLoadStoreAlignment(I);
  Cost += VF.getKnownMinValue() *
          TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
                              Alignment, AS, TTI::TCK_RecipThroughput);

  // Get the overhead of the extractelement and insertelement instructions
  // we might create due to scalarization.
  Cost += getScalarizationOverhead(I, VF);

  // If we have a predicated load/store, it will need extra i1 extracts and
  // conditional branches, but may not be executed for each vector lane. Scale
  // the cost by the probability of executing the predicated block.
  if (isPredicatedInst(I, VF)) {
    Cost /= getReciprocalPredBlockProb();

    // Add the cost of an i1 extract and a branch.
    auto *Vec_i1Ty =
        VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
    Cost += TTI.getScalarizationOverhead(
        Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
        /*Insert=*/false, /*Extract=*/true);
    Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);

    if (useEmulatedMaskMemRefHack(I, VF))
      // Artificially setting to a high enough value to practically disable
      // vectorization with such operations.
      Cost = 3000000;
  }

  return Cost;
}

InstructionCost
LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
                                                    ElementCount VF) {
  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  Value *Ptr = getLoadStorePointerOperand(I);
  unsigned AS = getLoadStoreAddressSpace(I);
  int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
  enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
         "Stride should be 1 or -1 for consecutive memory access");
  const Align Alignment = getLoadStoreAlignment(I);
  InstructionCost Cost = 0;
  if (Legal->isMaskRequired(I))
    Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
                                      CostKind);
  else
    Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
                                CostKind, I);

  bool Reverse = ConsecutiveStride < 0;
  if (Reverse)
    Cost +=
        TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
  return Cost;
}

InstructionCost
LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
                                                ElementCount VF) {
  assert(Legal->isUniformMemOp(*I));

  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  const Align Alignment = getLoadStoreAlignment(I);
  unsigned AS = getLoadStoreAddressSpace(I);
  enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  if (isa<LoadInst>(I)) {
    return TTI.getAddressComputationCost(ValTy) +
           TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
                               CostKind) +
           TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
  }
  StoreInst *SI = cast<StoreInst>(I);

  bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
  return TTI.getAddressComputationCost(ValTy) +
         TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
                             CostKind) +
         (isLoopInvariantStoreValue
              ? 0
              : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
                                       VF.getKnownMinValue() - 1));
}

InstructionCost
LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
                                                 ElementCount VF) {
  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  const Align Alignment = getLoadStoreAlignment(I);
  const Value *Ptr = getLoadStorePointerOperand(I);

  return TTI.getAddressComputationCost(VectorTy) +
         TTI.getGatherScatterOpCost(
             I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I),
             Alignment, TargetTransformInfo::TCK_RecipThroughput, I);
}

InstructionCost
LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
                                                   ElementCount VF) {
  // TODO: Once we have support for interleaving with scalable vectors
  // we can calculate the cost properly here.
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  unsigned AS = getLoadStoreAddressSpace(I);

  auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Fail to get an interleaved access group.");

  unsigned InterleaveFactor = Group->getFactor();
  auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);

  // Holds the indices of existing members in the interleaved group.
  SmallVector<unsigned, 4> Indices;
  for (unsigned IF = 0; IF < InterleaveFactor; IF++)
    if (Group->getMember(IF))
      Indices.push_back(IF);

  // Calculate the cost of the whole interleaved group.
  bool UseMaskForGaps =
      (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
      (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
  InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
      I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
      Group->getAlign(), AS, TTI::TCK_RecipThroughput,
      Legal->isMaskRequired(I), UseMaskForGaps);

  if (Group->isReverse()) {
    // TODO: Add support for reversed masked interleaved access.
    assert(!Legal->isMaskRequired(I) &&
           "Reverse masked interleaved access not supported.");
    Cost +=
        Group->getNumMembers() *
        TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
  }
  return Cost;
}

Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
    Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
  using namespace llvm::PatternMatch;
  // Early exit for no in-loop reductions.
  if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
    return None;
  auto *VectorTy = cast<VectorType>(Ty);

  // We are looking for a pattern of, and finding the minimal acceptable cost:
  //   reduce(mul(ext(A), ext(B))) or
  //   reduce(mul(A, B)) or
  //   reduce(ext(A)) or
  //   reduce(A).
  // The basic idea is that we walk down the tree to do that, finding the root
  // reduction instruction in InLoopReductionImmediateChains. From there we
  // find the pattern of mul/ext and test the cost of the entire pattern vs
  // the cost of the components. If the reduction cost is lower, then we
  // return it for the reduction instruction and 0 for the other instructions
  // in the pattern. If it is not, we return an invalid cost specifying the
  // original cost method should be used.
  Instruction *RetI = I;
  if (match(RetI, m_ZExtOrSExt(m_Value()))) {
    if (!RetI->hasOneUser())
      return None;
    RetI = RetI->user_back();
  }
  if (match(RetI, m_Mul(m_Value(), m_Value())) &&
      RetI->user_back()->getOpcode() == Instruction::Add) {
    if (!RetI->hasOneUser())
      return None;
    RetI = RetI->user_back();
  }

  // Test if the found instruction is a reduction, and if not return an
  // invalid cost specifying the parent to use the original cost modelling.
  if (!InLoopReductionImmediateChains.count(RetI))
    return None;

  // Find the reduction this chain is a part of and calculate the basic cost
  // of the reduction on its own.
  Instruction *LastChain = InLoopReductionImmediateChains[RetI];
  Instruction *ReductionPhi = LastChain;
  while (!isa<PHINode>(ReductionPhi))
    ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];

  const RecurrenceDescriptor &RdxDesc =
      Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;

  InstructionCost BaseCost = TTI.getArithmeticReductionCost(
      RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);

  // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
  // normal fmul instruction to the cost of the fadd reduction.
  if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
    BaseCost +=
        TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);

  // If we're using ordered reductions then we can just return the base cost
  // here, since getArithmeticReductionCost calculates the full ordered
  // reduction cost when FP reassociation is not allowed.
  if (useOrderedReductions(RdxDesc))
    return BaseCost;

  // Get the operand that was not the reduction chain and match it to one of
  // the patterns, returning the better cost if it is found.
  Instruction *RedOp = RetI->getOperand(1) == LastChain
                           ? dyn_cast<Instruction>(RetI->getOperand(0))
                           : dyn_cast<Instruction>(RetI->getOperand(1));

  VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);

  Instruction *Op0, *Op1;
  if (RedOp &&
      match(RedOp,
            m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
      match(Op0, m_ZExtOrSExt(m_Value())) &&
      Op0->getOpcode() == Op1->getOpcode() &&
      Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
      !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
      (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {

    // Matched reduce(ext(mul(ext(A), ext(B)))).
    // Note that the extend opcodes need to all match, or if A==B they will
    // have been converted to zext(mul(sext(A), sext(A))) as it is known
    // positive, which is equally fine.
    bool IsUnsigned = isa<ZExtInst>(Op0);
    auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
    auto *MulType = VectorType::get(Op0->getType(), VectorTy);

    InstructionCost ExtCost =
        TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
                             TTI::CastContextHint::None, CostKind, Op0);
    InstructionCost MulCost =
        TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
    InstructionCost Ext2Cost =
        TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
                             TTI::CastContextHint::None, CostKind, RedOp);

    InstructionCost RedCost = TTI.getExtendedAddReductionCost(
        /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
        CostKind);

    if (RedCost.isValid() &&
        RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
      return I == RetI ? RedCost : 0;
  } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
             !TheLoop->isLoopInvariant(RedOp)) {
    // Matched reduce(ext(A)).
    bool IsUnsigned = isa<ZExtInst>(RedOp);
    auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
    InstructionCost RedCost = TTI.getExtendedAddReductionCost(
        /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
        CostKind);

    InstructionCost ExtCost =
        TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
                             TTI::CastContextHint::None, CostKind, RedOp);
    if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
      return I == RetI ? RedCost : 0;
  } else if (RedOp &&
             match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
    if (match(Op0, m_ZExtOrSExt(m_Value())) &&
        Op0->getOpcode() == Op1->getOpcode() &&
        !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
      bool IsUnsigned = isa<ZExtInst>(Op0);
      Type *Op0Ty = Op0->getOperand(0)->getType();
      Type *Op1Ty = Op1->getOperand(0)->getType();
      Type *LargestOpTy =
          Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
                                                                    : Op0Ty;
      auto *ExtType = VectorType::get(LargestOpTy, VectorTy);

      // Matched reduce(mul(ext(A), ext(B))), where the two exts may have
      // different source sizes. We take the largest type as the ext to
      // reduce, and account for the remaining cost as, for example,
      // reduce(mul(ext(ext(A)), ext(B))).
      InstructionCost ExtCost0 = TTI.getCastInstrCost(
          Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
          TTI::CastContextHint::None, CostKind, Op0);
      InstructionCost ExtCost1 = TTI.getCastInstrCost(
          Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
          TTI::CastContextHint::None, CostKind, Op1);
      InstructionCost MulCost =
          TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);

      InstructionCost RedCost = TTI.getExtendedAddReductionCost(
          /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
          CostKind);
      InstructionCost ExtraExtCost = 0;
      if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
        Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
        ExtraExtCost = TTI.getCastInstrCost(
            ExtraExtOp->getOpcode(), ExtType,
            VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
            TTI::CastContextHint::None, CostKind, ExtraExtOp);
      }

      if (RedCost.isValid() &&
          (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
        return I == RetI ? RedCost : 0;
    } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
      // Matched reduce(mul()).
      InstructionCost MulCost =
          TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);

      InstructionCost RedCost = TTI.getExtendedAddReductionCost(
          /*IsMLA=*/true, /*IsUnsigned=*/true, RdxDesc.getRecurrenceType(),
          VectorTy, CostKind);

      if (RedCost.isValid() && RedCost < MulCost + BaseCost)
        return I == RetI ? RedCost : 0;
    }
  }

  return I == RetI ? Optional<InstructionCost>(BaseCost) : None;
}

InstructionCost
LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
                                                     ElementCount VF) {
  // Calculate the scalar cost only. The vectorization cost should already
  // have been computed by this point.
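  // As a rough illustration of the decomposition below (actual numbers are
  // entirely target-dependent): for a plain `load i32, i32* %p`, the scalar
  // cost is getAddressComputationCost(i32) + getMemoryOpCost(Load, i32).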
  if (VF.isScalar()) {
    Type *ValTy = getLoadStoreType(I);
    const Align Alignment = getLoadStoreAlignment(I);
    unsigned AS = getLoadStoreAddressSpace(I);

    return TTI.getAddressComputationCost(ValTy) +
           TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
                               TTI::TCK_RecipThroughput, I);
  }
  return getWideningCost(I, VF);
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::getInstructionCost(Instruction *I,
                                               ElementCount VF) {
  // If we know that this instruction will remain uniform, check the cost of
  // the scalar version.
  if (isUniformAfterVectorization(I, VF))
    VF = ElementCount::getFixed(1);

  if (VF.isVector() && isProfitableToScalarize(I, VF))
    return VectorizationCostTy(InstsToScalarize[VF][I], false);

  // Forced scalars do not have any scalarization overhead.
  auto ForcedScalar = ForcedScalars.find(VF);
  if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
    auto InstSet = ForcedScalar->second;
    if (InstSet.count(I))
      return VectorizationCostTy(
          (getInstructionCost(I, ElementCount::getFixed(1)).first *
           VF.getKnownMinValue()),
          false);
  }

  Type *VectorTy;
  InstructionCost C = getInstructionCost(I, VF, VectorTy);

  bool TypeNotScalarized = false;
  if (VF.isVector() && VectorTy->isVectorTy()) {
    unsigned NumParts = TTI.getNumberOfParts(VectorTy);
    if (NumParts)
      TypeNotScalarized = NumParts < VF.getKnownMinValue();
    else
      C = InstructionCost::getInvalid();
  }
  return VectorizationCostTy(C, TypeNotScalarized);
}

InstructionCost
LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
                                                     ElementCount VF) const {

  // There is no mechanism yet to create a scalable scalarization loop,
  // so this is currently Invalid.
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  if (VF.isScalar())
    return 0;

  InstructionCost Cost = 0;
  Type *RetTy = ToVectorTy(I->getType(), VF);
  if (!RetTy->isVoidTy() &&
      (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
    Cost += TTI.getScalarizationOverhead(
        cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()),
        /*Insert=*/true, /*Extract=*/false);

  // Some targets keep addresses scalar.
  if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
    return Cost;

  // Some targets support efficient element stores.
  if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
    return Cost;

  // Collect operands to consider.
  CallInst *CI = dyn_cast<CallInst>(I);
  Instruction::op_range Ops = CI ? CI->args() : I->operands();

  // Skip operands that do not require extraction/scalarization and do not
  // incur any overhead.
  SmallVector<Type *> Tys;
  for (auto *V : filterExtractingOperands(Ops, VF))
    Tys.push_back(MaybeVectorizeType(V->getType(), VF));
  return Cost + TTI.getOperandsScalarizationOverhead(
                    filterExtractingOperands(Ops, VF), Tys);
}

void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
  if (VF.isScalar())
    return;
  NumPredStores = 0;
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the old loop.
    for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;

      // TODO: We should generate better code and update the cost model for
      // predicated uniform stores. Today they are treated as any other
      // predicated store (see added test cases in
      // invariant-store-vectorization.ll).
      if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
        NumPredStores++;

      if (Legal->isUniformMemOp(I)) {
        // TODO: Avoid replicating loads and stores instead of
        // relying on instcombine to remove them.
        // Load: Scalar load + broadcast
        // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
        InstructionCost Cost;
        if (isa<StoreInst>(&I) && VF.isScalable() &&
            isLegalGatherOrScatter(&I, VF)) {
          Cost = getGatherScatterCost(&I, VF);
          setWideningDecision(&I, VF, CM_GatherScatter, Cost);
        } else {
          assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
                 "Cannot yet scalarize uniform stores");
          Cost = getUniformMemOpCost(&I, VF);
          setWideningDecision(&I, VF, CM_Scalarize, Cost);
        }
        continue;
      }

      // We assume that widening is the best solution when possible.
      if (memoryInstructionCanBeWidened(&I, VF)) {
        InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
        int ConsecutiveStride = Legal->isConsecutivePtr(
            getLoadStoreType(&I), getLoadStorePointerOperand(&I));
        assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
               "Expected consecutive stride.");
        InstWidening Decision =
            ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
        setWideningDecision(&I, VF, Decision, Cost);
        continue;
      }

      // Choose between Interleaving, Gather/Scatter or Scalarization.
      InstructionCost InterleaveCost = InstructionCost::getInvalid();
      unsigned NumAccesses = 1;
      if (isAccessInterleaved(&I)) {
        auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");

        // Make one decision for the whole group.
        if (getWideningDecision(&I, VF) != CM_Unknown)
          continue;

        NumAccesses = Group->getNumMembers();
        if (interleavedAccessCanBeWidened(&I, VF))
          InterleaveCost = getInterleaveGroupCost(&I, VF);
      }

      InstructionCost GatherScatterCost =
          isLegalGatherOrScatter(&I, VF)
              ? getGatherScatterCost(&I, VF) * NumAccesses
              : InstructionCost::getInvalid();

      InstructionCost ScalarizationCost =
          getMemInstScalarizationCost(&I, VF) * NumAccesses;

      // Choose the best option for the current VF, record the decision, and
      // use it during vectorization.
      InstructionCost Cost;
      InstWidening Decision;
      if (InterleaveCost <= GatherScatterCost &&
          InterleaveCost < ScalarizationCost) {
        Decision = CM_Interleave;
        Cost = InterleaveCost;
      } else if (GatherScatterCost < ScalarizationCost) {
        Decision = CM_GatherScatter;
        Cost = GatherScatterCost;
      } else {
        Decision = CM_Scalarize;
        Cost = ScalarizationCost;
      }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group receives the cost, but
      // the cost will actually be assigned to one instruction.
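      // (For instance, assuming a factor-2 load group {A, B}: a single
      // CM_Interleave decision is recorded for both A and B, while the
      // combined group cost is carried by just one member.)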
      if (auto Group = getInterleavedAccessGroup(&I))
        setWideningDecision(Group, VF, Decision, Cost);
      else
        setWideningDecision(&I, VF, Decision, Cost);
    }
  }

  // Make sure that any load of an address and any other address computation
  // remain scalar unless there is gather/scatter support. This avoids
  // inevitable extracts into address registers, and also has the benefit of
  // activating LSR more, since that pass can't optimize vectorized
  // addresses.
  if (TTI.prefersVectorizedAddressing())
    return;

  // Start with all scalar pointer uses.
  SmallPtrSet<Instruction *, 8> AddrDefs;
  for (BasicBlock *BB : TheLoop->blocks())
    for (Instruction &I : *BB) {
      Instruction *PtrDef =
          dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
      if (PtrDef && TheLoop->contains(PtrDef) &&
          getWideningDecision(&I, VF) != CM_GatherScatter)
        AddrDefs.insert(PtrDef);
    }

  // Add all instructions used to generate the addresses.
  SmallVector<Instruction *, 4> Worklist;
  append_range(Worklist, AddrDefs);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (auto &Op : I->operands())
      if (auto *InstOp = dyn_cast<Instruction>(Op))
        if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
            AddrDefs.insert(InstOp).second)
          Worklist.push_back(InstOp);
  }

  for (auto *I : AddrDefs) {
    if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // cost functions, but since this involves the task of finding out
      // if the loaded register is involved in an address computation, it is
      // instead changed here when we know this is the case.
      InstWidening Decision = getWideningDecision(I, VF);
      if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
        // Scalarize a widened load of an address.
        setWideningDecision(
            I, VF, CM_Scalarize,
            (VF.getKnownMinValue() *
             getMemoryInstructionCost(I, ElementCount::getFixed(1))));
      else if (auto Group = getInterleavedAccessGroup(I)) {
        // Scalarize an interleave group of address loads.
        for (unsigned I = 0; I < Group->getFactor(); ++I) {
          if (Instruction *Member = Group->getMember(I))
            setWideningDecision(
                Member, VF, CM_Scalarize,
                (VF.getKnownMinValue() *
                 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
        }
      }
    } else
      // Make sure I gets scalarized and a cost estimate without
      // scalarization overhead.
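      // (Illustrative case, assuming no gather/scatter support: in a chain
      //   %p = load i32*, i32** %slot   ; pointer loaded inside the loop
      //   %v = load i32, i32* %p        ; %p is only used as an address
      // keeping %p and the arithmetic feeding it scalar avoids extracting
      // every lane of a vectorized %p back into an address register.)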
      ForcedScalars[VF].insert(I);
  }
}

InstructionCost
LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
                                               Type *&VectorTy) {
  Type *RetTy = I->getType();
  if (canTruncateToMinimalBitwidth(I, VF))
    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  auto SE = PSE.getSE();
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  auto hasSingleCopyAfterVectorization = [this](Instruction *I,
                                                ElementCount VF) -> bool {
    if (VF.isScalar())
      return true;

    auto Scalarized = InstsToScalarize.find(VF);
    assert(Scalarized != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return !Scalarized->second.count(I) &&
           llvm::all_of(I->users(), [&](User *U) {
             auto *UI = cast<Instruction>(U);
             return !Scalarized->second.count(UI);
           });
  };
  (void) hasSingleCopyAfterVectorization;

  if (isScalarAfterVectorization(I, VF)) {
    // With the exception of GEPs and PHIs, after scalarization there should
    // only be one copy of the instruction generated in the loop. This is
    // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result,
    // it means we don't have to multiply the instruction cost by VF.
    assert(I->getOpcode() == Instruction::GetElementPtr ||
           I->getOpcode() == Instruction::PHI ||
           (I->getOpcode() == Instruction::BitCast &&
            I->getType()->isPointerTy()) ||
           hasSingleCopyAfterVectorization(I, VF));
    VectorTy = RetTy;
  } else
    VectorTy = ToVectorTy(RetTy, VF);

  // TODO: We need to estimate the cost of intrinsic calls.
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    // We mark this instruction as zero-cost because the cost of GEPs in
    // vectorized code depends on whether the corresponding memory instruction
    // is scalarized or not. Therefore, we handle GEPs with the memory
    // instruction cost.
    return 0;
  case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
    bool ScalarPredicatedBB = false;
    BranchInst *BI = cast<BranchInst>(I);
    if (VF.isVector() && BI->isConditional() &&
        (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
         PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
      ScalarPredicatedBB = true;

    if (ScalarPredicatedBB) {
      // It is not possible to scalarize a scalable vector with predicated
      // instructions.
      if (VF.isScalable())
        return InstructionCost::getInvalid();
      // Return the cost for branches around scalarized and predicated blocks.
      auto *Vec_i1Ty =
          VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
      return (
          TTI.getScalarizationOverhead(Vec_i1Ty,
                                       APInt::getAllOnes(VF.getFixedValue()),
                                       /*Insert=*/false, /*Extract=*/true) +
          (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
    } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
      // The back-edge branch will remain, as will all scalar branches.
      return TTI.getCFInstrCost(Instruction::Br, CostKind);
    else
      // This branch will be eliminated by if-conversion.
      return 0;
    // Note: We currently assume zero cost for an unconditional branch inside
    // a predicated block since it will become a fall-through, although we
    // may decide in the future to call TTI for all branches.
  }
  case Instruction::PHI: {
    auto *Phi = cast<PHINode>(I);

    // First-order recurrences are replaced by vector shuffles inside the loop.
    // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
    if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
      return TTI.getShuffleCost(
          TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
          None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));

    // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
    // converted into select instructions. We require N - 1 selects per phi
    // node, where N is the number of incoming values.
    if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
      return (Phi->getNumIncomingValues() - 1) *
             TTI.getCmpSelInstrCost(
                 Instruction::Select, ToVectorTy(Phi->getType(), VF),
                 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
                 CmpInst::BAD_ICMP_PREDICATE, CostKind);

    return TTI.getCFInstrCost(Instruction::PHI, CostKind);
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    // If we have a predicated instruction, it may not be executed for each
    // vector lane. Get the scalarization cost and scale this amount by the
    // probability of executing the predicated block. If the instruction is not
    // predicated, we fall through to the next case.
    if (VF.isVector() && isScalarWithPredication(I, VF)) {
      InstructionCost Cost = 0;

      // These instructions have a non-void type, so account for the phi nodes
      // that we will create. This cost is likely to be zero. The phi node
      // cost, if any, should be scaled by the block probability because it
      // models a copy at the end of each predicated block.
      Cost += VF.getKnownMinValue() *
              TTI.getCFInstrCost(Instruction::PHI, CostKind);

      // The cost of the non-predicated instruction.
      Cost += VF.getKnownMinValue() *
              TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);

      // The cost of insertelement and extractelement instructions needed for
      // scalarization.
      Cost += getScalarizationOverhead(I, VF);

      // Scale the cost by the probability of executing the predicated blocks.
      // This assumes the predicated block for each vector lane is equally
      // likely.
      return Cost / getReciprocalPredBlockProb();
    }
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Since we will replace the stride with 1, the multiplication should go
    // away.
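    // (Sketch of the idea, with assumed shapes: for an access like
    // A[i * Stride] where versioning specializes the symbolic Stride to 1,
    // the `mul i64 %i, %Stride` feeding the GEP folds away, so charging it a
    // cost here would double count.)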
    if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
      return 0;

    // Detect reduction patterns.
    if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
      return *RedCost;

    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
    Value *Op2 = I->getOperand(1);
    TargetTransformInfo::OperandValueProperties Op2VP;
    TargetTransformInfo::OperandValueKind Op2VK =
        TTI.getOperandInfo(Op2, Op2VP);
    if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
      Op2VK = TargetTransformInfo::OK_UniformValue;

    SmallVector<const Value *, 4> Operands(I->operand_values());
    return TTI.getArithmeticInstrCost(
        I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
        Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
  }
  case Instruction::FNeg: {
    return TTI.getArithmeticInstrCost(
        I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
        TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
        TargetTransformInfo::OP_None, I->getOperand(0), I);
  }
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
    bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));

    const Value *Op0, *Op1;
    using namespace llvm::PatternMatch;
    if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
                        match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
      // select x, y, false --> x & y
      // select x, true, y --> x | y
      TTI::OperandValueProperties Op1VP = TTI::OP_None;
      TTI::OperandValueProperties Op2VP = TTI::OP_None;
      TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
      TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
      assert(Op0->getType()->getScalarSizeInBits() == 1 &&
             Op1->getType()->getScalarSizeInBits() == 1);

      SmallVector<const Value *, 2> Operands{Op0, Op1};
      return TTI.getArithmeticInstrCost(
          match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And,
          VectorTy, CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
    }

    Type *CondTy = SI->getCondition()->getType();
    if (!ScalarCond)
      CondTy = VectorType::get(CondTy, VF);

    CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
    if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
      Pred = Cmp->getPredicate();
    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
                                  CostKind, I);
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ValTy = I->getOperand(0)->getType();
    Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
    if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
      ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
    VectorTy = ToVectorTy(ValTy, VF);
    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
                                  cast<CmpInst>(I)->getPredicate(), CostKind,
                                  I);
  }
  case Instruction::Store:
  case Instruction::Load: {
    ElementCount Width = VF;
    if (Width.isVector()) {
      InstWidening Decision = getWideningDecision(I, Width);
      assert(Decision != CM_Unknown &&
             "CM decision should be taken at this point");
      if (Decision == CM_Scalarize)
        Width = ElementCount::getFixed(1);
    }
    VectorTy = ToVectorTy(getLoadStoreType(I), Width);
    return getMemoryInstructionCost(I, VF);
  }
  case Instruction::BitCast:
    if (I->getType()->isPointerTy())
      return 0;
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc: {
    // Computes the CastContextHint from a Load/Store instruction.
    auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
      assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
             "Expected a load or a store!");

      if (VF.isScalar() || !TheLoop->contains(I))
        return TTI::CastContextHint::Normal;

      switch (getWideningDecision(I, VF)) {
      case LoopVectorizationCostModel::CM_GatherScatter:
        return TTI::CastContextHint::GatherScatter;
      case LoopVectorizationCostModel::CM_Interleave:
        return TTI::CastContextHint::Interleave;
      case LoopVectorizationCostModel::CM_Scalarize:
      case LoopVectorizationCostModel::CM_Widen:
        return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
                                        : TTI::CastContextHint::Normal;
      case LoopVectorizationCostModel::CM_Widen_Reverse:
        return TTI::CastContextHint::Reversed;
      case LoopVectorizationCostModel::CM_Unknown:
        llvm_unreachable("Instr did not go through cost modelling?");
      }

      llvm_unreachable("Unhandled case!");
    };

    unsigned Opcode = I->getOpcode();
    TTI::CastContextHint CCH = TTI::CastContextHint::None;
    // For Trunc, the context is the only user, which must be a StoreInst.
    if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
      if (I->hasOneUse())
        if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
          CCH = ComputeCCH(Store);
    }
    // For Z/Sext, the context is the operand, which must be a LoadInst.
    else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
             Opcode == Instruction::FPExt) {
      if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
        CCH = ComputeCCH(Load);
    }

    // We optimize the truncation of induction variables having constant
    // integer steps. The cost of these truncations is the same as the scalar
    // operation.
    if (isOptimizableIVTruncate(I, VF)) {
      auto *Trunc = cast<TruncInst>(I);
      return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
                                  Trunc->getSrcTy(), CCH, CostKind, Trunc);
    }

    // Detect reduction patterns.
    if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
      return *RedCost;

    Type *SrcScalarTy = I->getOperand(0)->getType();
    Type *SrcVecTy =
        VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
    if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or turn it
      // into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
      //
      // Calculate the modified src and dest types.
      Type *MinVecTy = VectorTy;
      if (Opcode == Instruction::Trunc) {
        SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
        SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      }
    }

    return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
  }
  case Instruction::Call: {
    if (RecurrenceDescriptor::isFMulAddIntrinsic(I))
      if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
        return *RedCost;
    bool NeedToScalarize;
    CallInst *CI = cast<CallInst>(I);
    InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
    if (getVectorIntrinsicIDForCall(CI, TLI)) {
      InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
      return std::min(CallCost, IntrinsicCost);
    }
    return CallCost;
  }
  case Instruction::ExtractValue:
    return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
  case Instruction::Alloca:
    // We cannot easily widen an alloca to a scalable alloca, as
    // the result would need to be a vector of pointers.
    if (VF.isScalable())
      return InstructionCost::getInvalid();
    LLVM_FALLTHROUGH;
  default:
    // This opcode is unknown. Assume that it is the same as 'mul'.
    return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
  } // end of switch.
}

char LoopVectorize::ID = 0;

static const char lv_name[] = "Loop Vectorization";

INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)

namespace llvm {

Pass *createLoopVectorizePass() { return new LoopVectorize(); }

Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
                              bool VectorizeOnlyWhenForced) {
  return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
}

} // end namespace llvm

bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
  // Check if the pointer operand of a load or store instruction is
  // consecutive.
  if (auto *Ptr = getLoadStorePointerOperand(Inst))
    return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr);
  return false;
}

void LoopVectorizationCostModel::collectValuesToIgnore() {
  // Ignore ephemeral values.
  CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);

  // Ignore type-promoting instructions we identified during reduction
  // detection.
  for (auto &Reduction : Legal->getReductionVars()) {
    const RecurrenceDescriptor &RedDes = Reduction.second;
    const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }
  // Ignore type-casting instructions we identified during induction
  // detection.
  for (auto &Induction : Legal->getInductionVars()) {
    const InductionDescriptor &IndDes = Induction.second;
    const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }
}

void LoopVectorizationCostModel::collectInLoopReductions() {
  for (auto &Reduction : Legal->getReductionVars()) {
    PHINode *Phi = Reduction.first;
    const RecurrenceDescriptor &RdxDesc = Reduction.second;

    // We don't collect reductions that are type-promoted (yet).
    if (RdxDesc.getRecurrenceType() != Phi->getType())
      continue;

    // If the target would prefer this reduction to happen "in-loop", then we
    // want to record it as such.
    unsigned Opcode = RdxDesc.getOpcode();
    if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
        !TTI.preferInLoopReduction(Opcode, Phi->getType(),
                                   TargetTransformInfo::ReductionFlags()))
      continue;

    // Check that we can correctly put the reductions into the loop, by
    // finding the chain of operations that leads from the phi to the loop
    // exit value.
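    // (A hedged sketch of such a chain, with names assumed for illustration:
    // for s += a[i] + b[i], the chain from the phi to the exit value would be
    // {add1, add2} in
    //   %phi  = phi i32 [ 0, %preheader ], [ %add2, %latch ]
    //   %add1 = add i32 %phi, %la
    //   %add2 = add i32 %add1, %lb
    // so that both adds can later be replaced by in-loop reduce operations.)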
    SmallVector<Instruction *, 4> ReductionOperations =
        RdxDesc.getReductionOpChain(Phi, TheLoop);
    bool InLoop = !ReductionOperations.empty();
    if (InLoop) {
      InLoopReductionChains[Phi] = ReductionOperations;
      // Add the elements to InLoopReductionImmediateChains for cost modelling.
      Instruction *LastChain = Phi;
      for (auto *I : ReductionOperations) {
        InLoopReductionImmediateChains[I] = LastChain;
        LastChain = I;
      }
    }
    LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
                      << " reduction for phi: " << *Phi << "\n");
  }
}

// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
// doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
                                 LoopVectorizationCostModel &CM) {
  unsigned WidestType;
  std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
  return WidestVectorRegBits / WidestType;
}

VectorizationFactor
LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
  assert(!UserVF.isScalable() && "scalable vectors not yet supported");
  ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
  if (!OrigLoop->isInnermost()) {
    // If the user doesn't provide a vectorization factor, determine a
    // reasonable one.
    if (UserVF.isZero()) {
      VF = ElementCount::getFixed(determineVPlanVF(
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize(),
          CM));
      LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");

      // Make sure we have a VF > 1 for stress testing.
      if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
        LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
                          << "overriding computed VF.\n");
        VF = ElementCount::getFixed(4);
      }
    }
    assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
    assert(isPowerOf2_32(VF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
                      << "VF " << VF << " to build VPlans.\n");
    buildVPlans(VF, VF);

    // For VPlan build stress testing, we bail out after VPlan construction.
    if (VPlanBuildStressTest)
      return VectorizationFactor::Disabled();

    return {VF, 0 /*Cost*/};
  }

  LLVM_DEBUG(
      dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
                "VPlan-native path.\n");
  return VectorizationFactor::Disabled();
}

Optional<VectorizationFactor>
LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");
  FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
  if (!MaxFactors) // Cases that should not be vectorized nor interleaved.
    return None;

  // Invalidate interleave groups if all blocks of the loop will be predicated.
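  // (Informal rationale: once the tail is folded by masking, every access in
  // the loop executes under a mask, so an interleave group would need a
  // masked wide access plus per-member shuffles; a target without
  // masked-interleaved support cannot emit that.)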
  if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
      !useMaskedInterleavedAccesses(*TTI)) {
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate all interleaved groups due to fold-tail by masking "
           "which requires masked-interleaved support.\n");
    if (CM.InterleaveInfo.invalidateGroups())
      // Invalidating interleave groups also requires invalidating all
      // decisions based on them, which includes widening decisions and
      // uniform and scalar values.
      CM.invalidateCostModelingDecisions();
  }

  ElementCount MaxUserVF =
      UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
  bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
  if (!UserVF.isZero() && UserVFIsLegal) {
    assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    if (CM.selectUserVectorizationFactor(UserVF)) {
      LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
      CM.collectInLoopReductions();
      buildVPlansWithVPRecipes(UserVF, UserVF);
      LLVM_DEBUG(printPlans(dbgs()));
      return {{UserVF, 0}};
    } else
      reportVectorizationInfo("UserVF ignored because of invalid costs.",
                              "InvalidCost", ORE, OrigLoop);
  }

  // Populate the set of Vectorization Factor Candidates.
  ElementCountSet VFCandidates;
  for (auto VF = ElementCount::getFixed(1);
       ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
    VFCandidates.insert(VF);
  for (auto VF = ElementCount::getScalable(1);
       ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
    VFCandidates.insert(VF);

  for (const auto &VF : VFCandidates) {
    // Collect Uniform and Scalar instructions after vectorization with VF.
    CM.collectUniformsAndScalars(VF);

    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    if (VF.isVector())
      CM.collectInstsToScalarize(VF);
  }

  CM.collectInLoopReductions();
  buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
  buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);

  LLVM_DEBUG(printPlans(dbgs()));
  if (!MaxFactors.hasVector())
    return VectorizationFactor::Disabled();

  // Select the optimal vectorization factor.
  auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);

  // Check if it is profitable to vectorize with runtime checks.
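  // (Illustration under assumed aliasing: a loop such as
  //   for (i = 0; i < n; i++) A[i] = B[i] + C[i];
  // with A, B and C of unknown provenance needs overlap checks of A against
  // B and A against C before entering the vector loop; the number of checks
  // grows roughly quadratically with the number of pointer groups, so past a
  // threshold the up-front checks outweigh the vector speedup.)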
  unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
  if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
    bool PragmaThresholdReached =
        NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
    bool ThresholdReached =
        NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
    if ((ThresholdReached && !Hints.allowReordering()) ||
        PragmaThresholdReached) {
      ORE->emit([&]() {
        return OptimizationRemarkAnalysisAliasing(
                   DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
                   OrigLoop->getHeader())
               << "loop not vectorized: cannot prove it is safe to reorder "
                  "memory operations";
      });
      LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
      Hints.emitRemarkWithHints();
      return VectorizationFactor::Disabled();
    }
  }
  return SelectedVF;
}

VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
  assert(count_if(VPlans,
                  [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
             1 &&
         "Best VF does not have a single VPlan.");

  for (const VPlanPtr &Plan : VPlans) {
    if (Plan->hasVF(VF))
      return *Plan.get();
  }
  llvm_unreachable("No plan found!");
}

static void AddRuntimeUnrollDisableMetaData(Loop *L) {
  SmallVector<Metadata *, 4> MDs;
  // Reserve the first location for a self reference to the LoopID metadata
  // node.
  MDs.push_back(nullptr);
  bool IsUnrollMetadata = false;
  MDNode *LoopID = L->getLoopID();
  if (LoopID) {
    // First find existing loop unrolling disable metadata.
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
      if (MD) {
        const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata =
            S && S->getString().startswith("llvm.loop.unroll.disable");
      }
      MDs.push_back(LoopID->getOperand(i));
    }
  }

  if (!IsUnrollMetadata) {
    // Add runtime unroll disable metadata.
    LLVMContext &Context = L->getHeader()->getContext();
    SmallVector<Metadata *, 1> DisableOperands;
    DisableOperands.push_back(
        MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
    MDNode *DisableNode = MDNode::get(Context, DisableOperands);
    MDs.push_back(DisableNode);
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);
    L->setLoopID(NewLoopID);
  }
}

void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
                                           VPlan &BestVPlan,
                                           InnerLoopVectorizer &ILV,
                                           DominatorTree *DT) {
  LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF
                    << ", UF=" << BestUF << '\n');

  // Perform the actual loop transformation.

  // 1. Create a new empty loop. Unlink the old loop and connect the new one.
  VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
  Value *CanonicalIVStartValue;
  std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
      ILV.createVectorizedLoopSkeleton();
  ILV.collectPoisonGeneratingRecipes(State);

  ILV.printDebugTracesAtStart();

  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
  //
  //===------------------------------------------------===//

  // 2. Copy and widen instructions from the old loop into the new loop.
  BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
                             ILV.getOrCreateVectorTripCount(nullptr),
                             CanonicalIVStartValue, State);
  BestVPlan.execute(&State);

  // Keep all loop hints from the original loop on the vector loop (we'll
  // replace the vectorizer-specific hints below).
  MDNode *OrigLoopID = OrigLoop->getLoopID();

  Optional<MDNode *> VectorizedLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupVectorized});

  Loop *L = LI->getLoopFor(State.CFG.PrevBB);
  if (VectorizedLoopID.hasValue())
    L->setLoopID(VectorizedLoopID.getValue());
  else {
    if (MDNode *LID = OrigLoop->getLoopID())
      L->setLoopID(LID);

    LoopVectorizeHints Hints(L, true, *ORE);
    Hints.setAlreadyVectorized();
  }
  // Disable runtime unrolling when vectorizing the epilogue loop.
  if (CanonicalIVStartValue)
    AddRuntimeUnrollDisableMetaData(L);

  // 3. Fix the vectorized code: take care of header phi's, live-outs,
  //    predication, updating analyses.
  ILV.fixVectorizedLoop(State);

  ILV.printDebugTracesAtEnd();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
  for (const auto &Plan : VPlans)
    if (PrintVPlansInDotFormat)
      Plan->printDOT(O);
    else
      Plan->print(O);
}
#endif

void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
    SmallPtrSetImpl<Instruction *> &DeadInstructions) {

  // We create new control flow for the vectorized loop, so the original exit
  // conditions will be dead after vectorization if they are only used by the
  // terminator.
  SmallVector<BasicBlock *> ExitingBlocks;
  OrigLoop->getExitingBlocks(ExitingBlocks);
  for (auto *BB : ExitingBlocks) {
    auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
    if (!Cmp || !Cmp->hasOneUse())
      continue;

    // TODO: we should introduce a getUniqueExitingBlocks on Loop
    if (!DeadInstructions.insert(Cmp).second)
      continue;

    // One of the icmp's operands is often a dead trunc, used by IndUpdate.
    // TODO: can recurse through operands in general
    for (Value *Op : Cmp->operands()) {
      if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
    }
  }

  // We create new "steps" for induction variable updates to which the original
  // induction variables map. An original update instruction will be dead if
  // all its users except the induction variable are dead.
  auto *Latch = OrigLoop->getLoopLatch();
  for (auto &Induction : Legal->getInductionVars()) {
    PHINode *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
    if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
      continue;

    if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          return U == Ind || DeadInstructions.count(cast<Instruction>(U));
        }))
      DeadInstructions.insert(IndUpdate);
  }
}

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

//===--------------------------------------------------------------------===//
// EpilogueVectorizerMainLoop
//===--------------------------------------------------------------------===//

/// This function is partially responsible for generating the control flow
/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
std::pair<BasicBlock *, Value *>
EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
  MDNode *OrigLoopID = OrigLoop->getLoopID();
  Loop *Lp = createVectorLoopSkeleton("");

  // Generate the code to check the minimum iteration count of the vector
  // epilogue (see below).
  EPI.EpilogueIterationCountCheck =
      emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
  EPI.EpilogueIterationCountCheck->setName("iter.check");

  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
  EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);

  // Generate the iteration count check for the main loop, *after* the check
  // for the epilogue loop, so that the path length is shorter for the case
  // that goes directly through the vector epilogue. The longer path length
  // for the main loop is compensated for by the gain from vectorizing the
  // larger trip count. Note: the branch will get updated later on when we
  // vectorize the epilogue.
  EPI.MainLoopIterationCountCheck =
      emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);

  // Generate the induction variable.
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  EPI.VectorTripCount = CountRoundDown;
  createHeaderBranch(Lp);

  // Skip induction resume value creation here because the values will be
  // created in the second pass. If we created them here, they wouldn't be
  // used anyway, because the VPlan in the second pass still contains the
  // inductions from the original loop.

  return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
}

void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
  LLVM_DEBUG({
    dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
           << "Main Loop VF:" << EPI.MainLoopVF
           << ", Main Loop UF:" << EPI.MainLoopUF
           << ", Epilogue Loop VF:" << EPI.EpilogueVF
           << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  });
}

void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
  DEBUG_WITH_TYPE(VerboseDebug, {
    dbgs() << "intermediate fn:\n"
           << *OrigLoop->getHeader()->getParent() << "\n";
  });
}

BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
    Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
  assert(L && "Expected valid Loop.");
  assert(Bypass && "Expected valid bypass basic block.");
  ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF;
  unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
  Value *Count = getOrCreateTripCount(L);
  // Reuse the existing vector loop preheader for the TC checks.
  // Note that a new preheader block is generated for the vector loop.
  BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
  IRBuilder<> Builder(TCCheckBlock->getTerminator());

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the main vector loop.
  auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF)
               ? ICmpInst::ICMP_ULE
               : ICmpInst::ICMP_ULT;

  Value *CheckMinIters = Builder.CreateICmp(
      P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor),
      "min.iters.check");

  if (!ForEpilogue)
    TCCheckBlock->setName("vector.main.loop.iter.check");

  // Create a new preheader for the vector loop.
  LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
                                   DT, LI, nullptr, "vector.ph");

  if (ForEpilogue) {
    assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
                                 DT->getNode(Bypass)->getIDom()) &&
           "TC check is expected to dominate Bypass");

    // Update dominator for Bypass & LoopExit.
    DT->changeImmediateDominator(Bypass, TCCheckBlock);
    if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
      // For loops with multiple exits, there's no edge from the middle block
      // to exit blocks (as the epilogue must run) and thus no need to update
      // the immediate dominator of the exit blocks.
      DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);

    LoopBypassBlocks.push_back(TCCheckBlock);

    // Save the trip count so we don't have to regenerate it in the
    // vec.epilog.iter.check. This is safe to do because the trip count
    // generated here dominates the vector epilog iter check.
    EPI.TripCount = Count;
  }

  ReplaceInstWithInst(
      TCCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));

  return TCCheckBlock;
}

//===--------------------------------------------------------------------===//
// EpilogueVectorizerEpilogueLoop
//===--------------------------------------------------------------------===//

/// This function is partially responsible for generating the control flow
/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
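///
/// A rough sketch of that control flow, simplified from the documentation
/// linked above (block names follow the setName() calls in this file):
///
///   iter.check --(too few iterations)--------------------------+
///       |                                                      |
///   vector.main.loop.iter.check --(skip main loop)--+          |
///       |                                           |          |
///   main vector loop                                |          |
///       |                                           v          |
///       +----------------------> vec.epilog.iter.check         |
///                                    |           |             |
///                         epilogue vector loop   |             v
///                                    |           +------> scalar loop
///                                    +--------------------------^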
std::pair<BasicBlock *, Value *>
EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
  MDNode *OrigLoopID = OrigLoop->getLoopID();
  Loop *Lp = createVectorLoopSkeleton("vec.epilog.");

  // Now, compare the remaining count; if there aren't enough iterations to
  // execute the vectorized epilogue, skip to the scalar part.
  BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
  VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
  LoopVectorPreHeader =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, "vec.epilog.ph");
  emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
                                          VecEpilogueIterationCountCheck);

  // Adjust the control flow taking the state info from the main loop
  // vectorization into account.
  assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
         "expected this to be saved from the previous pass.");
  EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
      VecEpilogueIterationCountCheck, LoopVectorPreHeader);

  DT->changeImmediateDominator(LoopVectorPreHeader,
                               EPI.MainLoopIterationCountCheck);

  EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
      VecEpilogueIterationCountCheck, LoopScalarPreHeader);

  if (EPI.SCEVSafetyCheck)
    EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
        VecEpilogueIterationCountCheck, LoopScalarPreHeader);
  if (EPI.MemSafetyCheck)
    EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
        VecEpilogueIterationCountCheck, LoopScalarPreHeader);

  DT->changeImmediateDominator(
      VecEpilogueIterationCountCheck,
      VecEpilogueIterationCountCheck->getSinglePredecessor());

  DT->changeImmediateDominator(LoopScalarPreHeader,
                               EPI.EpilogueIterationCountCheck);
  if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
    // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
    // dominator of the exit blocks.
    DT->changeImmediateDominator(LoopExitBlock,
                                 EPI.EpilogueIterationCountCheck);

  // Keep track of the bypass blocks, as they feed start values to the
  // induction phis in the scalar loop preheader.
  if (EPI.SCEVSafetyCheck)
    LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
  if (EPI.MemSafetyCheck)
    LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
  LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);

  // The vec.epilog.iter.check block may contain Phi nodes from reductions
  // which merge control flow from the latch block and the middle block.
  // Update the incoming values here and move the Phi into the preheader.
  SmallVector<PHINode *, 4> PhisInBlock;
  for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
    PhisInBlock.push_back(&Phi);

  for (PHINode *Phi : PhisInBlock) {
    Phi->replaceIncomingBlockWith(
        VecEpilogueIterationCountCheck->getSinglePredecessor(),
        VecEpilogueIterationCountCheck);
    Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
    if (EPI.SCEVSafetyCheck)
      Phi->removeIncomingValue(EPI.SCEVSafetyCheck);
    if (EPI.MemSafetyCheck)
      Phi->removeIncomingValue(EPI.MemSafetyCheck);
    Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI());
  }

  // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
  Type *IdxTy = Legal->getWidestInductionType();
  PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
                                         LoopVectorPreHeader->getFirstNonPHI());
  EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
  EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
                           EPI.MainLoopIterationCountCheck);

  // Generate the induction variable.
  createHeaderBranch(Lp);

  // Generate induction resume values. These variables save the new starting
  // indexes for the scalar loop. They are used to test if there are any tail
  // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to the iteration
  // count check, the resume value for the induction variable comes from
  // the trip count of the main vector loop, hence passing the AdditionalBypass
  // argument.
  createInductionResumeValues(Lp, {VecEpilogueIterationCountCheck,
                                   EPI.VectorTripCount} /* AdditionalBypass */);

  return {completeLoopSkeleton(Lp, OrigLoopID), EPResumeVal};
}

BasicBlock *
EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
    Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {

  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  assert(
      (!isa<Instruction>(EPI.TripCount) ||
       DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
      "saved trip count does not dominate insertion point.");
  Value *TC = EPI.TripCount;
  IRBuilder<> Builder(Insert->getTerminator());
  Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the vector epilogue loop.
  auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ? ICmpInst::ICMP_ULE
                                                        : ICmpInst::ICMP_ULT;
8318 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8319 8320 Value *CheckMinIters = 8321 Builder.CreateICmp(P, Count, 8322 createStepForVF(Builder, Count->getType(), 8323 EPI.EpilogueVF, EPI.EpilogueUF), 8324 "min.epilog.iters.check"); 8325 8326 ReplaceInstWithInst( 8327 Insert->getTerminator(), 8328 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8329 8330 LoopBypassBlocks.push_back(Insert); 8331 return Insert; 8332 } 8333 8334 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { 8335 LLVM_DEBUG({ 8336 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" 8337 << "Epilogue Loop VF:" << EPI.EpilogueVF 8338 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8339 }); 8340 } 8341 8342 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { 8343 DEBUG_WITH_TYPE(VerboseDebug, { 8344 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n"; 8345 }); 8346 } 8347 8348 bool LoopVectorizationPlanner::getDecisionAndClampRange( 8349 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { 8350 assert(!Range.isEmpty() && "Trying to test an empty VF range."); 8351 bool PredicateAtRangeStart = Predicate(Range.Start); 8352 8353 for (ElementCount TmpVF = Range.Start * 2; 8354 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) 8355 if (Predicate(TmpVF) != PredicateAtRangeStart) { 8356 Range.End = TmpVF; 8357 break; 8358 } 8359 8360 return PredicateAtRangeStart; 8361 } 8362 8363 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8364 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8365 /// of VF's starting at a given VF and extending it as much as possible. Each 8366 /// vectorization decision can potentially shorten this sub-range during 8367 /// buildVPlan(). 8368 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8369 ElementCount MaxVF) { 8370 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8371 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8372 VFRange SubRange = {VF, MaxVFPlusOne}; 8373 VPlans.push_back(buildVPlan(SubRange)); 8374 VF = SubRange.End; 8375 } 8376 } 8377 8378 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8379 VPlanPtr &Plan) { 8380 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8381 8382 // Look for cached value. 8383 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8384 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8385 if (ECEntryIt != EdgeMaskCache.end()) 8386 return ECEntryIt->second; 8387 8388 VPValue *SrcMask = createBlockInMask(Src, Plan); 8389 8390 // The terminator has to be a branch inst! 8391 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8392 assert(BI && "Unexpected terminator found"); 8393 8394 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8395 return EdgeMaskCache[Edge] = SrcMask; 8396 8397 // If source is an exiting block, we know the exit edge is dynamically dead 8398 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8399 // adding uses of an otherwise potentially dead instruction. 
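// ---------------------------------------------------------------------------
// Editorial example (aside, not from the upstream sources), illustrating how
// getDecisionAndClampRange and buildVPlans above cooperate. Suppose
// MinVF = 4, MaxVF = 16, and some predicate holds for VF = 4 and VF = 8 but
// not for VF = 16. For the initial range [4, 17):
//   - the predicate is evaluated at Range.Start = 4 (true);
//   - VF = 8 agrees, VF = 16 disagrees, so Range.End is clamped to 16;
//   - the VPlan built for the clamped sub-range [4, 16) covers VF 4 and 8,
//     and buildVPlans continues with a fresh sub-range starting at VF = 16.
// Each VPlan thus covers a maximal run of consecutive power-of-two VFs for
// which all queried predicates return the same answers.
// ---------------------------------------------------------------------------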
8400 if (OrigLoop->isLoopExiting(Src)) 8401 return EdgeMaskCache[Edge] = SrcMask; 8402 8403 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8404 assert(EdgeMask && "No Edge Mask found for condition"); 8405 8406 if (BI->getSuccessor(0) != Dst) 8407 EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc()); 8408 8409 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 8410 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8411 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8412 // The select version does not introduce new UB if SrcMask is false and 8413 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8414 VPValue *False = Plan->getOrAddVPValue( 8415 ConstantInt::getFalse(BI->getCondition()->getType())); 8416 EdgeMask = 8417 Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc()); 8418 } 8419 8420 return EdgeMaskCache[Edge] = EdgeMask; 8421 } 8422 8423 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8424 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8425 8426 // Look for cached value. 8427 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8428 if (BCEntryIt != BlockMaskCache.end()) 8429 return BCEntryIt->second; 8430 8431 // All-one mask is modelled as no-mask following the convention for masked 8432 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8433 VPValue *BlockMask = nullptr; 8434 8435 if (OrigLoop->getHeader() == BB) { 8436 if (!CM.blockNeedsPredicationForAnyReason(BB)) 8437 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8438 8439 // Introduce the early-exit compare IV <= BTC to form header block mask. 8440 // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by 8441 // constructing the desired canonical IV in the header block as its first 8442 // non-phi instructions. 8443 assert(CM.foldTailByMasking() && "must fold the tail"); 8444 VPBasicBlock *HeaderVPBB = Plan->getEntry()->getEntryBasicBlock(); 8445 auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi(); 8446 auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV()); 8447 HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi()); 8448 8449 VPBuilder::InsertPointGuard Guard(Builder); 8450 Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint); 8451 if (CM.TTI.emitGetActiveLaneMask()) { 8452 VPValue *TC = Plan->getOrCreateTripCount(); 8453 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC}); 8454 } else { 8455 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8456 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8457 } 8458 return BlockMaskCache[BB] = BlockMask; 8459 } 8460 8461 // This is the block mask. We OR all incoming edges. 8462 for (auto *Predecessor : predecessors(BB)) { 8463 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8464 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8465 return BlockMaskCache[BB] = EdgeMask; 8466 8467 if (!BlockMask) { // BlockMask has its initialized nullptr value. 
8468 BlockMask = EdgeMask; 8469 continue; 8470 } 8471 8472 BlockMask = Builder.createOr(BlockMask, EdgeMask, {}); 8473 } 8474 8475 return BlockMaskCache[BB] = BlockMask; 8476 } 8477 8478 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8479 ArrayRef<VPValue *> Operands, 8480 VFRange &Range, 8481 VPlanPtr &Plan) { 8482 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8483 "Must be called with either a load or store"); 8484 8485 auto willWiden = [&](ElementCount VF) -> bool { 8486 if (VF.isScalar()) 8487 return false; 8488 LoopVectorizationCostModel::InstWidening Decision = 8489 CM.getWideningDecision(I, VF); 8490 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8491 "CM decision should be taken at this point."); 8492 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8493 return true; 8494 if (CM.isScalarAfterVectorization(I, VF) || 8495 CM.isProfitableToScalarize(I, VF)) 8496 return false; 8497 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8498 }; 8499 8500 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8501 return nullptr; 8502 8503 VPValue *Mask = nullptr; 8504 if (Legal->isMaskRequired(I)) 8505 Mask = createBlockInMask(I->getParent(), Plan); 8506 8507 // Determine if the pointer operand of the access is either consecutive or 8508 // reverse consecutive. 8509 LoopVectorizationCostModel::InstWidening Decision = 8510 CM.getWideningDecision(I, Range.Start); 8511 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse; 8512 bool Consecutive = 8513 Reverse || Decision == LoopVectorizationCostModel::CM_Widen; 8514 8515 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8516 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask, 8517 Consecutive, Reverse); 8518 8519 StoreInst *Store = cast<StoreInst>(I); 8520 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8521 Mask, Consecutive, Reverse); 8522 } 8523 8524 static VPWidenIntOrFpInductionRecipe * 8525 createWidenInductionRecipe(PHINode *Phi, Instruction *PhiOrTrunc, 8526 VPValue *Start, const InductionDescriptor &IndDesc, 8527 LoopVectorizationCostModel &CM, Loop &OrigLoop, 8528 VFRange &Range) { 8529 // Returns true if an instruction \p I should be scalarized instead of 8530 // vectorized for the chosen vectorization factor. 8531 auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) { 8532 return CM.isScalarAfterVectorization(I, VF) || 8533 CM.isProfitableToScalarize(I, VF); 8534 }; 8535 8536 bool NeedsScalarIV = LoopVectorizationPlanner::getDecisionAndClampRange( 8537 [&](ElementCount VF) { 8538 // Returns true if we should generate a scalar version of \p IV. 
8539 if (ShouldScalarizeInstruction(PhiOrTrunc, VF)) 8540 return true; 8541 auto isScalarInst = [&](User *U) -> bool { 8542 auto *I = cast<Instruction>(U); 8543 return OrigLoop.contains(I) && ShouldScalarizeInstruction(I, VF); 8544 }; 8545 return any_of(PhiOrTrunc->users(), isScalarInst); 8546 }, 8547 Range); 8548 bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange( 8549 [&](ElementCount VF) { 8550 return ShouldScalarizeInstruction(PhiOrTrunc, VF); 8551 }, 8552 Range); 8553 assert(IndDesc.getStartValue() == 8554 Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader())); 8555 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) { 8556 return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, TruncI, 8557 NeedsScalarIV, !NeedsScalarIVOnly); 8558 } 8559 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here"); 8560 return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, NeedsScalarIV, 8561 !NeedsScalarIVOnly); 8562 } 8563 8564 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionPHI( 8565 PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) const { 8566 8567 // Check if this is an integer or fp induction. If so, build the recipe that 8568 // produces its scalar and vector values. 8569 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi)) 8570 return createWidenInductionRecipe(Phi, Phi, Operands[0], *II, CM, *OrigLoop, 8571 Range); 8572 8573 return nullptr; 8574 } 8575 8576 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8577 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, 8578 VPlan &Plan) const { 8579 // Optimize the special case where the source is a constant integer 8580 // induction variable. Notice that we can only optimize the 'trunc' case 8581 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8582 // (c) other casts depend on pointer size. 8583 8584 // Determine whether \p K is a truncation based on an induction variable that 8585 // can be optimized. 8586 auto isOptimizableIVTruncate = 8587 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8588 return [=](ElementCount VF) -> bool { 8589 return CM.isOptimizableIVTruncate(K, VF); 8590 }; 8591 }; 8592 8593 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8594 isOptimizableIVTruncate(I), Range)) { 8595 8596 auto *Phi = cast<PHINode>(I->getOperand(0)); 8597 const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi); 8598 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8599 return createWidenInductionRecipe(Phi, I, Start, II, CM, *OrigLoop, Range); 8600 } 8601 return nullptr; 8602 } 8603 8604 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8605 ArrayRef<VPValue *> Operands, 8606 VPlanPtr &Plan) { 8607 // If all incoming values are equal, the incoming VPValue can be used directly 8608 // instead of creating a new VPBlendRecipe. 8609 VPValue *FirstIncoming = Operands[0]; 8610 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8611 return FirstIncoming == Inc; 8612 })) { 8613 return Operands[0]; 8614 } 8615 8616 // We know that all PHIs in non-header blocks are converted into selects, so 8617 // we don't have to worry about the insertion order and we can just use the 8618 // builder. At this point we generate the predication tree. There may be 8619 // duplications since this is a simple recursive scan, but future 8620 // optimizations will clean it up. 
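// ---------------------------------------------------------------------------
// Editorial example (aside; the IR names are hypothetical): a phi merging
// three predicated incoming values, e.g.
//   %p = phi i32 [ %v0, %bb0 ], [ %v1, %bb1 ], [ %v2, %bb2 ]
// becomes a VPBlendRecipe whose operand list interleaves each incoming value
// with the mask of its edge: (v0, m0, v1, m1, v2, m2).
// VPBlendRecipe::execute, further down in this file, lowers this to a chain
// of selects.
// ---------------------------------------------------------------------------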
8621   SmallVector<VPValue *, 2> OperandsWithMask;
8622   unsigned NumIncoming = Phi->getNumIncomingValues();
8623
8624   for (unsigned In = 0; In < NumIncoming; In++) {
8625     VPValue *EdgeMask =
8626         createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8627     assert((EdgeMask || NumIncoming == 1) &&
8628            "Multiple predecessors with one having a full mask");
8629     OperandsWithMask.push_back(Operands[In]);
8630     if (EdgeMask)
8631       OperandsWithMask.push_back(EdgeMask);
8632   }
8633   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8634 }
8635
8636 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8637                                                    ArrayRef<VPValue *> Operands,
8638                                                    VFRange &Range) const {
8639
8640   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8641       [this, CI](ElementCount VF) {
8642         return CM.isScalarWithPredication(CI, VF);
8643       },
8644       Range);
8645
8646   if (IsPredicated)
8647     return nullptr;
8648
8649   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8650   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8651              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8652              ID == Intrinsic::pseudoprobe ||
8653              ID == Intrinsic::experimental_noalias_scope_decl))
8654     return nullptr;
8655
8656   auto willWiden = [&](ElementCount VF) -> bool {
8657     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8658     // The following case may be scalarized depending on the VF.
8659     // The flag indicates whether we use an intrinsic or a plain call for the
8660     // vectorized version of the instruction, i.e. whether it is beneficial
8661     // to use the intrinsic call rather than the library call.
8662     bool NeedToScalarize = false;
8663     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8664     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8665     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8666     return UseVectorIntrinsic || !NeedToScalarize;
8667   };
8668
8669   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8670     return nullptr;
8671
8672   ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
8673   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8674 }
8675
8676 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8677   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8678          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8679   // An instruction should be widened unless it is scalar after vectorization,
8680   // scalarization is profitable, or it is predicated.
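// ---------------------------------------------------------------------------
// Editorial example (aside): an address computation used only by scalarized
// memory accesses is typically "scalar after vectorization"; for such an
// instruction the WillScalarize predicate below holds across the whole range,
// shouldWiden returns false, and the instruction is later handled by
// replication (handleReplication) instead of by a widened recipe.
// ---------------------------------------------------------------------------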
8681 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8682 return CM.isScalarAfterVectorization(I, VF) || 8683 CM.isProfitableToScalarize(I, VF) || 8684 CM.isScalarWithPredication(I, VF); 8685 }; 8686 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8687 Range); 8688 } 8689 8690 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8691 ArrayRef<VPValue *> Operands) const { 8692 auto IsVectorizableOpcode = [](unsigned Opcode) { 8693 switch (Opcode) { 8694 case Instruction::Add: 8695 case Instruction::And: 8696 case Instruction::AShr: 8697 case Instruction::BitCast: 8698 case Instruction::FAdd: 8699 case Instruction::FCmp: 8700 case Instruction::FDiv: 8701 case Instruction::FMul: 8702 case Instruction::FNeg: 8703 case Instruction::FPExt: 8704 case Instruction::FPToSI: 8705 case Instruction::FPToUI: 8706 case Instruction::FPTrunc: 8707 case Instruction::FRem: 8708 case Instruction::FSub: 8709 case Instruction::ICmp: 8710 case Instruction::IntToPtr: 8711 case Instruction::LShr: 8712 case Instruction::Mul: 8713 case Instruction::Or: 8714 case Instruction::PtrToInt: 8715 case Instruction::SDiv: 8716 case Instruction::Select: 8717 case Instruction::SExt: 8718 case Instruction::Shl: 8719 case Instruction::SIToFP: 8720 case Instruction::SRem: 8721 case Instruction::Sub: 8722 case Instruction::Trunc: 8723 case Instruction::UDiv: 8724 case Instruction::UIToFP: 8725 case Instruction::URem: 8726 case Instruction::Xor: 8727 case Instruction::ZExt: 8728 return true; 8729 } 8730 return false; 8731 }; 8732 8733 if (!IsVectorizableOpcode(I->getOpcode())) 8734 return nullptr; 8735 8736 // Success: widen this instruction. 8737 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); 8738 } 8739 8740 void VPRecipeBuilder::fixHeaderPhis() { 8741 BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); 8742 for (VPHeaderPHIRecipe *R : PhisToFix) { 8743 auto *PN = cast<PHINode>(R->getUnderlyingValue()); 8744 VPRecipeBase *IncR = 8745 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); 8746 R->addOperand(IncR->getVPSingleValue()); 8747 } 8748 } 8749 8750 VPBasicBlock *VPRecipeBuilder::handleReplication( 8751 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8752 VPlanPtr &Plan) { 8753 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8754 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8755 Range); 8756 8757 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8758 [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); }, 8759 Range); 8760 8761 // Even if the instruction is not marked as uniform, there are certain 8762 // intrinsic calls that can be effectively treated as such, so we check for 8763 // them here. Conservatively, we only do this for scalable vectors, since 8764 // for fixed-width VFs we can always fall back on full scalarization. 8765 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) { 8766 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) { 8767 case Intrinsic::assume: 8768 case Intrinsic::lifetime_start: 8769 case Intrinsic::lifetime_end: 8770 // For scalable vectors if one of the operands is variant then we still 8771 // want to mark as uniform, which will generate one instruction for just 8772 // the first lane of the vector. We can't scalarize the call in the same 8773 // way as for fixed-width vectors because we don't know how many lanes 8774 // there are. 
8775       //
8776       // The reasons for doing it this way for scalable vectors are:
8777       //   1. For the assume intrinsic generating the instruction for the first
8778       //      lane is still better than not generating any at all. For
8779       //      example, the input may be a splat across all lanes.
8780       //   2. For the lifetime start/end intrinsics the pointer operand only
8781       //      does anything useful when the input comes from a stack object,
8782       //      which suggests it should always be uniform. For non-stack objects
8783       //      the effect is to poison the object, which still allows us to
8784       //      remove the call.
8785       IsUniform = true;
8786       break;
8787     default:
8788       break;
8789     }
8790   }
8791
8792   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8793                                        IsUniform, IsPredicated);
8794   setRecipe(I, Recipe);
8795   Plan->addVPValue(I, Recipe);
8796
8797   // Find if I uses a predicated instruction. If so, it will use its scalar
8798   // value. Avoid hoisting the insert-element which packs the scalar value into
8799   // a vector value, as that happens iff all users use the vector value.
8800   for (VPValue *Op : Recipe->operands()) {
8801     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8802     if (!PredR)
8803       continue;
8804     auto *RepR =
8805         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8806     assert(RepR->isPredicated() &&
8807            "expected Replicate recipe to be predicated");
8808     RepR->setAlsoPack(false);
8809   }
8810
8811   // Finalize the recipe for Instr, first if it is not predicated.
8812   if (!IsPredicated) {
8813     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8814     VPBB->appendRecipe(Recipe);
8815     return VPBB;
8816   }
8817   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8818
8819   VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
8820   assert(SingleSucc && "VPBB must have a single successor when handling "
8821                        "predicated replication.");
8822   VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
8823   // Record predicated instructions for above packing optimizations.
8824   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8825   VPBlockUtils::insertBlockAfter(Region, VPBB);
8826   auto *RegSucc = new VPBasicBlock();
8827   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8828   VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
8829   return RegSucc;
8830 }
8831
8832 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8833                                                        VPRecipeBase *PredRecipe,
8834                                                        VPlanPtr &Plan) {
8835   // Instructions marked for predication are replicated and placed under an
8836   // if-then construct to prevent side-effects.
8837
8838   // Generate recipes to compute the block mask for this region.
8839   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8840
8841   // Build the triangular if-then region.
8842   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8843   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8844   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8845   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8846   auto *PHIRecipe = Instr->getType()->isVoidTy()
8847                         ?
nullptr 8848 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8849 if (PHIRecipe) { 8850 Plan->removeVPValueFor(Instr); 8851 Plan->addVPValue(Instr, PHIRecipe); 8852 } 8853 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8854 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8855 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8856 8857 // Note: first set Entry as region entry and then connect successors starting 8858 // from it in order, to propagate the "parent" of each VPBasicBlock. 8859 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 8860 VPBlockUtils::connectBlocks(Pred, Exit); 8861 8862 return Region; 8863 } 8864 8865 VPRecipeOrVPValueTy 8866 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8867 ArrayRef<VPValue *> Operands, 8868 VFRange &Range, VPlanPtr &Plan) { 8869 // First, check for specific widening recipes that deal with calls, memory 8870 // operations, inductions and Phi nodes. 8871 if (auto *CI = dyn_cast<CallInst>(Instr)) 8872 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 8873 8874 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 8875 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 8876 8877 VPRecipeBase *Recipe; 8878 if (auto Phi = dyn_cast<PHINode>(Instr)) { 8879 if (Phi->getParent() != OrigLoop->getHeader()) 8880 return tryToBlend(Phi, Operands, Plan); 8881 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range))) 8882 return toVPRecipeResult(Recipe); 8883 8884 VPHeaderPHIRecipe *PhiRecipe = nullptr; 8885 if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) { 8886 VPValue *StartV = Operands[0]; 8887 if (Legal->isReductionVariable(Phi)) { 8888 const RecurrenceDescriptor &RdxDesc = 8889 Legal->getReductionVars().find(Phi)->second; 8890 assert(RdxDesc.getRecurrenceStartValue() == 8891 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8892 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 8893 CM.isInLoopReduction(Phi), 8894 CM.useOrderedReductions(RdxDesc)); 8895 } else { 8896 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 8897 } 8898 8899 // Record the incoming value from the backedge, so we can add the incoming 8900 // value from the backedge after all recipes have been created. 8901 recordRecipeOf(cast<Instruction>( 8902 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 8903 PhisToFix.push_back(PhiRecipe); 8904 } else { 8905 // TODO: record backedge value for remaining pointer induction phis. 
8906       assert(Phi->getType()->isPointerTy() &&
8907              "only pointer phis should be handled here");
8908       assert(Legal->getInductionVars().count(Phi) &&
8909              "Not an induction variable");
8910       InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8911       VPValue *Start = Plan->getOrAddVPValue(II.getStartValue());
8912       PhiRecipe = new VPWidenPHIRecipe(Phi, Start);
8913     }
8914
8915     return toVPRecipeResult(PhiRecipe);
8916   }
8917
8918   if (isa<TruncInst>(Instr) &&
8919       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8920                                                Range, *Plan)))
8921     return toVPRecipeResult(Recipe);
8922
8923   if (!shouldWiden(Instr, Range))
8924     return nullptr;
8925
8926   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8927     return toVPRecipeResult(new VPWidenGEPRecipe(
8928         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8929
8930   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8931     bool InvariantCond =
8932         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8933     return toVPRecipeResult(new VPWidenSelectRecipe(
8934         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8935   }
8936
8937   return toVPRecipeResult(tryToWiden(Instr, Operands));
8938 }
8939
8940 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8941                                                         ElementCount MaxVF) {
8942   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8943
8944   // Collect instructions from the original loop that will become trivially dead
8945   // in the vectorized loop. We don't need to vectorize these instructions. For
8946   // example, original induction update instructions can become dead because we
8947   // separately emit induction "steps" when generating code for the new loop.
8948   // Similarly, we create a new latch condition when setting up the structure
8949   // of the new loop, so the old one can become dead.
8950   SmallPtrSet<Instruction *, 4> DeadInstructions;
8951   collectTriviallyDeadInstructions(DeadInstructions);
8952
8953   // Add assume instructions we need to drop to DeadInstructions, to prevent
8954   // them from being added to the VPlan.
8955   // TODO: We only need to drop assumes in blocks that get flattened. If the
8956   // control flow is preserved, we should keep them.
8957   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8958   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8959
8960   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8961   // Dead instructions do not need sinking. Remove them from SinkAfter.
8962   for (Instruction *I : DeadInstructions)
8963     SinkAfter.erase(I);
8964
8965   // Cannot sink instructions after dead instructions (there won't be any
8966   // recipes for them). Instead, find the first non-dead previous instruction.
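// ---------------------------------------------------------------------------
// Editorial example (aside; hypothetical IR): suppose sink-after recorded
// "sink S after T", but T is a dead induction update such as
//   %x       = load ...          ; live
//   %iv.next = add i64 %iv, 1    ; trivially dead: steps are emitted separately
// with T = %iv.next. No recipe will exist for T, so the loop below walks
// backwards from T to the first non-dead instruction (%x here) and retargets
// the sink-after entry to it.
// ---------------------------------------------------------------------------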
8967 for (auto &P : Legal->getSinkAfter()) { 8968 Instruction *SinkTarget = P.second; 8969 Instruction *FirstInst = &*SinkTarget->getParent()->begin(); 8970 (void)FirstInst; 8971 while (DeadInstructions.contains(SinkTarget)) { 8972 assert( 8973 SinkTarget != FirstInst && 8974 "Must find a live instruction (at least the one feeding the " 8975 "first-order recurrence PHI) before reaching beginning of the block"); 8976 SinkTarget = SinkTarget->getPrevNode(); 8977 assert(SinkTarget != P.first && 8978 "sink source equals target, no sinking required"); 8979 } 8980 P.second = SinkTarget; 8981 } 8982 8983 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8984 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8985 VFRange SubRange = {VF, MaxVFPlusOne}; 8986 VPlans.push_back( 8987 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter)); 8988 VF = SubRange.End; 8989 } 8990 } 8991 8992 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a 8993 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a 8994 // BranchOnCount VPInstruction to the latch. 8995 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL, 8996 bool HasNUW, bool IsVPlanNative) { 8997 Value *StartIdx = ConstantInt::get(IdxTy, 0); 8998 auto *StartV = Plan.getOrAddVPValue(StartIdx); 8999 9000 auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL); 9001 VPRegionBlock *TopRegion = Plan.getVectorLoopRegion(); 9002 VPBasicBlock *Header = TopRegion->getEntryBasicBlock(); 9003 if (IsVPlanNative) 9004 Header = cast<VPBasicBlock>(Header->getSingleSuccessor()); 9005 Header->insert(CanonicalIVPHI, Header->begin()); 9006 9007 auto *CanonicalIVIncrement = 9008 new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW 9009 : VPInstruction::CanonicalIVIncrement, 9010 {CanonicalIVPHI}, DL); 9011 CanonicalIVPHI->addOperand(CanonicalIVIncrement); 9012 9013 VPBasicBlock *EB = TopRegion->getExitBasicBlock(); 9014 if (IsVPlanNative) { 9015 EB = cast<VPBasicBlock>(EB->getSinglePredecessor()); 9016 EB->setCondBit(nullptr); 9017 } 9018 EB->appendRecipe(CanonicalIVIncrement); 9019 9020 auto *BranchOnCount = 9021 new VPInstruction(VPInstruction::BranchOnCount, 9022 {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL); 9023 EB->appendRecipe(BranchOnCount); 9024 } 9025 9026 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( 9027 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, 9028 const MapVector<Instruction *, Instruction *> &SinkAfter) { 9029 9030 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups; 9031 9032 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder); 9033 9034 // --------------------------------------------------------------------------- 9035 // Pre-construction: record ingredients whose recipes we'll need to further 9036 // process after constructing the initial VPlan. 9037 // --------------------------------------------------------------------------- 9038 9039 // Mark instructions we'll need to sink later and their targets as 9040 // ingredients whose recipe we'll need to record. 
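// ---------------------------------------------------------------------------
// Editorial example (aside; hypothetical source): a first-order recurrence
// such as
//   for (i = 1; i < n; i++) out[i] = in[i] + in[i - 1];
// causes legality to record a sink-after pair, so that the add (which reads
// the previous iteration's load through the recurrence phi) is re-emitted
// after the recipe producing the current iteration's load. Both ends of every
// pair are recorded here so their recipes can be looked up and moved once the
// initial VPlan has been built.
// ---------------------------------------------------------------------------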
9041   for (auto &Entry : SinkAfter) {
9042     RecipeBuilder.recordRecipeOf(Entry.first);
9043     RecipeBuilder.recordRecipeOf(Entry.second);
9044   }
9045   for (auto &Reduction : CM.getInLoopReductionChains()) {
9046     PHINode *Phi = Reduction.first;
9047     RecurKind Kind =
9048         Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
9049     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9050
9051     RecipeBuilder.recordRecipeOf(Phi);
9052     for (auto &R : ReductionOperations) {
9053       RecipeBuilder.recordRecipeOf(R);
9054       // For min/max reductions, where we have a pair of icmp/select, we also
9055       // need to record the ICmp recipe, so it can be removed later.
9056       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9057              "Only min/max recurrences allowed for inloop reductions");
9058       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
9059         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
9060     }
9061   }
9062
9063   // For each interleave group which is relevant for this (possibly trimmed)
9064   // Range, add it to the set of groups to be later applied to the VPlan and add
9065   // placeholders for its members' Recipes which we'll be replacing with a
9066   // single VPInterleaveRecipe.
9067   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
9068     auto applyIG = [IG, this](ElementCount VF) -> bool {
9069       return (VF.isVector() && // Query is illegal for VF == 1
9070               CM.getWideningDecision(IG->getInsertPos(), VF) ==
9071                   LoopVectorizationCostModel::CM_Interleave);
9072     };
9073     if (!getDecisionAndClampRange(applyIG, Range))
9074       continue;
9075     InterleaveGroups.insert(IG);
9076     for (unsigned i = 0; i < IG->getFactor(); i++)
9077       if (Instruction *Member = IG->getMember(i))
9078         RecipeBuilder.recordRecipeOf(Member);
9079   }
9080
9081   // ---------------------------------------------------------------------------
9082   // Build initial VPlan: Scan the body of the loop in a topological order to
9083   // visit each basic block after having visited its predecessor basic blocks.
9084   // ---------------------------------------------------------------------------
9085
9086   // Create initial VPlan skeleton, with separate header and latch blocks.
9087   VPBasicBlock *HeaderVPBB = new VPBasicBlock();
9088   VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
9089   VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
9090   auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
9091   auto Plan = std::make_unique<VPlan>(TopRegion);
9092
9093   Instruction *DLInst =
9094       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
9095   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
9096                         DLInst ? DLInst->getDebugLoc() : DebugLoc(),
9097                         !CM.foldTailByMasking(), false);
9098
9099   // Scan the body of the loop in a topological order to visit each basic block
9100   // after having visited its predecessor basic blocks.
9101   LoopBlocksDFS DFS(OrigLoop);
9102   DFS.perform(LI);
9103
9104   VPBasicBlock *VPBB = HeaderVPBB;
9105   SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
9106   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
9107     // Relevant instructions from basic block BB will be grouped into VPRecipe
9108     // ingredients, which fill a new VPBasicBlock.
9109     unsigned VPBBsForBB = 0;
9110     VPBB->setName(BB->getName());
9111     Builder.setInsertPoint(VPBB);
9112
9113     // Introduce each ingredient into VPlan.
9114     // TODO: Model and preserve debug intrinsics in VPlan.
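// ---------------------------------------------------------------------------
// Editorial example (aside; hypothetical source): for a conditional store
//   if (c[i]) a[i] = x;
// the loop below typically produces:
//   %cmp = icmp ...   -> a VPWidenRecipe (plain widening);
//   the branch        -> no recipe; block and edge masks model the control;
//   the store         -> either a masked VPWidenMemoryInstructionRecipe, or a
//                        VPReplicateRecipe wrapped in a "pred.store" replicate
//                        region, depending on the cost model's decision.
// ---------------------------------------------------------------------------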
9115 for (Instruction &I : BB->instructionsWithoutDebug()) { 9116 Instruction *Instr = &I; 9117 9118 // First filter out irrelevant instructions, to ensure no recipes are 9119 // built for them. 9120 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) 9121 continue; 9122 9123 SmallVector<VPValue *, 4> Operands; 9124 auto *Phi = dyn_cast<PHINode>(Instr); 9125 if (Phi && Phi->getParent() == OrigLoop->getHeader()) { 9126 Operands.push_back(Plan->getOrAddVPValue( 9127 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()))); 9128 } else { 9129 auto OpRange = Plan->mapToVPValues(Instr->operands()); 9130 Operands = {OpRange.begin(), OpRange.end()}; 9131 } 9132 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe( 9133 Instr, Operands, Range, Plan)) { 9134 // If Instr can be simplified to an existing VPValue, use it. 9135 if (RecipeOrValue.is<VPValue *>()) { 9136 auto *VPV = RecipeOrValue.get<VPValue *>(); 9137 Plan->addVPValue(Instr, VPV); 9138 // If the re-used value is a recipe, register the recipe for the 9139 // instruction, in case the recipe for Instr needs to be recorded. 9140 if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef())) 9141 RecipeBuilder.setRecipe(Instr, R); 9142 continue; 9143 } 9144 // Otherwise, add the new recipe. 9145 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); 9146 for (auto *Def : Recipe->definedValues()) { 9147 auto *UV = Def->getUnderlyingValue(); 9148 Plan->addVPValue(UV, Def); 9149 } 9150 9151 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && 9152 HeaderVPBB->getFirstNonPhi() != VPBB->end()) { 9153 // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section 9154 // of the header block. That can happen for truncates of induction 9155 // variables. Those recipes are moved to the phi section of the header 9156 // block after applying SinkAfter, which relies on the original 9157 // position of the trunc. 9158 assert(isa<TruncInst>(Instr)); 9159 InductionsToMove.push_back( 9160 cast<VPWidenIntOrFpInductionRecipe>(Recipe)); 9161 } 9162 RecipeBuilder.setRecipe(Instr, Recipe); 9163 VPBB->appendRecipe(Recipe); 9164 continue; 9165 } 9166 9167 // Otherwise, if all widening options failed, Instruction is to be 9168 // replicated. This may create a successor for VPBB. 9169 VPBasicBlock *NextVPBB = 9170 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 9171 if (NextVPBB != VPBB) { 9172 VPBB = NextVPBB; 9173 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 9174 : ""); 9175 } 9176 } 9177 9178 VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB); 9179 VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor()); 9180 } 9181 9182 // Fold the last, empty block into its predecessor. 9183 VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB); 9184 assert(VPBB && "expected to fold last (empty) block"); 9185 // After here, VPBB should not be used. 9186 VPBB = nullptr; 9187 9188 assert(isa<VPRegionBlock>(Plan->getEntry()) && 9189 !Plan->getEntry()->getEntryBasicBlock()->empty() && 9190 "entry block must be set to a VPRegionBlock having a non-empty entry " 9191 "VPBasicBlock"); 9192 RecipeBuilder.fixHeaderPhis(); 9193 9194 // --------------------------------------------------------------------------- 9195 // Transform initial VPlan: Apply previously taken decisions, in order, to 9196 // bring the VPlan to its final state. 9197 // --------------------------------------------------------------------------- 9198 9199 // Apply Sink-After legal constraints. 
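// ---------------------------------------------------------------------------
// Editorial note (aside): the sink-after code below distinguishes three cases,
// based on whether the sink source and the sink target sit inside replicate
// regions:
//   1. sink source not in a region: move the single recipe after the target
//      (or after the target's region, if the target is predicated);
//   2. sink source in a region, target also in a region: unhook the sink
//      region from the CFG and reconnect it after the target region;
//   3. sink source in a region, target not: split the target's block right
//      after the target and splice the sink region in between.
// ---------------------------------------------------------------------------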
9200 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 9201 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 9202 if (Region && Region->isReplicator()) { 9203 assert(Region->getNumSuccessors() == 1 && 9204 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 9205 assert(R->getParent()->size() == 1 && 9206 "A recipe in an original replicator region must be the only " 9207 "recipe in its block"); 9208 return Region; 9209 } 9210 return nullptr; 9211 }; 9212 for (auto &Entry : SinkAfter) { 9213 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 9214 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 9215 9216 auto *TargetRegion = GetReplicateRegion(Target); 9217 auto *SinkRegion = GetReplicateRegion(Sink); 9218 if (!SinkRegion) { 9219 // If the sink source is not a replicate region, sink the recipe directly. 9220 if (TargetRegion) { 9221 // The target is in a replication region, make sure to move Sink to 9222 // the block after it, not into the replication region itself. 9223 VPBasicBlock *NextBlock = 9224 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 9225 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 9226 } else 9227 Sink->moveAfter(Target); 9228 continue; 9229 } 9230 9231 // The sink source is in a replicate region. Unhook the region from the CFG. 9232 auto *SinkPred = SinkRegion->getSinglePredecessor(); 9233 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 9234 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 9235 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 9236 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 9237 9238 if (TargetRegion) { 9239 // The target recipe is also in a replicate region, move the sink region 9240 // after the target region. 9241 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 9242 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 9243 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 9244 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 9245 } else { 9246 // The sink source is in a replicate region, we need to move the whole 9247 // replicate region, which should only contain a single recipe in the 9248 // main block. 9249 auto *SplitBlock = 9250 Target->getParent()->splitAt(std::next(Target->getIterator())); 9251 9252 auto *SplitPred = SplitBlock->getSinglePredecessor(); 9253 9254 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 9255 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 9256 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 9257 } 9258 } 9259 9260 VPlanTransforms::removeRedundantCanonicalIVs(*Plan); 9261 VPlanTransforms::removeRedundantInductionCasts(*Plan); 9262 9263 // Now that sink-after is done, move induction recipes for optimized truncates 9264 // to the phi section of the header block. 9265 for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove) 9266 Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi()); 9267 9268 // Adjust the recipes for any inloop reductions. 9269 adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan, 9270 RecipeBuilder, Range.Start); 9271 9272 // Introduce a recipe to combine the incoming and previous values of a 9273 // first-order recurrence. 
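// ---------------------------------------------------------------------------
// Editorial example (aside): semantics of FirstOrderRecurrenceSplice for
// VF = 4. If the recurrence phi holds the previous iteration's vector
//   Prev = <p0, p1, p2, p3>
// and the backedge value of the current iteration is
//   Cur  = <c0, c1, c2, c3>
// then the splice produces <p3, c0, c1, c2>: each lane observes the value
// produced one scalar iteration earlier, just as the scalar recurrence did.
// ---------------------------------------------------------------------------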
9274   for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9275     auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R);
9276     if (!RecurPhi)
9277       continue;
9278
9279     VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe();
9280     VPBasicBlock *InsertBlock = PrevRecipe->getParent();
9281     auto *Region = GetReplicateRegion(PrevRecipe);
9282     if (Region)
9283       InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor());
9284     if (Region || PrevRecipe->isPhi())
9285       Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
9286     else
9287       Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator()));
9288
9289     auto *RecurSplice = cast<VPInstruction>(
9290         Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
9291                              {RecurPhi, RecurPhi->getBackedgeValue()}));
9292
9293     RecurPhi->replaceAllUsesWith(RecurSplice);
9294     // Set the first operand of RecurSplice to RecurPhi again, after replacing
9295     // all users.
9296     RecurSplice->setOperand(0, RecurPhi);
9297   }
9298
9299   // Interleave memory: for each Interleave Group we marked earlier as relevant
9300   // for this VPlan, replace the Recipes widening its memory instructions with a
9301   // single VPInterleaveRecipe at its insertion point.
9302   for (auto IG : InterleaveGroups) {
9303     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
9304         RecipeBuilder.getRecipe(IG->getInsertPos()));
9305     SmallVector<VPValue *, 4> StoredValues;
9306     for (unsigned i = 0; i < IG->getFactor(); ++i)
9307       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
9308         auto *StoreR =
9309             cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
9310         StoredValues.push_back(StoreR->getStoredValue());
9311       }
9312
9313     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
9314                                         Recipe->getMask());
9315     VPIG->insertBefore(Recipe);
9316     unsigned J = 0;
9317     for (unsigned i = 0; i < IG->getFactor(); ++i)
9318       if (Instruction *Member = IG->getMember(i)) {
9319         if (!Member->getType()->isVoidTy()) {
9320           VPValue *OriginalV = Plan->getVPValue(Member);
9321           Plan->removeVPValueFor(Member);
9322           Plan->addVPValue(Member, VPIG->getVPValue(J));
9323           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
9324           J++;
9325         }
9326         RecipeBuilder.getRecipe(Member)->eraseFromParent();
9327       }
9328   }
9329
9330   // From this point onwards, VPlan-to-VPlan transformations may change the plan
9331   // in ways that invalidate looking up VPValues by their underlying IR values.
9332   Plan->disableValue2VPValue();
9333
9334   VPlanTransforms::sinkScalarOperands(*Plan);
9335   VPlanTransforms::mergeReplicateRegions(*Plan);
9336
9337   std::string PlanName;
9338   raw_string_ostream RSO(PlanName);
9339   ElementCount VF = Range.Start;
9340   Plan->addVF(VF);
9341   RSO << "Initial VPlan for VF={" << VF;
9342   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9343     Plan->addVF(VF);
9344     RSO << "," << VF;
9345   }
9346   RSO << "},UF>=1";
9347   RSO.flush();
9348   Plan->setName(PlanName);
9349
9350   // Fold Exit block into its predecessor if possible.
9351   // TODO: Fold block earlier once all VPlan transforms properly maintain a
9352   // VPBasicBlock as exit.
9353   VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit());
9354
9355   assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
9356   return Plan;
9357 }
9358
9359 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
9360   // Outer loop handling: outer loops may require CFG and instruction level
9361   // transformations before even evaluating whether vectorization is profitable.
9362   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9363   // the vectorization pipeline.
9364   assert(!OrigLoop->isInnermost());
9365   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9366
9367   // Create new empty VPlan
9368   auto Plan = std::make_unique<VPlan>();
9369
9370   // Build hierarchical CFG
9371   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9372   HCFGBuilder.buildHierarchicalCFG();
9373
9374   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9375        VF *= 2)
9376     Plan->addVF(VF);
9377
9378   if (EnableVPlanPredication) {
9379     VPlanPredicator VPP(*Plan);
9380     VPP.predicate();
9381
9382     // Avoid running transformation to recipes until masked code generation in
9383     // VPlan-native path is in place.
9384     return Plan;
9385   }
9386
9387   SmallPtrSet<Instruction *, 1> DeadInstructions;
9388   VPlanTransforms::VPInstructionsToVPRecipes(
9389       OrigLoop, Plan,
9390       [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
9391       DeadInstructions, *PSE.getSE());
9392
9393   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
9394                         true, true);
9395   return Plan;
9396 }
9397
9398 // Adjust the recipes for reductions. For in-loop reductions the chain of
9399 // instructions leading from the loop exit instr to the phi needs to be converted
9400 // to reductions, with one operand being vector and the other being the scalar
9401 // reduction chain. For other reductions, a select is introduced between the phi
9402 // and live-out recipes when folding the tail.
9403 void LoopVectorizationPlanner::adjustRecipesForReductions(
9404     VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9405     ElementCount MinVF) {
9406   for (auto &Reduction : CM.getInLoopReductionChains()) {
9407     PHINode *Phi = Reduction.first;
9408     const RecurrenceDescriptor &RdxDesc =
9409         Legal->getReductionVars().find(Phi)->second;
9410     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9411
9412     if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9413       continue;
9414
9415     // ReductionOperations are ordered top-down from the phi's use to the
9416     // LoopExitValue. We keep track of the previous item (the Chain) to tell
9417     // which of the two operands will remain scalar and which will be reduced.
9418     // For minmax the chain will be the select instructions.
9419     Instruction *Chain = Phi;
9420     for (Instruction *R : ReductionOperations) {
9421       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9422       RecurKind Kind = RdxDesc.getRecurrenceKind();
9423
9424       VPValue *ChainOp = Plan->getVPValue(Chain);
9425       unsigned FirstOpId;
9426       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9427              "Only min/max recurrences allowed for inloop reductions");
9428       // Recognize a call to the llvm.fmuladd intrinsic.
9429 bool IsFMulAdd = (Kind == RecurKind::FMulAdd); 9430 assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) && 9431 "Expected instruction to be a call to the llvm.fmuladd intrinsic"); 9432 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9433 assert(isa<VPWidenSelectRecipe>(WidenRecipe) && 9434 "Expected to replace a VPWidenSelectSC"); 9435 FirstOpId = 1; 9436 } else { 9437 assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) || 9438 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) && 9439 "Expected to replace a VPWidenSC"); 9440 FirstOpId = 0; 9441 } 9442 unsigned VecOpId = 9443 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId; 9444 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); 9445 9446 auto *CondOp = CM.foldTailByMasking() 9447 ? RecipeBuilder.createBlockInMask(R->getParent(), Plan) 9448 : nullptr; 9449 9450 if (IsFMulAdd) { 9451 // If the instruction is a call to the llvm.fmuladd intrinsic then we 9452 // need to create an fmul recipe to use as the vector operand for the 9453 // fadd reduction. 9454 VPInstruction *FMulRecipe = new VPInstruction( 9455 Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))}); 9456 FMulRecipe->setFastMathFlags(R->getFastMathFlags()); 9457 WidenRecipe->getParent()->insert(FMulRecipe, 9458 WidenRecipe->getIterator()); 9459 VecOp = FMulRecipe; 9460 } 9461 VPReductionRecipe *RedRecipe = 9462 new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9463 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9464 Plan->removeVPValueFor(R); 9465 Plan->addVPValue(R, RedRecipe); 9466 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 9467 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9468 WidenRecipe->eraseFromParent(); 9469 9470 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9471 VPRecipeBase *CompareRecipe = 9472 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9473 assert(isa<VPWidenRecipe>(CompareRecipe) && 9474 "Expected to replace a VPWidenSC"); 9475 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9476 "Expected no remaining users"); 9477 CompareRecipe->eraseFromParent(); 9478 } 9479 Chain = R; 9480 } 9481 } 9482 9483 // If tail is folded by masking, introduce selects between the phi 9484 // and the live-out instruction of each reduction, at the beginning of the 9485 // dedicated latch block. 
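// ---------------------------------------------------------------------------
// Editorial example (aside): when the tail is folded by masking, a sum
// reduction whose in-loop update is
//   red.next = red.phi + widened.value
// gets, at the start of the latch block,
//   select(header.mask, red.next, red.phi)
// so that lanes past the real trip count keep the old partial values and do
// not contribute to the final reduction result.
// ---------------------------------------------------------------------------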
9486 if (CM.foldTailByMasking()) { 9487 Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin()); 9488 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9489 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R); 9490 if (!PhiR || PhiR->isInLoop()) 9491 continue; 9492 VPValue *Cond = 9493 RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9494 VPValue *Red = PhiR->getBackedgeValue(); 9495 assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB && 9496 "reduction recipe must be defined before latch"); 9497 Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR}); 9498 } 9499 } 9500 } 9501 9502 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9503 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9504 VPSlotTracker &SlotTracker) const { 9505 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9506 IG->getInsertPos()->printAsOperand(O, false); 9507 O << ", "; 9508 getAddr()->printAsOperand(O, SlotTracker); 9509 VPValue *Mask = getMask(); 9510 if (Mask) { 9511 O << ", "; 9512 Mask->printAsOperand(O, SlotTracker); 9513 } 9514 9515 unsigned OpIdx = 0; 9516 for (unsigned i = 0; i < IG->getFactor(); ++i) { 9517 if (!IG->getMember(i)) 9518 continue; 9519 if (getNumStoreOperands() > 0) { 9520 O << "\n" << Indent << " store "; 9521 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker); 9522 O << " to index " << i; 9523 } else { 9524 O << "\n" << Indent << " "; 9525 getVPValue(OpIdx)->printAsOperand(O, SlotTracker); 9526 O << " = load from index " << i; 9527 } 9528 ++OpIdx; 9529 } 9530 } 9531 #endif 9532 9533 void VPWidenCallRecipe::execute(VPTransformState &State) { 9534 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9535 *this, State); 9536 } 9537 9538 void VPWidenSelectRecipe::execute(VPTransformState &State) { 9539 auto &I = *cast<SelectInst>(getUnderlyingInstr()); 9540 State.ILV->setDebugLocFromInst(&I); 9541 9542 // The condition can be loop invariant but still defined inside the 9543 // loop. This means that we can't just use the original 'cond' value. 9544 // We have to take the 'vectorized' value and pick the first lane. 9545 // Instcombine will make this a no-op. 9546 auto *InvarCond = 9547 InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr; 9548 9549 for (unsigned Part = 0; Part < State.UF; ++Part) { 9550 Value *Cond = InvarCond ? 
InvarCond : State.get(getOperand(0), Part);
9551     Value *Op0 = State.get(getOperand(1), Part);
9552     Value *Op1 = State.get(getOperand(2), Part);
9553     Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
9554     State.set(this, Sel, Part);
9555     State.ILV->addMetadata(Sel, &I);
9556   }
9557 }
9558
9559 void VPWidenRecipe::execute(VPTransformState &State) {
9560   auto &I = *cast<Instruction>(getUnderlyingValue());
9561   auto &Builder = State.Builder;
9562   switch (I.getOpcode()) {
9563   case Instruction::Call:
9564   case Instruction::Br:
9565   case Instruction::PHI:
9566   case Instruction::GetElementPtr:
9567   case Instruction::Select:
9568     llvm_unreachable("This instruction is handled by a different recipe.");
9569   case Instruction::UDiv:
9570   case Instruction::SDiv:
9571   case Instruction::SRem:
9572   case Instruction::URem:
9573   case Instruction::Add:
9574   case Instruction::FAdd:
9575   case Instruction::Sub:
9576   case Instruction::FSub:
9577   case Instruction::FNeg:
9578   case Instruction::Mul:
9579   case Instruction::FMul:
9580   case Instruction::FDiv:
9581   case Instruction::FRem:
9582   case Instruction::Shl:
9583   case Instruction::LShr:
9584   case Instruction::AShr:
9585   case Instruction::And:
9586   case Instruction::Or:
9587   case Instruction::Xor: {
9588     // Just widen unops and binops.
9589     State.ILV->setDebugLocFromInst(&I);
9590
9591     for (unsigned Part = 0; Part < State.UF; ++Part) {
9592       SmallVector<Value *, 2> Ops;
9593       for (VPValue *VPOp : operands())
9594         Ops.push_back(State.get(VPOp, Part));
9595
9596       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
9597
9598       if (auto *VecOp = dyn_cast<Instruction>(V)) {
9599         VecOp->copyIRFlags(&I);
9600
9601         // If the instruction is vectorized and was in a basic block that needed
9602         // predication, we can't propagate poison-generating flags (nuw/nsw,
9603         // exact, etc.). The control flow has been linearized and the
9604         // instruction is no longer guarded by the predicate, which could cause
9605         // the flags to no longer hold.
9606         if (State.MayGeneratePoisonRecipes.contains(this))
9607           VecOp->dropPoisonGeneratingFlags();
9608       }
9609
9610       // Use this vector value for all users of the original instruction.
9611       State.set(this, V, Part);
9612       State.ILV->addMetadata(V, &I);
9613     }
9614
9615     break;
9616   }
9617   case Instruction::ICmp:
9618   case Instruction::FCmp: {
9619     // Widen compares. Generate vector compares.
9620     bool FCmp = (I.getOpcode() == Instruction::FCmp);
9621     auto *Cmp = cast<CmpInst>(&I);
9622     State.ILV->setDebugLocFromInst(Cmp);
9623     for (unsigned Part = 0; Part < State.UF; ++Part) {
9624       Value *A = State.get(getOperand(0), Part);
9625       Value *B = State.get(getOperand(1), Part);
9626       Value *C = nullptr;
9627       if (FCmp) {
9628         // Propagate fast math flags.
9629 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9630 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 9631 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 9632 } else { 9633 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 9634 } 9635 State.set(this, C, Part); 9636 State.ILV->addMetadata(C, &I); 9637 } 9638 9639 break; 9640 } 9641 9642 case Instruction::ZExt: 9643 case Instruction::SExt: 9644 case Instruction::FPToUI: 9645 case Instruction::FPToSI: 9646 case Instruction::FPExt: 9647 case Instruction::PtrToInt: 9648 case Instruction::IntToPtr: 9649 case Instruction::SIToFP: 9650 case Instruction::UIToFP: 9651 case Instruction::Trunc: 9652 case Instruction::FPTrunc: 9653 case Instruction::BitCast: { 9654 auto *CI = cast<CastInst>(&I); 9655 State.ILV->setDebugLocFromInst(CI); 9656 9657 /// Vectorize casts. 9658 Type *DestTy = (State.VF.isScalar()) 9659 ? CI->getType() 9660 : VectorType::get(CI->getType(), State.VF); 9661 9662 for (unsigned Part = 0; Part < State.UF; ++Part) { 9663 Value *A = State.get(getOperand(0), Part); 9664 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 9665 State.set(this, Cast, Part); 9666 State.ILV->addMetadata(Cast, &I); 9667 } 9668 break; 9669 } 9670 default: 9671 // This instruction is not vectorized by simple widening. 9672 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 9673 llvm_unreachable("Unhandled instruction!"); 9674 } // end of switch. 9675 } 9676 9677 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9678 auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr()); 9679 // Construct a vector GEP by widening the operands of the scalar GEP as 9680 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 9681 // results in a vector of pointers when at least one operand of the GEP 9682 // is vector-typed. Thus, to keep the representation compact, we only use 9683 // vector-typed operands for loop-varying values. 9684 9685 if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 9686 // If we are vectorizing, but the GEP has only loop-invariant operands, 9687 // the GEP we build (by only using vector-typed operands for 9688 // loop-varying values) would be a scalar pointer. Thus, to ensure we 9689 // produce a vector of pointers, we need to either arbitrarily pick an 9690 // operand to broadcast, or broadcast a clone of the original GEP. 9691 // Here, we broadcast a clone of the original. 9692 // 9693 // TODO: If at some point we decide to scalarize instructions having 9694 // loop-invariant operands, this special case will no longer be 9695 // required. We would add the scalarization decision to 9696 // collectLoopScalars() and teach getVectorValue() to broadcast 9697 // the lane-zero scalar value. 9698 auto *Clone = State.Builder.Insert(GEP->clone()); 9699 for (unsigned Part = 0; Part < State.UF; ++Part) { 9700 Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone); 9701 State.set(this, EntryPart, Part); 9702 State.ILV->addMetadata(EntryPart, GEP); 9703 } 9704 } else { 9705 // If the GEP has at least one loop-varying operand, we are sure to 9706 // produce a vector of pointers. But if we are only unrolling, we want 9707 // to produce a scalar GEP for each unroll part. Thus, the GEP we 9708 // produce with the code below will be scalar (if VF == 1) or vector 9709 // (otherwise). Note that for the unroll-only case, we still maintain 9710 // values in the vector mapping with initVector, as we do for other 9711 // instructions. 
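// ---------------------------------------------------------------------------
// Editorial example (aside; hypothetical IR): for
//   %g = getelementptr inbounds float, float* %a, i64 %iv
// with a loop-invariant base %a and a widened index %iv, the loop below emits
//   %vgep = getelementptr inbounds float, float* %a, <VF x i64> %viv
// once per unroll part: the scalar base is not broadcast; the vector index
// alone makes the result a vector of pointers.
// ---------------------------------------------------------------------------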
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      // The pointer operand of the new GEP. If it's loop-invariant, we
      // won't broadcast it.
      auto *Ptr = IsPtrLoopInvariant
                      ? State.get(getOperand(0), VPIteration(0, 0))
                      : State.get(getOperand(0), Part);

      // Collect all the indices for the new GEP. If any index is
      // loop-invariant, we won't broadcast it.
      SmallVector<Value *, 4> Indices;
      for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
        VPValue *Operand = getOperand(I);
        if (IsIndexLoopInvariant[I - 1])
          Indices.push_back(State.get(Operand, VPIteration(0, 0)));
        else
          Indices.push_back(State.get(Operand, Part));
      }

      // If the GEP instruction is vectorized and was in a basic block that
      // needed predication, we can't propagate the poison-generating
      // 'inbounds' flag. The control flow has been linearized and the GEP
      // is no longer guarded by the predicate, so the 'inbounds' property
      // may no longer hold.
      bool IsInBounds =
          GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0;

      // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
      // but it should be a vector otherwise.
      auto *NewGEP = IsInBounds
                         ? State.Builder.CreateInBoundsGEP(
                               GEP->getSourceElementType(), Ptr, Indices)
                         : State.Builder.CreateGEP(GEP->getSourceElementType(),
                                                   Ptr, Indices);
      assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
             "NewGEP is not a pointer vector");
      State.set(this, NewGEP, Part);
      State.ILV->addMetadata(NewGEP, GEP);
    }
  }
}

void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Int or FP induction being replicated.");
  auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0);
  State.ILV->widenIntOrFpInduction(IV, this, State, CanonicalIV);
}

void VPWidenPHIRecipe::execute(VPTransformState &State) {
  State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
                                 State);
}

void VPBlendRecipe::execute(VPTransformState &State) {
  State.ILV->setDebugLocFromInst(Phi, &State.Builder);
  // We know that all PHIs in non-header blocks are converted into
  // selects, so we don't have to worry about the insertion order and we
  // can just use the builder.
  // At this point we generate the predication tree. There may be
  // duplications since this is a simple recursive scan, but future
  // optimizations will clean it up.

  unsigned NumIncoming = getNumIncomingValues();

  // Generate a sequence of selects of the form:
  // SELECT(Mask3, In3,
  //        SELECT(Mask2, In2,
  //               SELECT(Mask1, In1,
  //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this
  // phi and are essentially undef are taken from In0.
  InnerLoopVectorizer::VectorParts Entry(State.UF);
  for (unsigned In = 0; In < NumIncoming; ++In) {
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      // We might have single-edge PHIs (blocks) - use an identity
      // 'select' for the first PHI operand.
      Value *In0 = State.get(getIncomingValue(In), Part);
      if (In == 0)
        Entry[Part] = In0; // Initialize with the first incoming value.
      else {
        // Select between the current value and the previous incoming edge
        // based on the incoming mask.
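        // E.g. (illustrative, VF=4) the running value becomes:
        //   %predphi = select <4 x i1> %mask.in, <4 x i32> %in,
        //                     <4 x i32> %prev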
        Value *Cond = State.get(getMask(In), Part);
        Entry[Part] =
            State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
      }
    }
  }
  for (unsigned Part = 0; Part < State.UF; ++Part)
    State.set(this, Entry[Part], Part);
}

void VPInterleaveRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Interleave group being replicated.");
  State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
                                      getStoredValues(), getMask());
}

void VPReductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Reduction being replicated.");
  Value *PrevInChain = State.get(getChainOp(), 0);
  RecurKind Kind = RdxDesc->getRecurrenceKind();
  bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
  // Propagate the fast-math flags carried by the underlying instruction.
  IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
  State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    Value *NewVecOp = State.get(getVecOp(), Part);
    if (VPValue *Cond = getCondOp()) {
      Value *NewCond = State.get(Cond, Part);
      VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
      Value *Iden = RdxDesc->getRecurrenceIdentity(
          Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
      Value *IdenVec =
          State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
      Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
      NewVecOp = Select;
    }
    Value *NewRed;
    Value *NextInChain;
    if (IsOrdered) {
      if (State.VF.isVector())
        NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
                                        PrevInChain);
      else
        NewRed = State.Builder.CreateBinOp(
            (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
            NewVecOp);
      PrevInChain = NewRed;
    } else {
      PrevInChain = State.get(getChainOp(), Part);
      NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
    }
    if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
      NextInChain = createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
                                   NewRed, PrevInChain);
    } else if (IsOrdered)
      NextInChain = NewRed;
    else
      NextInChain = State.Builder.CreateBinOp(
          (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
          PrevInChain);
    State.set(this, NextInChain, Part);
  }
}

void VPReplicateRecipe::execute(VPTransformState &State) {
  if (State.Instance) { // Generate a single instance.
    assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
    State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
                                    IsPredicated, State);
    // Insert scalar instance packing it into a vector.
    if (AlsoPack && State.VF.isVector()) {
      // If we're constructing lane 0, initialize to start from poison.
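      // (Scalarized instances arrive one lane at a time, so the first lane
      // must seed the per-part vector value before the insertelement chain
      // can accumulate the remaining lanes into it.)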
      if (State.Instance->Lane.isFirstLane()) {
        assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
        Value *Poison = PoisonValue::get(
            VectorType::get(getUnderlyingValue()->getType(), State.VF));
        State.set(this, Poison, State.Instance->Part);
      }
      State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
    }
    return;
  }

  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
  unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
  assert((!State.VF.isScalable() || IsUniform) &&
         "Can't scalarize a scalable vector");
  for (unsigned Part = 0; Part < State.UF; ++Part)
    for (unsigned Lane = 0; Lane < EndLane; ++Lane)
      State.ILV->scalarizeInstruction(getUnderlyingInstr(), this,
                                      VPIteration(Part, Lane), IsPredicated,
                                      State);
}

void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Branch on Mask works only on single instance.");

  unsigned Part = State.Instance->Part;
  unsigned Lane = State.Instance->Lane.getKnownLane();

  Value *ConditionBit = nullptr;
  VPValue *BlockInMask = getMask();
  if (BlockInMask) {
    ConditionBit = State.get(BlockInMask, Part);
    if (ConditionBit->getType()->isVectorTy())
      ConditionBit = State.Builder.CreateExtractElement(
          ConditionBit, State.Builder.getInt32(Lane));
  } else // Block in mask is all-one.
    ConditionBit = State.Builder.getTrue();

  // Replace the temporary unreachable terminator with a new conditional
  // branch, whose two destinations will be set later when they are created.
  auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
  assert(isa<UnreachableInst>(CurrentTerminator) &&
         "Expected to replace unreachable terminator with conditional branch.");
  auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
  CondBr->setSuccessor(0, nullptr);
  ReplaceInstWithInst(CurrentTerminator, CondBr);
}

void VPPredInstPHIRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Predicated instruction PHI works per instance.");
  Instruction *ScalarPredInst =
      cast<Instruction>(State.get(getOperand(0), *State.Instance));
  BasicBlock *PredicatedBB = ScalarPredInst->getParent();
  BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
  assert(PredicatingBB && "Predicated block has no single predecessor.");
  assert(isa<VPReplicateRecipe>(getOperand(0)) &&
         "operand must be VPReplicateRecipe");

  // By current pack/unpack logic we need to generate only a single phi node:
  // if a vector value for the predicated instruction exists at this point it
  // means the instruction has vector users only, and a phi for the vector
  // value is needed. In this case the recipe of the predicated instruction is
  // marked to also do that packing, thereby "hoisting" the insert-element
  // sequence. Otherwise, a phi node for the scalar value is needed.
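  // Illustrative shape of the vector case (assuming VF=4 and an i32 value):
  //   predicated.bb:  %v.new = insertelement <4 x i32> %v.old, i32 %s, i32 L
  //   merge.bb:       %vphi = phi <4 x i32> [ %v.old, %predicating.bb ],
  //                                         [ %v.new, %predicated.bb ]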
  unsigned Part = State.Instance->Part;
  if (State.hasVectorValue(getOperand(0), Part)) {
    Value *VectorValue = State.get(getOperand(0), Part);
    InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
    PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
    VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
    VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
    if (State.hasVectorValue(this, Part))
      State.reset(this, VPhi, Part);
    else
      State.set(this, VPhi, Part);
    // NOTE: Currently we need to update the value of the operand, so the next
    // predicated iteration inserts its generated value in the correct vector.
    State.reset(getOperand(0), VPhi, Part);
  } else {
    Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
    PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
    Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
                     PredicatingBB);
    Phi->addIncoming(ScalarPredInst, PredicatedBB);
    if (State.hasScalarValue(this, *State.Instance))
      State.reset(this, Phi, *State.Instance);
    else
      State.set(this, Phi, *State.Instance);
    // NOTE: Currently we need to update the value of the operand, so the next
    // predicated iteration inserts its generated value in the correct vector.
    State.reset(getOperand(0), Phi, *State.Instance);
  }
}

void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
  VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;

  // Attempt to issue a wide load.
  LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
  StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);

  assert((LI || SI) && "Invalid Load/Store instruction");
  assert((!SI || StoredValue) && "No stored value provided for widened store");
  assert((!LI || !StoredValue) && "Stored value provided for widened load");

  Type *ScalarDataTy = getLoadStoreType(&Ingredient);

  auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
  const Align Alignment = getLoadStoreAlignment(&Ingredient);
  bool CreateGatherScatter = !Consecutive;

  auto &Builder = State.Builder;
  InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
  bool isMaskRequired = getMask();
  if (isMaskRequired)
    for (unsigned Part = 0; Part < State.UF; ++Part)
      BlockInMaskParts[Part] = State.get(getMask(), Part);

  const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
    // Calculate the pointer for the specific unroll-part.
    GetElementPtrInst *PartPtr = nullptr;

    bool InBounds = false;
    if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
      InBounds = gep->isInBounds();
    if (Reverse) {
      // If the address is consecutive but reversed, then the
      // wide store needs to start at the last vector element.
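      // Worked example (illustrative, fixed VF=4, Part=1): RunTimeVF = 4,
      // NumElt = -4 and LastLane = -3, so the wide access starts at Ptr[-7]
      // and covers Ptr[-7..-4], the elements touched by scalar iterations
      // 4..7 of the reversed loop.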
      // RunTimeVF = VScale * VF.getKnownMinValue()
      // For fixed-width, VScale is 1, so RunTimeVF = VF.getKnownMinValue()
      Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
      // NumElt = -Part * RunTimeVF
      Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
      // LastLane = 1 - RunTimeVF
      Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
      PartPtr =
          cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
      PartPtr->setIsInBounds(InBounds);
      PartPtr = cast<GetElementPtrInst>(
          Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
      PartPtr->setIsInBounds(InBounds);
      if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
        BlockInMaskParts[Part] =
            Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse");
    } else {
      Value *Increment =
          createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part);
      PartPtr = cast<GetElementPtrInst>(
          Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
      PartPtr->setIsInBounds(InBounds);
    }

    unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
    return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
  };

  // Handle Stores:
  if (SI) {
    State.ILV->setDebugLocFromInst(SI);

    for (unsigned Part = 0; Part < State.UF; ++Part) {
      Instruction *NewSI = nullptr;
      Value *StoredVal = State.get(StoredValue, Part);
      if (CreateGatherScatter) {
        Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
        Value *VectorGep = State.get(getAddr(), Part);
        NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
                                            MaskPart);
      } else {
        if (Reverse) {
          // If we store to reverse consecutive memory locations, then we
          // need to reverse the order of elements in the stored value.
          StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
          // We don't want to update the value in the map as it might be used
          // in another expression. So don't call resetVectorValue(StoredVal).
        }
        auto *VecPtr =
            CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
        if (isMaskRequired)
          NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
                                            BlockInMaskParts[Part]);
        else
          NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
      }
      State.ILV->addMetadata(NewSI, SI);
    }
    return;
  }

  // Handle loads.
  assert(LI && "Must have a load instruction");
  State.ILV->setDebugLocFromInst(LI);
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    Value *NewLI;
    if (CreateGatherScatter) {
      Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
      Value *VectorGep = State.get(getAddr(), Part);
      NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
                                         nullptr, "wide.masked.gather");
      State.ILV->addMetadata(NewLI, LI);
    } else {
      auto *VecPtr =
          CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
      if (isMaskRequired)
        NewLI = Builder.CreateMaskedLoad(
            DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
            PoisonValue::get(DataTy), "wide.masked.load");
      else
        NewLI =
            Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");

      // Add metadata to the load, but setVectorValue to the reverse shuffle.
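      // (The memory-access metadata belongs on the load itself; the reverse
      // shuffle below is pure data movement, so users are mapped to the
      // shuffle while the metadata stays on the load.)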
      State.ILV->addMetadata(NewLI, LI);
      if (Reverse)
        NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
    }

    State.set(this, NewLI, Part);
  }
}

// Determine how to lower the scalar epilogue, which depends on 1) optimizing
// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
// predication, and 4) a TTI hook that analyses whether the loop is suitable
// for predication.
static ScalarEpilogueLowering getScalarEpilogueLowering(
    Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
    AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
    LoopVectorizationLegality &LVL) {
  // 1) OptSize takes precedence over all other options, i.e. if this is set,
  // don't look at hints or options, and don't request a scalar epilogue.
  // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
  // LoopAccessInfo (due to code dependency and not being able to reliably get
  // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
  // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
  // versioning when the vectorization is forced, unlike hasOptSize. So revert
  // back to the old way and vectorize with versioning when forced. See D81345.)
  if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
                                                      PGSOQueryType::IRPass) &&
                          Hints.getForce() != LoopVectorizeHints::FK_Enabled))
    return CM_ScalarEpilogueNotAllowedOptSize;

  // 2) If set, obey the directives.
  if (PreferPredicateOverEpilogue.getNumOccurrences()) {
    switch (PreferPredicateOverEpilogue) {
    case PreferPredicateTy::ScalarEpilogue:
      return CM_ScalarEpilogueAllowed;
    case PreferPredicateTy::PredicateElseScalarEpilogue:
      return CM_ScalarEpilogueNotNeededUsePredicate;
    case PreferPredicateTy::PredicateOrDontVectorize:
      return CM_ScalarEpilogueNotAllowedUsePredicate;
    };
  }

  // 3) If set, obey the hints.
  switch (Hints.getPredicate()) {
  case LoopVectorizeHints::FK_Enabled:
    return CM_ScalarEpilogueNotNeededUsePredicate;
  case LoopVectorizeHints::FK_Disabled:
    return CM_ScalarEpilogueAllowed;
  };

  // 4) If the TTI hook indicates this is profitable, request predication.
  if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, LVL.getLAI()))
    return CM_ScalarEpilogueNotNeededUsePredicate;

  return CM_ScalarEpilogueAllowed;
}

Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If Values have been set for this Def, return the one relevant for \p Part.
  if (hasVectorValue(Def, Part))
    return Data.PerPartOutput[Def][Part];

  if (!hasScalarValue(Def, {Part, 0})) {
    Value *IRV = Def->getLiveInIRValue();
    Value *B = ILV->getBroadcastInstrs(IRV);
    set(Def, B, Part);
    return B;
  }

  Value *ScalarValue = get(Def, {Part, 0});
  // If we aren't vectorizing, we can just copy the scalar map values over
  // to the vector map.
  if (VF.isScalar()) {
    set(Def, ScalarValue, Part);
    return ScalarValue;
  }

  auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
  bool IsUniform = RepR && RepR->isUniform();

  unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
  // Check if there is a scalar value for the selected lane.
  if (!hasScalarValue(Def, {Part, LastLane})) {
    // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
    assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
           "unexpected recipe found to be invariant");
    IsUniform = true;
    LastLane = 0;
  }

  auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
  // Set the insert point after the last scalarized instruction or after the
  // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
  // will directly follow the scalar definitions.
  auto OldIP = Builder.saveIP();
  auto NewIP =
      isa<PHINode>(LastInst)
          ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
          : std::next(BasicBlock::iterator(LastInst));
  Builder.SetInsertPoint(&*NewIP);

  // However, if we are vectorizing, we need to construct the vector values.
  // If the value is known to be uniform after vectorization, we can just
  // broadcast the scalar value corresponding to lane zero for each unroll
  // iteration. Otherwise, we construct the vector values using
  // insertelement instructions. Since the resulting vectors are stored in
  // State, we will only generate the insertelements once.
  Value *VectorValue = nullptr;
  if (IsUniform) {
    VectorValue = ILV->getBroadcastInstrs(ScalarValue);
    set(Def, VectorValue, Part);
  } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
    for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
      ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
    VectorValue = get(Def, Part);
  }
  Builder.restoreIP(OldIP);
  return VectorValue;
}

// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying
// the input LLVM IR.
static bool processLoopInVPlanNativePath(
    Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
    LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
    TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
    OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
    ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
    LoopVectorizationRequirements &Requirements) {

  if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
    LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
    return false;
  }
  assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
  Function *F = L->getHeader()->getParent();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());

  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);

  LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  // Use the planner for outer loop vectorization.
  // TODO: CM is not used at this point inside the planner. Turn CM into an
  // optional argument if we don't need it in the future.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
                               Requirements, ORE);

  // Get user vectorization factor.
  ElementCount UserVF = Hints.getWidth();

  CM.collectElementTypesForWidening();

  // Plan how to best vectorize, return the best VF and its cost.
  const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);

  // If we are stress testing VPlan builds, do not attempt to generate vector
  // code. Masked vector code generation support will follow soon.
  // Also, do not attempt to vectorize if no vector code will be produced.
  if (VPlanBuildStressTest || EnableVPlanPredication ||
      VectorizationFactor::Disabled() == VF)
    return false;

  VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);

  {
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
                             F->getParent()->getDataLayout());
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
                           &CM, BFI, PSI, Checks);
    LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
                      << L->getHeader()->getParent()->getName() << "\"\n");
    LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();
  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated with floating point, there
// will be a performance penalty from the conversion overhead and the change
// in the vector width.
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
  SmallVector<Instruction *, 4> Worklist;
  for (BasicBlock *BB : L->getBlocks()) {
    for (Instruction &Inst : *BB) {
      if (auto *S = dyn_cast<StoreInst>(&Inst)) {
        if (S->getValueOperand()->getType()->isFloatTy())
          Worklist.push_back(S);
      }
    }
  }

  // Traverse the floating-point stores upwards, searching for floating-point
  // conversions.
  SmallPtrSet<const Instruction *, 4> Visited;
  SmallPtrSet<const Instruction *, 4> EmittedRemark;
  while (!Worklist.empty()) {
    auto *I = Worklist.pop_back_val();
    if (!L->contains(I))
      continue;
    if (!Visited.insert(I).second)
      continue;

    // Emit a remark if the floating point store required a floating
    // point conversion.
    // TODO: More work could be done to identify the root cause such as a
    // constant or a function return type and point the user to it.
    if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
                                          I->getDebugLoc(), L->getHeader())
               << "floating point conversion changes vector width. "
" 10305 << "Mixed floating point precision requires an up/down " 10306 << "cast that will negatively impact performance."; 10307 }); 10308 10309 for (Use &Op : I->operands()) 10310 if (auto *OpI = dyn_cast<Instruction>(Op)) 10311 Worklist.push_back(OpI); 10312 } 10313 } 10314 10315 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 10316 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 10317 !EnableLoopInterleaving), 10318 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 10319 !EnableLoopVectorization) {} 10320 10321 bool LoopVectorizePass::processLoop(Loop *L) { 10322 assert((EnableVPlanNativePath || L->isInnermost()) && 10323 "VPlan-native path is not enabled. Only process inner loops."); 10324 10325 #ifndef NDEBUG 10326 const std::string DebugLocStr = getDebugLocString(L); 10327 #endif /* NDEBUG */ 10328 10329 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 10330 << L->getHeader()->getParent()->getName() << "\" from " 10331 << DebugLocStr << "\n"); 10332 10333 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI); 10334 10335 LLVM_DEBUG( 10336 dbgs() << "LV: Loop hints:" 10337 << " force=" 10338 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 10339 ? "disabled" 10340 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 10341 ? "enabled" 10342 : "?")) 10343 << " width=" << Hints.getWidth() 10344 << " interleave=" << Hints.getInterleave() << "\n"); 10345 10346 // Function containing loop 10347 Function *F = L->getHeader()->getParent(); 10348 10349 // Looking at the diagnostic output is the only way to determine if a loop 10350 // was vectorized (other than looking at the IR or machine code), so it 10351 // is important to generate an optimization remark for each loop. Most of 10352 // these messages are generated as OptimizationRemarkAnalysis. Remarks 10353 // generated as OptimizationRemark and OptimizationRemarkMissed are 10354 // less verbose reporting vectorized loops and unvectorized loops that may 10355 // benefit from vectorization, respectively. 10356 10357 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 10358 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 10359 return false; 10360 } 10361 10362 PredicatedScalarEvolution PSE(*SE, *L); 10363 10364 // Check if it is legal to vectorize the loop. 10365 LoopVectorizationRequirements Requirements; 10366 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 10367 &Requirements, &Hints, DB, AC, BFI, PSI); 10368 if (!LVL.canVectorize(EnableVPlanNativePath)) { 10369 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 10370 Hints.emitRemarkWithHints(); 10371 return false; 10372 } 10373 10374 // Check the function attributes and profiles to find out if this function 10375 // should be optimized for size. 10376 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10377 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 10378 10379 // Entrance to the VPlan-native vectorization path. Outer loops are processed 10380 // here. They may require CFG and instruction level transformations before 10381 // even evaluating whether vectorization is profitable. Since we cannot modify 10382 // the incoming IR, we need to build VPlan upfront in the vectorization 10383 // pipeline. 
  if (!L->isInnermost())
    return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
                                        ORE, BFI, PSI, Hints, Requirements);

  assert(L->isInnermost() && "Inner loop expected.");

  // Check the loop for a trip count threshold: vectorize loops with a tiny
  // trip count by optimizing for size, to minimize overheads.
  auto ExpectedTC = getSmallBestKnownTC(*SE, L);
  if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem like it can be correct -- what if the loop
  // is an integer loop and the vector instructions selected are purely
  // integer vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    reportVectorizationFailure(
        "Can't vectorize when the NoImplicitFloat attribute is used",
        "loop not vectorized due to NoImplicitFloat attribute",
        "NoImplicitFloat", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    reportVectorizationFailure(
        "Potentially unsafe FP op prevents vectorization",
        "loop not vectorized due to unsafe FP support.",
        "UnsafeFP", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  bool AllowOrderedReductions;
  // If the flag is set, use that instead and override the TTI behaviour.
  if (ForceOrderedReductions.getNumOccurrences() > 0)
    AllowOrderedReductions = ForceOrderedReductions;
  else
    AllowOrderedReductions = TTI->enableOrderedReductions();
  if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
    ORE->emit([&]() {
      auto *ExactFPMathInst = Requirements.getExactFPInst();
      return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
                                                 ExactFPMathInst->getDebugLoc(),
                                                 ExactFPMathInst->getParent())
             << "loop not vectorized: cannot prove it is safe to reorder "
                "floating-point operations";
    });
    LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
                         "reorder floating-point operations\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
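  // (An interleave group is a set of strided accesses that together cover
  // consecutive memory; e.g. A[2*i] and A[2*i+1] form a group with factor 2
  // that can be loaded as one wide load plus shuffles.)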
  if (UseInterleaved) {
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();
  CM.collectElementTypesForWidening();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
                               Requirements, ORE);

  // Get user vectorization factor and interleave count.
  ElementCount UserVF = Hints.getWidth();
  unsigned UserIC = Hints.getInterleave();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (VF.Width.isScalar()) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being
    // explicitly requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
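    // (Both remarks are emitted as 'missed' remarks so that, e.g.,
    // -Rpass-missed=loop-vectorize surfaces the decision to the user.)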
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();
  {
    // Optimistically generate runtime checks. Drop them if they turn out to
    // not be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
                             F->getParent()->getDataLayout());
    if (!VF.Width.isScalar() || IC > 1)
      Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());

    using namespace ore;
    if (!VectorizeLoop) {
      assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided that it is not legal to vectorize the loop, then
      // interleave it.
      InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                                 &CM, BFI, PSI, Checks);

      VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
      LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);

      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                  L->getHeader())
               << "interleaved loop (interleaved count: "
               << NV("InterleaveCount", IC) << ")";
      });
    } else {
      // If we decided that it is *legal* to vectorize the loop, then do it.

      // Consider vectorizing the epilogue too if it's profitable.
      VectorizationFactor EpilogueVF =
          CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
      if (EpilogueVF.Width.isVector()) {

        // The first pass vectorizes the main loop and creates a scalar
        // epilogue to be vectorized by executing the plan (potentially with a
        // different factor) again shortly afterwards.
        EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
        EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
                                           EPI, &LVL, &CM, BFI, PSI, Checks);

        VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
        LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
                        DT);
        ++LoopsVectorized;

        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
        formLCSSARecursively(*L, *DT, LI, SE);

        // Second pass vectorizes the epilogue and adjusts the control flow
        // edges from the first pass.
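        // (The same EPI struct is reused: the epilogue's VF/UF are installed
        // as the "main" factors below so the epilogue vectorizer treats them
        // as its primary plan.)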
        EPI.MainLoopVF = EPI.EpilogueVF;
        EPI.MainLoopUF = EPI.EpilogueUF;
        EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
                                                 ORE, EPI, &LVL, &CM, BFI, PSI,
                                                 Checks);

        VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);

        // Ensure that the start values for any VPReductionPHIRecipes are
        // updated before vectorizing the epilogue loop.
        VPBasicBlock *Header = BestEpiPlan.getEntry()->getEntryBasicBlock();
        for (VPRecipeBase &R : Header->phis()) {
          if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
            if (auto *Resume = MainILV.getReductionResumeValue(
                    ReductionPhi->getRecurrenceDescriptor())) {
              VPValue *StartVal = new VPValue(Resume);
              BestEpiPlan.addExternalDef(StartVal);
              ReductionPhi->setOperand(0, StartVal);
            }
          }
        }

        LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
                        DT);
        ++LoopsEpilogueVectorized;

        if (!MainILV.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      } else {
        InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                               &LVL, &CM, BFI, PSI, Checks);

        VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
        LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
        ++LoopsVectorized;

        // Add metadata to disable runtime unrolling a scalar loop when there
        // are no runtime checks about strides and memory. A scalar loop that
        // is rarely used is not worth unrolling.
        if (!LB.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      }
      // Report the vectorization decision.
      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                  L->getHeader())
               << "vectorized loop (vectorization width: "
               << NV("VectorizationFactor", VF.Width)
               << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
      });
    }

    if (ORE->allowExtraAnalysis(LV_NAME))
      checkMixedPrecision(L, ORE);
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  // Process each loop nest in the function.
  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE,
                                      TLI, TTI, nullptr, nullptr, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loop info/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }

  if (Result.MadeCFGChange) {
    // Making CFG changes likely means a loop got vectorized. Indicate that
    // extra simplification passes should be run.
    // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
    // be run if runtime checks have been added.
    AM.getResult<ShouldRunExtraVectorPasses>(F);
    PA.preserve<ShouldRunExtraVectorPasses>();
  } else {
    PA.preserveSet<CFGAnalyses>();
  }
  return PA;
}

void LoopVectorizePass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << "<";
  OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
  OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
  OS << ">";
}