//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//
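
// Illustrative sketch (not part of the implementation): given a scalar loop
//   for (i = 0; i < n; ++i) A[i] = B[i] + 42;
// vectorizing with VF=4 conceptually produces a loop whose index advances by
// four and whose body operates on 4-wide vector values,
//   for (i = 0; i + 4 <= n; i += 4) A[i..i+3] = B[i..i+3] + <42, 42, 42, 42>;
// followed by a scalar epilogue (or a predicated, tail-folded vector body)
// for the remaining iterations.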

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired, that predication is preferred, and lists the available options.
// I.e., the vectorizer will try to fold the tail loop (epilogue) into the
// vector body and predicate the instructions accordingly. If tail-folding
// fails, there are different fallback strategies depending on these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));
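
// Illustrative sketch (not part of the implementation): with tail folding, a
// loop with trip count 10 and VF=4 runs three masked vector iterations with
// lane masks <1,1,1,1>, <1,1,1,1>, <1,1,0,0>, instead of two unmasked vector
// iterations followed by a two-iteration scalar epilogue.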

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> ForceOrderedReductions(
    "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorisation of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
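
// Illustrative note (not part of the implementation): under a typical x86-64
// data layout, x86_fp80 has a type size of 80 bits but an alloc size of 128
// bits, so hasIrregularType() returns true for it, while i32 (32 bits for
// both) is regular.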

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

AnalysisKey ShouldRunExtraVectorPasses::Key;

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop and the start value for the canonical induction, if it is != 0. The
  /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
  /// complex control flow around the loops.
  virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single first-order recurrence or pointer induction PHINode in
  /// a block. This method handles the induction variable canonicalization. It
  /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive. Uses the VPValue operands from \p RepRecipe instead
  /// of \p Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type. \p CanonicalIV is the scalar value generated for
  /// the canonical induction variable.
  void widenIntOrFpInduction(PHINode *IV, VPWidenIntOrFpInductionRecipe *Def,
                             VPTransformState &State, Value *CanonicalIV);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Set the debug location in the builder \p CustomBuilder using the debug
  /// location in \p V. If \p CustomBuilder is None then it uses the class
  /// member's Builder.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilderBase *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we
  /// are able to vectorize with strict in-order reductions for the given
  /// RdxDesc.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  // Returns the resume value (bc.merge.rdx) for a reduction as
  // generated by fixReduction.
  PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Introduce a conditional branch (on true, condition to be set later) at
  /// the end of the header=latch connecting it to itself (across the backedge)
  /// and to the exit block of \p L.
  void createHeaderBranch(Loop *L);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
                               VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPTransformState &State);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  /// Returns the block containing the checks or nullptr if no checks have
  /// been added.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and
  /// return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Collect poison-generating recipes that may generate a poison value that
  /// is used after vectorization, even when their operands are not poison.
  /// Those recipes meet the following conditions:
  /// * Contribute to the address computation of a recipe generating a widen
  ///   memory load/store (VPWidenMemoryInstructionRecipe or
  ///   VPInterleaveRecipe).
  /// * Such a widen memory load/store has at least one underlying Instruction
  ///   that is in a basic block that needs predication and after vectorization
  ///   the generated instruction won't be predicated.
  void collectPoisonGeneratingRecipes(VPTransformState &State);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;

  // Holds the resume values for reductions in the loops, used to set the
  // correct start value of reduction PHIs when vectorizing the epilogue.
  SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
      ReductionResumeValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilderBase *> CustomBuilder) {
  IRBuilderBase *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When a FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
                          << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

namespace llvm {

/// Return a value for Step multiplied by VF.
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
                       int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}
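
// Illustrative note (not part of the implementation): for Ty = i64 and
// Step = 2, a fixed VF of 4 yields the constant i64 8, while a scalable VF of
// <vscale x 4> yields the runtime value 8 * vscale (via llvm.vscale).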

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
                                  ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {

  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec) ||
          isa<VPCanonicalIVPHIRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. Collect recipe if its underlying instruction has
      // poison-generating flags.
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && Instr->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);

      // Add new definitions to the worklist.
      for (VPValue *operand : CurRec->operands())
        if (VPDef *OpDef = operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
            Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

PHINode *InnerLoopVectorizer::getReductionResumeValue(
    const RecurrenceDescriptor &RdxDesc) {
  auto It = ReductionResumeValues.find(&RdxDesc);
  assert(It != ReductionResumeValues.end() &&
         "Expected to find a resume value for the reduction.");
  return It->second;
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorisation with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
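
// Illustrative note (not part of the implementation): under this ordering all
// fixed VFs sort before all scalable ones, e.g. 2 < 4 < 8 < vscale x 2 <
// vscale x 4.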

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decisions map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Collect all element types in the loop for which widening is needed.
  void collectElementTypesForWidening();

  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
  void collectInLoopReductions();

  /// Returns true if we should use strict in-order reductions for the given
  /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
  /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
  /// of FP operations.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
    return !Hints->allowReordering() && RdxDesc.isOrdered();
  }

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
1355 bool isProfitableToScalarize(Instruction *I, ElementCount VF) const { 1356 assert(VF.isVector() && 1357 "Profitable to scalarize relevant only for VF > 1."); 1358 1359 // Cost model is not run in the VPlan-native path - return conservative 1360 // result until this changes. 1361 if (EnableVPlanNativePath) 1362 return false; 1363 1364 auto Scalars = InstsToScalarize.find(VF); 1365 assert(Scalars != InstsToScalarize.end() && 1366 "VF not yet analyzed for scalarization profitability"); 1367 return Scalars->second.find(I) != Scalars->second.end(); 1368 } 1369 1370 /// Returns true if \p I is known to be uniform after vectorization. 1371 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const { 1372 if (VF.isScalar()) 1373 return true; 1374 1375 // Cost model is not run in the VPlan-native path - return conservative 1376 // result until this changes. 1377 if (EnableVPlanNativePath) 1378 return false; 1379 1380 auto UniformsPerVF = Uniforms.find(VF); 1381 assert(UniformsPerVF != Uniforms.end() && 1382 "VF not yet analyzed for uniformity"); 1383 return UniformsPerVF->second.count(I); 1384 } 1385 1386 /// Returns true if \p I is known to be scalar after vectorization. 1387 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const { 1388 if (VF.isScalar()) 1389 return true; 1390 1391 // Cost model is not run in the VPlan-native path - return conservative 1392 // result until this changes. 1393 if (EnableVPlanNativePath) 1394 return false; 1395 1396 auto ScalarsPerVF = Scalars.find(VF); 1397 assert(ScalarsPerVF != Scalars.end() && 1398 "Scalar values are not calculated for VF"); 1399 return ScalarsPerVF->second.count(I); 1400 } 1401 1402 /// \returns True if instruction \p I can be truncated to a smaller bitwidth 1403 /// for vectorization factor \p VF. 1404 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const { 1405 return VF.isVector() && MinBWs.find(I) != MinBWs.end() && 1406 !isProfitableToScalarize(I, VF) && 1407 !isScalarAfterVectorization(I, VF); 1408 } 1409 1410 /// Decision that was taken during cost calculation for memory instruction. 1411 enum InstWidening { 1412 CM_Unknown, 1413 CM_Widen, // For consecutive accesses with stride +1. 1414 CM_Widen_Reverse, // For consecutive accesses with stride -1. 1415 CM_Interleave, 1416 CM_GatherScatter, 1417 CM_Scalarize 1418 }; 1419 1420 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1421 /// instruction \p I and vector width \p VF. 1422 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, 1423 InstructionCost Cost) { 1424 assert(VF.isVector() && "Expected VF >=2"); 1425 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1426 } 1427 1428 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1429 /// interleaving group \p Grp and vector width \p VF. 1430 void setWideningDecision(const InterleaveGroup<Instruction> *Grp, 1431 ElementCount VF, InstWidening W, 1432 InstructionCost Cost) { 1433 assert(VF.isVector() && "Expected VF >=2"); 1434 /// Broadcast this decicion to all instructions inside the group. 1435 /// But the cost will be assigned to one instruction only. 
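    /// Illustrative example (not from the original comment): for a group
    /// {A[i], A[i+1], A[i+2]} with insert position A[i], the loop below records
    /// the decision W for all three members, but attaches the full Cost only to
    /// A[i]; the other members get a cost of 0 so the group is not costed twice.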
1436 for (unsigned i = 0; i < Grp->getFactor(); ++i) { 1437 if (auto *I = Grp->getMember(i)) { 1438 if (Grp->getInsertPos() == I) 1439 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1440 else 1441 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); 1442 } 1443 } 1444 } 1445 1446 /// Return the cost model decision for the given instruction \p I and vector 1447 /// width \p VF. Return CM_Unknown if this instruction did not pass 1448 /// through the cost modeling. 1449 InstWidening getWideningDecision(Instruction *I, ElementCount VF) const { 1450 assert(VF.isVector() && "Expected VF to be a vector VF"); 1451 // Cost model is not run in the VPlan-native path - return conservative 1452 // result until this changes. 1453 if (EnableVPlanNativePath) 1454 return CM_GatherScatter; 1455 1456 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1457 auto Itr = WideningDecisions.find(InstOnVF); 1458 if (Itr == WideningDecisions.end()) 1459 return CM_Unknown; 1460 return Itr->second.first; 1461 } 1462 1463 /// Return the vectorization cost for the given instruction \p I and vector 1464 /// width \p VF. 1465 InstructionCost getWideningCost(Instruction *I, ElementCount VF) { 1466 assert(VF.isVector() && "Expected VF >=2"); 1467 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1468 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1469 "The cost is not calculated"); 1470 return WideningDecisions[InstOnVF].second; 1471 } 1472 1473 /// Return True if instruction \p I is an optimizable truncate whose operand 1474 /// is an induction variable. Such a truncate will be removed by adding a new 1475 /// induction variable with the destination type. 1476 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) { 1477 // If the instruction is not a truncate, return false. 1478 auto *Trunc = dyn_cast<TruncInst>(I); 1479 if (!Trunc) 1480 return false; 1481 1482 // Get the source and destination types of the truncate. 1483 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1484 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1485 1486 // If the truncate is free for the given types, return false. Replacing a 1487 // free truncate with an induction variable would add an induction variable 1488 // update instruction to each iteration of the loop. We exclude from this 1489 // check the primary induction variable since it will need an update 1490 // instruction regardless. 1491 Value *Op = Trunc->getOperand(0); 1492 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1493 return false; 1494 1495 // If the truncated value is not an induction variable, return false. 1496 return Legal->isInductionPhi(Op); 1497 } 1498 1499 /// Collects the instructions to scalarize for each predicated instruction in 1500 /// the loop. 1501 void collectInstsToScalarize(ElementCount VF); 1502 1503 /// Collect Uniform and Scalar values for the given \p VF. 1504 /// The sets depend on CM decision for Load/Store instructions 1505 /// that may be vectorized as interleave, gather-scatter or scalarized. 1506 void collectUniformsAndScalars(ElementCount VF) { 1507 // Do the analysis once. 
1508 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) 1509 return; 1510 setCostBasedWideningDecision(VF); 1511 collectLoopUniforms(VF); 1512 collectLoopScalars(VF); 1513 } 1514 1515 /// Returns true if the target machine supports masked store operation 1516 /// for the given \p DataType and kind of access to \p Ptr. 1517 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const { 1518 return Legal->isConsecutivePtr(DataType, Ptr) && 1519 TTI.isLegalMaskedStore(DataType, Alignment); 1520 } 1521 1522 /// Returns true if the target machine supports masked load operation 1523 /// for the given \p DataType and kind of access to \p Ptr. 1524 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const { 1525 return Legal->isConsecutivePtr(DataType, Ptr) && 1526 TTI.isLegalMaskedLoad(DataType, Alignment); 1527 } 1528 1529 /// Returns true if the target machine can represent \p V as a masked gather 1530 /// or scatter operation. 1531 bool isLegalGatherOrScatter(Value *V, 1532 ElementCount VF = ElementCount::getFixed(1)) { 1533 bool LI = isa<LoadInst>(V); 1534 bool SI = isa<StoreInst>(V); 1535 if (!LI && !SI) 1536 return false; 1537 auto *Ty = getLoadStoreType(V); 1538 Align Align = getLoadStoreAlignment(V); 1539 if (VF.isVector()) 1540 Ty = VectorType::get(Ty, VF); 1541 return (LI && TTI.isLegalMaskedGather(Ty, Align)) || 1542 (SI && TTI.isLegalMaskedScatter(Ty, Align)); 1543 } 1544 1545 /// Returns true if the target machine supports all of the reduction 1546 /// variables found for the given VF. 1547 bool canVectorizeReductions(ElementCount VF) const { 1548 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 1549 const RecurrenceDescriptor &RdxDesc = Reduction.second; 1550 return TTI.isLegalToVectorizeReduction(RdxDesc, VF); 1551 })); 1552 } 1553 1554 /// Returns true if \p I is an instruction that will be scalarized with 1555 /// predication when vectorizing \p I with vectorization factor \p VF. Such 1556 /// instructions include conditional stores and instructions that may divide 1557 /// by zero. 1558 bool isScalarWithPredication(Instruction *I, ElementCount VF) const; 1559 1560 // Returns true if \p I is an instruction that will be predicated either 1561 // through scalar predication or masked load/store or masked gather/scatter. 1562 // \p VF is the vectorization factor that will be used to vectorize \p I. 1563 // Superset of instructions that return true for isScalarWithPredication. 1564 bool isPredicatedInst(Instruction *I, ElementCount VF, 1565 bool IsKnownUniform = false) { 1566 // When we know the load is uniform and the original scalar loop was not 1567 // predicated we don't need to mark it as a predicated instruction. Any 1568 // vectorised blocks created when tail-folding are something artificial we 1569 // have introduced and we know there is always at least one active lane. 1570 // That's why we call Legal->blockNeedsPredication here because it doesn't 1571 // query tail-folding. 1572 if (IsKnownUniform && isa<LoadInst>(I) && 1573 !Legal->blockNeedsPredication(I->getParent())) 1574 return false; 1575 if (!blockNeedsPredicationForAnyReason(I->getParent())) 1576 return false; 1577 // Loads and stores that need some form of masked operation are predicated 1578 // instructions. 
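    // For instance (illustrative), a store guarded by an if in the original
    // loop body needs a mask and is therefore treated as predicated here.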
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I, VF);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool
  memoryInstructionCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool
  interleavedAccessCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if we're required to use a scalar epilogue for at least
  /// the final iteration of the original loop.
  bool requiresScalarEpilogue(ElementCount VF) const {
    if (!isScalarEpilogueAllowed())
      return false;
    // If we might exit from anywhere but the latch, must run the exiting
    // iteration in scalar form.
    if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
      return true;
    return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is allowed, i.e. it has not been
  /// disallowed due to optsize or a loop hint annotation.
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns true if all loop blocks should be masked to fold the tail of the
  /// loop.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  /// Returns true if the instructions in this block require predication
  /// for any reason, e.g. because tail folding now requires a predicate
  /// or because the block in the original loop was predicated.
  bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
  /// nodes to the chain of instructions representing the reductions. Uses a
  /// MapVector to ensure deterministic iteration order.
  using ReductionChainMap =
      SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;

  /// Return the chain of instructions representing an inloop reduction.
  const ReductionChainMap &getInLoopReductionChains() const {
    return InLoopReductionChains;
  }

  /// Returns true if the Phi is part of an inloop reduction.
  bool isInLoopReduction(PHINode *Phi) const {
    return InLoopReductionChains.count(Phi);
  }

  /// Estimate cost of an intrinsic call instruction CI if it were vectorized
  /// with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
  InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;

  /// Estimate cost of a call instruction CI if it were vectorized with factor
  /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
  InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
                                    bool &NeedToScalarize) const;

  /// Returns true if the per-lane cost of VectorizationFactor A is lower than
  /// that of B.
  bool isMoreProfitable(const VectorizationFactor &A,
                        const VectorizationFactor &B) const;

  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

private:
  unsigned NumPredStores = 0;

  /// Convenience function that returns the value of vscale_range if
  /// vscale_range.min == vscale_range.max, and otherwise returns the value
  /// returned by the corresponding TTI method.
  Optional<unsigned> getVScaleForTuning() const;

  /// \return An upper bound for the vectorization factors for both
  /// fixed and scalable vectorization, where the minimum-known number of
  /// elements is a power-of-2 larger than zero. If scalable vectorization is
  /// disabled or unsupported, then the scalable part will be equal to
  /// ElementCount::getScalable(0).
  FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
                                           ElementCount UserVF,
                                           bool FoldTailByMasking);

  /// \return the maximized element count based on the target's vector
  /// registers and the loop trip-count, but limited to a maximum safe VF.
  /// This is a helper function of computeFeasibleMaxVF.
  /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
  /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
  /// D98509). The issue is currently under investigation and this workaround
  /// will be removed as soon as possible.
  ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
                                       unsigned SmallestType,
                                       unsigned WidestType,
                                       const ElementCount &MaxSafeVF,
                                       bool FoldTailByMasking);

  /// \return the maximum legal scalable VF, based on the safe max number
  /// of elements.
  ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<InstructionCost, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the vectorization factor. If \p Invalid is not nullptr, this function
  /// will add a pair(Instruction*, ElementCount) to \p Invalid for
  /// each instruction that has an Invalid cost for the given VF.
  using InstructionVFPair = std::pair<Instruction *, ElementCount>;
  VectorizationCostTy
  expectedCost(ElementCount VF,
               SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
                                     Type *&VectorTy);

  /// Return the cost of instructions in an inloop reduction pattern, if I is
  /// part of that pattern.
  Optional<InstructionCost>
  getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
                          TTI::TargetCostKind CostKind);

  /// Calculate vectorization cost of memory instruction \p I.
  InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);

  /// The cost computation for a scalarized memory instruction.
  InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);

  /// The cost computation for an interleaving group of memory instructions.
  InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);

  /// The cost computation for a Gather/Scatter instruction.
  InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);

  /// The cost calculation for Load/Store instruction \p I with uniform
  /// pointer -
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  /// element)
  InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);

  /// Estimate the overhead of scalarizing an instruction. This is a
  /// convenience wrapper for the type-based getScalarizationOverhead API.
  InstructionCost getScalarizationOverhead(Instruction *I,
                                           ElementCount VF) const;

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be
  /// truncated to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or is not a multiple of the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;

  /// All blocks of the loop are to be masked to fold the tail of the scalar
  /// iterations.
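  /// For example (illustrative, not from the original comment): with VF=4 and
  /// a trip count of 10, folding the tail by masking runs three masked vector
  /// iterations (the last with only two active lanes) instead of two vector
  /// iterations followed by a two-iteration scalar epilogue.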
1800 bool FoldTailByMasking = false; 1801 1802 /// A map holding scalar costs for different vectorization factors. The 1803 /// presence of a cost for an instruction in the mapping indicates that the 1804 /// instruction will be scalarized when vectorizing with the associated 1805 /// vectorization factor. The entries are VF-ScalarCostTy pairs. 1806 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize; 1807 1808 /// Holds the instructions known to be uniform after vectorization. 1809 /// The data is collected per VF. 1810 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms; 1811 1812 /// Holds the instructions known to be scalar after vectorization. 1813 /// The data is collected per VF. 1814 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars; 1815 1816 /// Holds the instructions (address computations) that are forced to be 1817 /// scalarized. 1818 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1819 1820 /// PHINodes of the reductions that should be expanded in-loop along with 1821 /// their associated chains of reduction operations, in program order from top 1822 /// (PHI) to bottom 1823 ReductionChainMap InLoopReductionChains; 1824 1825 /// A Map of inloop reduction operations and their immediate chain operand. 1826 /// FIXME: This can be removed once reductions can be costed correctly in 1827 /// vplan. This was added to allow quick lookup to the inloop operations, 1828 /// without having to loop through InLoopReductionChains. 1829 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains; 1830 1831 /// Returns the expected difference in cost from scalarizing the expression 1832 /// feeding a predicated instruction \p PredInst. The instructions to 1833 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1834 /// non-negative return value implies the expression will be scalarized. 1835 /// Currently, only single-use chains are considered for scalarization. 1836 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1837 ElementCount VF); 1838 1839 /// Collect the instructions that are uniform after vectorization. An 1840 /// instruction is uniform if we represent it with a single scalar value in 1841 /// the vectorized loop corresponding to each vector iteration. Examples of 1842 /// uniform instructions include pointer operands of consecutive or 1843 /// interleaved memory accesses. Note that although uniformity implies an 1844 /// instruction will be scalar, the reverse is not true. In general, a 1845 /// scalarized instruction will be represented by VF scalar values in the 1846 /// vectorized loop, each corresponding to an iteration of the original 1847 /// scalar loop. 1848 void collectLoopUniforms(ElementCount VF); 1849 1850 /// Collect the instructions that are scalar after vectorization. An 1851 /// instruction is scalar if it is known to be uniform or will be scalarized 1852 /// during vectorization. collectLoopScalars should only add non-uniform nodes 1853 /// to the list if they are used by a load/store instruction that is marked as 1854 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by 1855 /// VF values in the vectorized loop, each corresponding to an iteration of 1856 /// the original scalar loop. 1857 void collectLoopScalars(ElementCount VF); 1858 1859 /// Keeps cost model vectorization decision and cost for instructions. 1860 /// Right now it is used for memory instructions only. 
1861 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1862 std::pair<InstWidening, InstructionCost>>; 1863 1864 DecisionList WideningDecisions; 1865 1866 /// Returns true if \p V is expected to be vectorized and it needs to be 1867 /// extracted. 1868 bool needsExtract(Value *V, ElementCount VF) const { 1869 Instruction *I = dyn_cast<Instruction>(V); 1870 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1871 TheLoop->isLoopInvariant(I)) 1872 return false; 1873 1874 // Assume we can vectorize V (and hence we need extraction) if the 1875 // scalars are not computed yet. This can happen, because it is called 1876 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1877 // the scalars are collected. That should be a safe assumption in most 1878 // cases, because we check if the operands have vectorizable types 1879 // beforehand in LoopVectorizationLegality. 1880 return Scalars.find(VF) == Scalars.end() || 1881 !isScalarAfterVectorization(I, VF); 1882 }; 1883 1884 /// Returns a range containing only operands needing to be extracted. 1885 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1886 ElementCount VF) const { 1887 return SmallVector<Value *, 4>(make_filter_range( 1888 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1889 } 1890 1891 /// Determines if we have the infrastructure to vectorize loop \p L and its 1892 /// epilogue, assuming the main loop is vectorized by \p VF. 1893 bool isCandidateForEpilogueVectorization(const Loop &L, 1894 const ElementCount VF) const; 1895 1896 /// Returns true if epilogue vectorization is considered profitable, and 1897 /// false otherwise. 1898 /// \p VF is the vectorization factor chosen for the original loop. 1899 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1900 1901 public: 1902 /// The loop that we evaluate. 1903 Loop *TheLoop; 1904 1905 /// Predicated scalar evolution analysis. 1906 PredicatedScalarEvolution &PSE; 1907 1908 /// Loop Info analysis. 1909 LoopInfo *LI; 1910 1911 /// Vectorization legality. 1912 LoopVectorizationLegality *Legal; 1913 1914 /// Vector target information. 1915 const TargetTransformInfo &TTI; 1916 1917 /// Target Library Info. 1918 const TargetLibraryInfo *TLI; 1919 1920 /// Demanded bits analysis. 1921 DemandedBits *DB; 1922 1923 /// Assumption cache. 1924 AssumptionCache *AC; 1925 1926 /// Interface to emit optimization remarks. 1927 OptimizationRemarkEmitter *ORE; 1928 1929 const Function *TheFunction; 1930 1931 /// Loop Vectorize Hint. 1932 const LoopVectorizeHints *Hints; 1933 1934 /// The interleave access information contains groups of interleaved accesses 1935 /// with the same stride and close to each other. 1936 InterleavedAccessInfo &InterleaveInfo; 1937 1938 /// Values to ignore in the cost model. 1939 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1940 1941 /// Values to ignore in the cost model when VF > 1. 1942 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1943 1944 /// All element types found in the loop. 1945 SmallPtrSet<Type *, 16> ElementTypesInLoop; 1946 1947 /// Profitable vector factors. 1948 SmallVector<VectorizationFactor, 8> ProfitableVFs; 1949 }; 1950 } // end namespace llvm 1951 1952 /// Helper struct to manage generating runtime checks for vectorization. 1953 /// 1954 /// The runtime checks are created up-front in temporary blocks to allow better 1955 /// estimating the cost and un-linked from the existing IR. After deciding to 1956 /// vectorize, the checks are moved back. 
If deciding not to vectorize, the 1957 /// temporary blocks are completely removed. 1958 class GeneratedRTChecks { 1959 /// Basic block which contains the generated SCEV checks, if any. 1960 BasicBlock *SCEVCheckBlock = nullptr; 1961 1962 /// The value representing the result of the generated SCEV checks. If it is 1963 /// nullptr, either no SCEV checks have been generated or they have been used. 1964 Value *SCEVCheckCond = nullptr; 1965 1966 /// Basic block which contains the generated memory runtime checks, if any. 1967 BasicBlock *MemCheckBlock = nullptr; 1968 1969 /// The value representing the result of the generated memory runtime checks. 1970 /// If it is nullptr, either no memory runtime checks have been generated or 1971 /// they have been used. 1972 Value *MemRuntimeCheckCond = nullptr; 1973 1974 DominatorTree *DT; 1975 LoopInfo *LI; 1976 1977 SCEVExpander SCEVExp; 1978 SCEVExpander MemCheckExp; 1979 1980 public: 1981 GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI, 1982 const DataLayout &DL) 1983 : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"), 1984 MemCheckExp(SE, DL, "scev.check") {} 1985 1986 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can 1987 /// accurately estimate the cost of the runtime checks. The blocks are 1988 /// un-linked from the IR and is added back during vector code generation. If 1989 /// there is no vector code generation, the check blocks are removed 1990 /// completely. 1991 void Create(Loop *L, const LoopAccessInfo &LAI, 1992 const SCEVUnionPredicate &UnionPred) { 1993 1994 BasicBlock *LoopHeader = L->getHeader(); 1995 BasicBlock *Preheader = L->getLoopPreheader(); 1996 1997 // Use SplitBlock to create blocks for SCEV & memory runtime checks to 1998 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those 1999 // may be used by SCEVExpander. The blocks will be un-linked from their 2000 // predecessors and removed from LI & DT at the end of the function. 2001 if (!UnionPred.isAlwaysTrue()) { 2002 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI, 2003 nullptr, "vector.scevcheck"); 2004 2005 SCEVCheckCond = SCEVExp.expandCodeForPredicate( 2006 &UnionPred, SCEVCheckBlock->getTerminator()); 2007 } 2008 2009 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking(); 2010 if (RtPtrChecking.Need) { 2011 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader; 2012 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr, 2013 "vector.memcheck"); 2014 2015 MemRuntimeCheckCond = 2016 addRuntimeChecks(MemCheckBlock->getTerminator(), L, 2017 RtPtrChecking.getChecks(), MemCheckExp); 2018 assert(MemRuntimeCheckCond && 2019 "no RT checks generated although RtPtrChecking " 2020 "claimed checks are required"); 2021 } 2022 2023 if (!MemCheckBlock && !SCEVCheckBlock) 2024 return; 2025 2026 // Unhook the temporary block with the checks, update various places 2027 // accordingly. 
2028 if (SCEVCheckBlock) 2029 SCEVCheckBlock->replaceAllUsesWith(Preheader); 2030 if (MemCheckBlock) 2031 MemCheckBlock->replaceAllUsesWith(Preheader); 2032 2033 if (SCEVCheckBlock) { 2034 SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2035 new UnreachableInst(Preheader->getContext(), SCEVCheckBlock); 2036 Preheader->getTerminator()->eraseFromParent(); 2037 } 2038 if (MemCheckBlock) { 2039 MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 2040 new UnreachableInst(Preheader->getContext(), MemCheckBlock); 2041 Preheader->getTerminator()->eraseFromParent(); 2042 } 2043 2044 DT->changeImmediateDominator(LoopHeader, Preheader); 2045 if (MemCheckBlock) { 2046 DT->eraseNode(MemCheckBlock); 2047 LI->removeBlock(MemCheckBlock); 2048 } 2049 if (SCEVCheckBlock) { 2050 DT->eraseNode(SCEVCheckBlock); 2051 LI->removeBlock(SCEVCheckBlock); 2052 } 2053 } 2054 2055 /// Remove the created SCEV & memory runtime check blocks & instructions, if 2056 /// unused. 2057 ~GeneratedRTChecks() { 2058 SCEVExpanderCleaner SCEVCleaner(SCEVExp); 2059 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp); 2060 if (!SCEVCheckCond) 2061 SCEVCleaner.markResultUsed(); 2062 2063 if (!MemRuntimeCheckCond) 2064 MemCheckCleaner.markResultUsed(); 2065 2066 if (MemRuntimeCheckCond) { 2067 auto &SE = *MemCheckExp.getSE(); 2068 // Memory runtime check generation creates compares that use expanded 2069 // values. Remove them before running the SCEVExpanderCleaners. 2070 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { 2071 if (MemCheckExp.isInsertedInstruction(&I)) 2072 continue; 2073 SE.forgetValue(&I); 2074 I.eraseFromParent(); 2075 } 2076 } 2077 MemCheckCleaner.cleanup(); 2078 SCEVCleaner.cleanup(); 2079 2080 if (SCEVCheckCond) 2081 SCEVCheckBlock->eraseFromParent(); 2082 if (MemRuntimeCheckCond) 2083 MemCheckBlock->eraseFromParent(); 2084 } 2085 2086 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and 2087 /// adjusts the branches to branch to the vector preheader or \p Bypass, 2088 /// depending on the generated condition. 2089 BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass, 2090 BasicBlock *LoopVectorPreHeader, 2091 BasicBlock *LoopExitBlock) { 2092 if (!SCEVCheckCond) 2093 return nullptr; 2094 if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond)) 2095 if (C->isZero()) 2096 return nullptr; 2097 2098 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2099 2100 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); 2101 // Create new preheader for vector loop. 2102 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2103 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI); 2104 2105 SCEVCheckBlock->getTerminator()->eraseFromParent(); 2106 SCEVCheckBlock->moveBefore(LoopVectorPreHeader); 2107 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2108 SCEVCheckBlock); 2109 2110 DT->addNewBlock(SCEVCheckBlock, Pred); 2111 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock); 2112 2113 ReplaceInstWithInst( 2114 SCEVCheckBlock->getTerminator(), 2115 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond)); 2116 // Mark the check as used, to prevent it from being removed during cleanup. 2117 SCEVCheckCond = nullptr; 2118 return SCEVCheckBlock; 2119 } 2120 2121 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts 2122 /// the branches to branch to the vector preheader or \p Bypass, depending on 2123 /// the generated condition. 
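  /// Note that, like emitSCEVChecks, a successful call marks the check as used
  /// by clearing MemRuntimeCheckCond, so the destructor will not erase the
  /// block later on.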
2124 BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass, 2125 BasicBlock *LoopVectorPreHeader) { 2126 // Check if we generated code that checks in runtime if arrays overlap. 2127 if (!MemRuntimeCheckCond) 2128 return nullptr; 2129 2130 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2131 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2132 MemCheckBlock); 2133 2134 DT->addNewBlock(MemCheckBlock, Pred); 2135 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock); 2136 MemCheckBlock->moveBefore(LoopVectorPreHeader); 2137 2138 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2139 PL->addBasicBlockToLoop(MemCheckBlock, *LI); 2140 2141 ReplaceInstWithInst( 2142 MemCheckBlock->getTerminator(), 2143 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond)); 2144 MemCheckBlock->getTerminator()->setDebugLoc( 2145 Pred->getTerminator()->getDebugLoc()); 2146 2147 // Mark the check as used, to prevent it from being removed during cleanup. 2148 MemRuntimeCheckCond = nullptr; 2149 return MemCheckBlock; 2150 } 2151 }; 2152 2153 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 2154 // vectorization. The loop needs to be annotated with #pragma omp simd 2155 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 2156 // vector length information is not provided, vectorization is not considered 2157 // explicit. Interleave hints are not allowed either. These limitations will be 2158 // relaxed in the future. 2159 // Please, note that we are currently forced to abuse the pragma 'clang 2160 // vectorize' semantics. This pragma provides *auto-vectorization hints* 2161 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 2162 // provides *explicit vectorization hints* (LV can bypass legal checks and 2163 // assume that vectorization is legal). However, both hints are implemented 2164 // using the same metadata (llvm.loop.vectorize, processed by 2165 // LoopVectorizeHints). This will be fixed in the future when the native IR 2166 // representation for pragma 'omp simd' is introduced. 2167 static bool isExplicitVecOuterLoop(Loop *OuterLp, 2168 OptimizationRemarkEmitter *ORE) { 2169 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 2170 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 2171 2172 // Only outer loops with an explicit vectorization hint are supported. 2173 // Unannotated outer loops are ignored. 2174 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 2175 return false; 2176 2177 Function *Fn = OuterLp->getHeader()->getParent(); 2178 if (!Hints.allowVectorization(Fn, OuterLp, 2179 true /*VectorizeOnlyWhenForced*/)) { 2180 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 2181 return false; 2182 } 2183 2184 if (Hints.getInterleave() > 1) { 2185 // TODO: Interleave support is future work. 2186 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 2187 "outer loops.\n"); 2188 Hints.emitRemarkWithHints(); 2189 return false; 2190 } 2191 2192 return true; 2193 } 2194 2195 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 2196 OptimizationRemarkEmitter *ORE, 2197 SmallVectorImpl<Loop *> &V) { 2198 // Collect inner loops and outer loops without irreducible control flow. For 2199 // now, only collect outer loops that have explicit vectorization hints. If we 2200 // are stress testing the VPlan H-CFG construction, we collect the outermost 2201 // loop of every loop nest. 
2202 if (L.isInnermost() || VPlanBuildStressTest || 2203 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 2204 LoopBlocksRPO RPOT(&L); 2205 RPOT.perform(LI); 2206 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 2207 V.push_back(&L); 2208 // TODO: Collect inner loops inside marked outer loops in case 2209 // vectorization fails for the outer loop. Do not invoke 2210 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 2211 // already known to be reducible. We can use an inherited attribute for 2212 // that. 2213 return; 2214 } 2215 } 2216 for (Loop *InnerL : L) 2217 collectSupportedLoops(*InnerL, LI, ORE, V); 2218 } 2219 2220 namespace { 2221 2222 /// The LoopVectorize Pass. 2223 struct LoopVectorize : public FunctionPass { 2224 /// Pass identification, replacement for typeid 2225 static char ID; 2226 2227 LoopVectorizePass Impl; 2228 2229 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 2230 bool VectorizeOnlyWhenForced = false) 2231 : FunctionPass(ID), 2232 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 2233 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2234 } 2235 2236 bool runOnFunction(Function &F) override { 2237 if (skipFunction(F)) 2238 return false; 2239 2240 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2241 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2242 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2243 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2244 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2245 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2246 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 2247 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2248 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2249 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2250 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2251 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2252 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 2253 2254 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2255 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2256 2257 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2258 GetLAA, *ORE, PSI).MadeAnyChange; 2259 } 2260 2261 void getAnalysisUsage(AnalysisUsage &AU) const override { 2262 AU.addRequired<AssumptionCacheTracker>(); 2263 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2264 AU.addRequired<DominatorTreeWrapperPass>(); 2265 AU.addRequired<LoopInfoWrapperPass>(); 2266 AU.addRequired<ScalarEvolutionWrapperPass>(); 2267 AU.addRequired<TargetTransformInfoWrapperPass>(); 2268 AU.addRequired<AAResultsWrapperPass>(); 2269 AU.addRequired<LoopAccessLegacyAnalysis>(); 2270 AU.addRequired<DemandedBitsWrapperPass>(); 2271 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2272 AU.addRequired<InjectTLIMappingsLegacy>(); 2273 2274 // We currently do not preserve loopinfo/dominator analyses with outer loop 2275 // vectorization. Until this is addressed, mark these analyses as preserved 2276 // only for non-VPlan-native path. 2277 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 
2278 if (!EnableVPlanNativePath) { 2279 AU.addPreserved<LoopInfoWrapperPass>(); 2280 AU.addPreserved<DominatorTreeWrapperPass>(); 2281 } 2282 2283 AU.addPreserved<BasicAAWrapperPass>(); 2284 AU.addPreserved<GlobalsAAWrapperPass>(); 2285 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 2286 } 2287 }; 2288 2289 } // end anonymous namespace 2290 2291 //===----------------------------------------------------------------------===// 2292 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2293 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2294 //===----------------------------------------------------------------------===// 2295 2296 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2297 // We need to place the broadcast of invariant variables outside the loop, 2298 // but only if it's proven safe to do so. Else, broadcast will be inside 2299 // vector loop body. 2300 Instruction *Instr = dyn_cast<Instruction>(V); 2301 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2302 (!Instr || 2303 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2304 // Place the code for broadcasting invariant variables in the new preheader. 2305 IRBuilder<>::InsertPointGuard Guard(Builder); 2306 if (SafeToHoist) 2307 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2308 2309 // Broadcast the scalar into all locations in the vector. 2310 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2311 2312 return Shuf; 2313 } 2314 2315 /// This function adds 2316 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...) 2317 /// to each vector element of Val. The sequence starts at StartIndex. 2318 /// \p Opcode is relevant for FP induction variable. 2319 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step, 2320 Instruction::BinaryOps BinOp, ElementCount VF, 2321 IRBuilderBase &Builder) { 2322 assert(VF.isVector() && "only vector VFs are supported"); 2323 2324 // Create and check the types. 2325 auto *ValVTy = cast<VectorType>(Val->getType()); 2326 ElementCount VLen = ValVTy->getElementCount(); 2327 2328 Type *STy = Val->getType()->getScalarType(); 2329 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2330 "Induction Step must be an integer or FP"); 2331 assert(Step->getType() == STy && "Step has wrong type"); 2332 2333 SmallVector<Constant *, 8> Indices; 2334 2335 // Create a vector of consecutive numbers from zero to VF. 2336 VectorType *InitVecValVTy = ValVTy; 2337 Type *InitVecValSTy = STy; 2338 if (STy->isFloatingPointTy()) { 2339 InitVecValSTy = 2340 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits()); 2341 InitVecValVTy = VectorType::get(InitVecValSTy, VLen); 2342 } 2343 Value *InitVec = Builder.CreateStepVector(InitVecValVTy); 2344 2345 // Splat the StartIdx 2346 Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx); 2347 2348 if (STy->isIntegerTy()) { 2349 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat); 2350 Step = Builder.CreateVectorSplat(VLen, Step); 2351 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2352 // FIXME: The newly created binary instructions should contain nsw/nuw 2353 // flags, which can be found from the original scalar operations. 2354 Step = Builder.CreateMul(InitVec, Step); 2355 return Builder.CreateAdd(Val, Step, "induction"); 2356 } 2357 2358 // Floating point induction. 
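  // Illustrative example (not from the original comment): for VF = 4,
  // StartIdx = 0, Step = s and BinOp = FAdd, the code below produces
  //   <Val[0] + 0.0 * s, Val[1] + 1.0 * s, Val[2] + 2.0 * s, Val[3] + 3.0 * s>.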
2359 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2360 "Binary Opcode should be specified for FP induction"); 2361 InitVec = Builder.CreateUIToFP(InitVec, ValVTy); 2362 InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat); 2363 2364 Step = Builder.CreateVectorSplat(VLen, Step); 2365 Value *MulOp = Builder.CreateFMul(InitVec, Step); 2366 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2367 } 2368 2369 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 2370 const InductionDescriptor &II, Value *Step, Value *Start, 2371 Instruction *EntryVal, VPValue *Def, VPTransformState &State) { 2372 IRBuilderBase &Builder = State.Builder; 2373 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2374 "Expected either an induction phi-node or a truncate of it!"); 2375 2376 // Construct the initial value of the vector IV in the vector loop preheader 2377 auto CurrIP = Builder.saveIP(); 2378 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2379 if (isa<TruncInst>(EntryVal)) { 2380 assert(Start->getType()->isIntegerTy() && 2381 "Truncation requires an integer type"); 2382 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2383 Step = Builder.CreateTrunc(Step, TruncType); 2384 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2385 } 2386 2387 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0); 2388 Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start); 2389 Value *SteppedStart = getStepVector( 2390 SplatStart, Zero, Step, II.getInductionOpcode(), State.VF, State.Builder); 2391 2392 // We create vector phi nodes for both integer and floating-point induction 2393 // variables. Here, we determine the kind of arithmetic we will perform. 2394 Instruction::BinaryOps AddOp; 2395 Instruction::BinaryOps MulOp; 2396 if (Step->getType()->isIntegerTy()) { 2397 AddOp = Instruction::Add; 2398 MulOp = Instruction::Mul; 2399 } else { 2400 AddOp = II.getInductionOpcode(); 2401 MulOp = Instruction::FMul; 2402 } 2403 2404 // Multiply the vectorization factor by the step using integer or 2405 // floating-point arithmetic as appropriate. 2406 Type *StepType = Step->getType(); 2407 Value *RuntimeVF; 2408 if (Step->getType()->isFloatingPointTy()) 2409 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF); 2410 else 2411 RuntimeVF = getRuntimeVF(Builder, StepType, State.VF); 2412 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 2413 2414 // Create a vector splat to use in the induction update. 2415 // 2416 // FIXME: If the step is non-constant, we create the vector splat with 2417 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2418 // handle a constant vector splat. 2419 Value *SplatVF = isa<Constant>(Mul) 2420 ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul)) 2421 : Builder.CreateVectorSplat(State.VF, Mul); 2422 Builder.restoreIP(CurrIP); 2423 2424 // We may need to add the step a number of times, depending on the unroll 2425 // factor. The last of those goes into the PHI. 
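  // Illustrative example (not from the original comment): with UF = 2 the loop
  // below defines part 0 as %vec.ind and part 1 as %vec.ind + (VF * Step); the
  // final value, %vec.ind + 2 * (VF * Step), becomes %vec.ind.next and feeds
  // the phi on the backedge.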
2426 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2427 &*LoopVectorBody->getFirstInsertionPt()); 2428 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 2429 Instruction *LastInduction = VecInd; 2430 for (unsigned Part = 0; Part < UF; ++Part) { 2431 State.set(Def, LastInduction, Part); 2432 2433 if (isa<TruncInst>(EntryVal)) 2434 addMetadata(LastInduction, EntryVal); 2435 2436 LastInduction = cast<Instruction>( 2437 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 2438 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 2439 } 2440 2441 // Move the last step to the end of the latch block. This ensures consistent 2442 // placement of all induction updates. 2443 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2444 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2445 LastInduction->moveBefore(Br); 2446 LastInduction->setName("vec.ind.next"); 2447 2448 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2449 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2450 } 2451 2452 /// Compute scalar induction steps. \p ScalarIV is the scalar induction 2453 /// variable on which to base the steps, \p Step is the size of the step, and 2454 /// \p EntryVal is the value from the original loop that maps to the steps. 2455 /// Note that \p EntryVal doesn't have to be an induction variable - it 2456 /// can also be a truncate instruction. 2457 static void buildScalarSteps(Value *ScalarIV, Value *Step, 2458 Instruction *EntryVal, 2459 const InductionDescriptor &ID, VPValue *Def, 2460 VPTransformState &State) { 2461 IRBuilderBase &Builder = State.Builder; 2462 // We shouldn't have to build scalar steps if we aren't vectorizing. 2463 assert(State.VF.isVector() && "VF should be greater than one"); 2464 // Get the value type and ensure it and the step have the same integer type. 2465 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2466 assert(ScalarIVTy == Step->getType() && 2467 "Val and Step should have the same type"); 2468 2469 // We build scalar steps for both integer and floating-point induction 2470 // variables. Here, we determine the kind of arithmetic we will perform. 2471 Instruction::BinaryOps AddOp; 2472 Instruction::BinaryOps MulOp; 2473 if (ScalarIVTy->isIntegerTy()) { 2474 AddOp = Instruction::Add; 2475 MulOp = Instruction::Mul; 2476 } else { 2477 AddOp = ID.getInductionOpcode(); 2478 MulOp = Instruction::FMul; 2479 } 2480 2481 // Determine the number of scalars we need to generate for each unroll 2482 // iteration. 2483 bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def); 2484 unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue(); 2485 // Compute the scalar steps and save the results in State. 
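  // Illustrative example (not from the original comment): for an integer IV
  // with a fixed VF of 4 and UF = 2, part 0 produces the lane values
  // ScalarIV + {0,1,2,3} * Step and part 1 produces ScalarIV + {4,5,6,7} * Step.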
2486 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2487 ScalarIVTy->getScalarSizeInBits()); 2488 Type *VecIVTy = nullptr; 2489 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; 2490 if (!FirstLaneOnly && State.VF.isScalable()) { 2491 VecIVTy = VectorType::get(ScalarIVTy, State.VF); 2492 UnitStepVec = 2493 Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF)); 2494 SplatStep = Builder.CreateVectorSplat(State.VF, Step); 2495 SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV); 2496 } 2497 2498 for (unsigned Part = 0; Part < State.UF; ++Part) { 2499 Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part); 2500 2501 if (!FirstLaneOnly && State.VF.isScalable()) { 2502 auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0); 2503 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); 2504 if (ScalarIVTy->isFloatingPointTy()) 2505 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); 2506 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); 2507 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); 2508 State.set(Def, Add, Part); 2509 // It's useful to record the lane values too for the known minimum number 2510 // of elements so we do those below. This improves the code quality when 2511 // trying to extract the first element, for example. 2512 } 2513 2514 if (ScalarIVTy->isFloatingPointTy()) 2515 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); 2516 2517 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2518 Value *StartIdx = Builder.CreateBinOp( 2519 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); 2520 // The step returned by `createStepForVF` is a runtime-evaluated value 2521 // when VF is scalable. Otherwise, it should be folded into a Constant. 2522 assert((State.VF.isScalable() || isa<Constant>(StartIdx)) && 2523 "Expected StartIdx to be folded to a constant when VF is not " 2524 "scalable"); 2525 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2526 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2527 State.set(Def, Add, VPIteration(Part, Lane)); 2528 } 2529 } 2530 } 2531 2532 /// Compute the transformed value of Index at offset StartValue using step 2533 /// StepValue. 2534 /// For integer induction, returns StartValue + Index * StepValue. 2535 /// For pointer induction, returns StartValue[Index * StepValue]. 2536 /// FIXME: The newly created binary instructions should contain nsw/nuw 2537 /// flags, which can be found from the original scalar operations. 2538 static Value *emitTransformedIndex(IRBuilderBase &B, Value *Index, 2539 ScalarEvolution *SE, const DataLayout &DL, 2540 const InductionDescriptor &ID, LoopInfo &LI, 2541 BasicBlock *VectorHeader) { 2542 2543 SCEVExpander Exp(*SE, DL, "induction"); 2544 auto Step = ID.getStep(); 2545 auto StartValue = ID.getStartValue(); 2546 assert(Index->getType()->getScalarType() == Step->getType() && 2547 "Index scalar type does not match StepValue type"); 2548 2549 // Note: the IR at this point is broken. We cannot use SE to create any new 2550 // SCEV and then expand it, hoping that SCEV's simplification will give us 2551 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 2552 // lead to various SCEV crashes. So all we can do is to use builder and rely 2553 // on InstCombine for future simplifications. Here we handle some trivial 2554 // cases only. 
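  // The helpers below only fold the trivial identities X + 0 => X, 0 + Y => Y,
  // X * 1 => X and 1 * Y => Y; everything else is left to InstCombine.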
2555 auto CreateAdd = [&B](Value *X, Value *Y) { 2556 assert(X->getType() == Y->getType() && "Types don't match!"); 2557 if (auto *CX = dyn_cast<ConstantInt>(X)) 2558 if (CX->isZero()) 2559 return Y; 2560 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2561 if (CY->isZero()) 2562 return X; 2563 return B.CreateAdd(X, Y); 2564 }; 2565 2566 // We allow X to be a vector type, in which case Y will potentially be 2567 // splatted into a vector with the same element count. 2568 auto CreateMul = [&B](Value *X, Value *Y) { 2569 assert(X->getType()->getScalarType() == Y->getType() && 2570 "Types don't match!"); 2571 if (auto *CX = dyn_cast<ConstantInt>(X)) 2572 if (CX->isOne()) 2573 return Y; 2574 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2575 if (CY->isOne()) 2576 return X; 2577 VectorType *XVTy = dyn_cast<VectorType>(X->getType()); 2578 if (XVTy && !isa<VectorType>(Y->getType())) 2579 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); 2580 return B.CreateMul(X, Y); 2581 }; 2582 2583 // Get a suitable insert point for SCEV expansion. For blocks in the vector 2584 // loop, choose the end of the vector loop header (=VectorHeader), because 2585 // the DomTree is not kept up-to-date for additional blocks generated in the 2586 // vector loop. By using the header as insertion point, we guarantee that the 2587 // expanded instructions dominate all their uses. 2588 auto GetInsertPoint = [&B, &LI, VectorHeader]() { 2589 BasicBlock *InsertBB = B.GetInsertPoint()->getParent(); 2590 if (InsertBB != VectorHeader && 2591 LI.getLoopFor(VectorHeader) == LI.getLoopFor(InsertBB)) 2592 return VectorHeader->getTerminator(); 2593 return &*B.GetInsertPoint(); 2594 }; 2595 2596 switch (ID.getKind()) { 2597 case InductionDescriptor::IK_IntInduction: { 2598 assert(!isa<VectorType>(Index->getType()) && 2599 "Vector indices not supported for integer inductions yet"); 2600 assert(Index->getType() == StartValue->getType() && 2601 "Index type does not match StartValue type"); 2602 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 2603 return B.CreateSub(StartValue, Index); 2604 auto *Offset = CreateMul( 2605 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())); 2606 return CreateAdd(StartValue, Offset); 2607 } 2608 case InductionDescriptor::IK_PtrInduction: { 2609 assert(isa<SCEVConstant>(Step) && 2610 "Expected constant step for pointer induction"); 2611 return B.CreateGEP( 2612 ID.getElementType(), StartValue, 2613 CreateMul(Index, 2614 Exp.expandCodeFor(Step, Index->getType()->getScalarType(), 2615 GetInsertPoint()))); 2616 } 2617 case InductionDescriptor::IK_FpInduction: { 2618 assert(!isa<VectorType>(Index->getType()) && 2619 "Vector indices not supported for FP inductions yet"); 2620 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2621 auto InductionBinOp = ID.getInductionBinOp(); 2622 assert(InductionBinOp && 2623 (InductionBinOp->getOpcode() == Instruction::FAdd || 2624 InductionBinOp->getOpcode() == Instruction::FSub) && 2625 "Original bin op should be defined for FP induction"); 2626 2627 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 2628 Value *MulExp = B.CreateFMul(StepValue, Index); 2629 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 2630 "induction"); 2631 } 2632 case InductionDescriptor::IK_NoInduction: 2633 return nullptr; 2634 } 2635 llvm_unreachable("invalid enum"); 2636 } 2637 2638 void InnerLoopVectorizer::widenIntOrFpInduction( 2639 PHINode *IV, VPWidenIntOrFpInductionRecipe *Def, VPTransformState &State, 2640 Value 
*CanonicalIV) { 2641 Value *Start = Def->getStartValue()->getLiveInIRValue(); 2642 const InductionDescriptor &ID = Def->getInductionDescriptor(); 2643 TruncInst *Trunc = Def->getTruncInst(); 2644 IRBuilderBase &Builder = State.Builder; 2645 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2646 assert(!State.VF.isZero() && "VF must be non-zero"); 2647 2648 // The value from the original loop to which we are mapping the new induction 2649 // variable. 2650 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2651 2652 auto &DL = EntryVal->getModule()->getDataLayout(); 2653 2654 // Generate code for the induction step. Note that induction steps are 2655 // required to be loop-invariant 2656 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 2657 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 2658 "Induction step should be loop invariant"); 2659 if (PSE.getSE()->isSCEVable(IV->getType())) { 2660 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2661 return Exp.expandCodeFor(Step, Step->getType(), 2662 State.CFG.VectorPreHeader->getTerminator()); 2663 } 2664 return cast<SCEVUnknown>(Step)->getValue(); 2665 }; 2666 2667 // The scalar value to broadcast. This is derived from the canonical 2668 // induction variable. If a truncation type is given, truncate the canonical 2669 // induction variable and step. Otherwise, derive these values from the 2670 // induction descriptor. 2671 auto CreateScalarIV = [&](Value *&Step) -> Value * { 2672 Value *ScalarIV = CanonicalIV; 2673 Type *NeededType = IV->getType(); 2674 if (!Def->isCanonical() || ScalarIV->getType() != NeededType) { 2675 ScalarIV = 2676 NeededType->isIntegerTy() 2677 ? Builder.CreateSExtOrTrunc(ScalarIV, NeededType) 2678 : Builder.CreateCast(Instruction::SIToFP, ScalarIV, NeededType); 2679 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID, 2680 *State.LI, State.CFG.PrevBB); 2681 ScalarIV->setName("offset.idx"); 2682 } 2683 if (Trunc) { 2684 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2685 assert(Step->getType()->isIntegerTy() && 2686 "Truncation requires an integer step"); 2687 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 2688 Step = Builder.CreateTrunc(Step, TruncType); 2689 } 2690 return ScalarIV; 2691 }; 2692 2693 // Fast-math-flags propagate from the original induction instruction. 2694 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 2695 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) 2696 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); 2697 2698 // Now do the actual transformations, and start with creating the step value. 
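  // Illustrative summary (not from the original comment): for a scalar VF the
  // induction is simply unrolled, part P getting ScalarIV + P * Step (integer
  // case shown); for vector VFs a vector phi and/or per-lane scalar steps are
  // created below, depending on what the recipe needs.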
2699 Value *Step = CreateStepValue(ID.getStep()); 2700 if (State.VF.isScalar()) { 2701 Value *ScalarIV = CreateScalarIV(Step); 2702 Type *ScalarTy = IntegerType::get(ScalarIV->getContext(), 2703 Step->getType()->getScalarSizeInBits()); 2704 2705 Instruction::BinaryOps IncOp = ID.getInductionOpcode(); 2706 if (IncOp == Instruction::BinaryOpsEnd) 2707 IncOp = Instruction::Add; 2708 for (unsigned Part = 0; Part < UF; ++Part) { 2709 Value *StartIdx = ConstantInt::get(ScalarTy, Part); 2710 Instruction::BinaryOps MulOp = Instruction::Mul; 2711 if (Step->getType()->isFloatingPointTy()) { 2712 StartIdx = Builder.CreateUIToFP(StartIdx, Step->getType()); 2713 MulOp = Instruction::FMul; 2714 } 2715 2716 Value *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2717 Value *EntryPart = Builder.CreateBinOp(IncOp, ScalarIV, Mul, "induction"); 2718 State.set(Def, EntryPart, Part); 2719 if (Trunc) { 2720 assert(!Step->getType()->isFloatingPointTy() && 2721 "fp inductions shouldn't be truncated"); 2722 addMetadata(EntryPart, Trunc); 2723 } 2724 } 2725 return; 2726 } 2727 2728 // Create a new independent vector induction variable, if one is needed. 2729 if (Def->needsVectorIV()) 2730 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State); 2731 2732 if (Def->needsScalarIV()) { 2733 // Create scalar steps that can be used by instructions we will later 2734 // scalarize. Note that the addition of the scalar steps will not increase 2735 // the number of instructions in the loop in the common case prior to 2736 // InstCombine. We will be trading one vector extract for each scalar step. 2737 Value *ScalarIV = CreateScalarIV(Step); 2738 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State); 2739 } 2740 } 2741 2742 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2743 const VPIteration &Instance, 2744 VPTransformState &State) { 2745 Value *ScalarInst = State.get(Def, Instance); 2746 Value *VectorValue = State.get(Def, Instance.Part); 2747 VectorValue = Builder.CreateInsertElement( 2748 VectorValue, ScalarInst, 2749 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2750 State.set(Def, VectorValue, Instance.Part); 2751 } 2752 2753 // Return whether we allow using masked interleave-groups (for dealing with 2754 // strided loads/stores that reside in predicated blocks, or for dealing 2755 // with gaps). 2756 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2757 // If an override option has been passed in for interleaved accesses, use it. 2758 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2759 return EnableMaskedInterleavedMemAccesses; 2760 2761 return TTI.enableMaskedInterleavedAccessVectorization(); 2762 } 2763 2764 // Try to vectorize the interleave group that \p Instr belongs to. 2765 // 2766 // E.g. Translate following interleaved load group (factor = 3): 2767 // for (i = 0; i < N; i+=3) { 2768 // R = Pic[i]; // Member of index 0 2769 // G = Pic[i+1]; // Member of index 1 2770 // B = Pic[i+2]; // Member of index 2 2771 // ... // do something to R, G, B 2772 // } 2773 // To: 2774 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2775 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2776 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2777 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2778 // 2779 // Or translate following interleaved store group (factor = 3): 2780 // for (i = 0; i < N; i+=3) { 2781 // ... 
do something to R, G, B 2782 // Pic[i] = R; // Member of index 0 2783 // Pic[i+1] = G; // Member of index 1 2784 // Pic[i+2] = B; // Member of index 2 2785 // } 2786 // To: 2787 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2788 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2789 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2790 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2791 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2792 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2793 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2794 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2795 VPValue *BlockInMask) { 2796 Instruction *Instr = Group->getInsertPos(); 2797 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2798 2799 // Prepare for the vector type of the interleaved load/store. 2800 Type *ScalarTy = getLoadStoreType(Instr); 2801 unsigned InterleaveFactor = Group->getFactor(); 2802 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2803 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2804 2805 // Prepare for the new pointers. 2806 SmallVector<Value *, 2> AddrParts; 2807 unsigned Index = Group->getIndex(Instr); 2808 2809 // TODO: extend the masked interleaved-group support to reversed access. 2810 assert((!BlockInMask || !Group->isReverse()) && 2811 "Reversed masked interleave-group not supported."); 2812 2813 // If the group is reverse, adjust the index to refer to the last vector lane 2814 // instead of the first. We adjust the index from the first vector lane, 2815 // rather than directly getting the pointer for lane VF - 1, because the 2816 // pointer operand of the interleaved access is supposed to be uniform. For 2817 // uniform instructions, we're only required to generate a value for the 2818 // first vector lane in each unroll iteration. 2819 if (Group->isReverse()) 2820 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2821 2822 for (unsigned Part = 0; Part < UF; Part++) { 2823 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2824 setDebugLocFromInst(AddrPart); 2825 2826 // Notice current instruction could be any index. Need to adjust the address 2827 // to the member of index 0. 2828 // 2829 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2830 // b = A[i]; // Member of index 0 2831 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2832 // 2833 // E.g. A[i+1] = a; // Member of index 1 2834 // A[i] = b; // Member of index 0 2835 // A[i+2] = c; // Member of index 2 (Current instruction) 2836 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2837 2838 bool InBounds = false; 2839 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2840 InBounds = gep->isInBounds(); 2841 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2842 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2843 2844 // Cast to the vector pointer type. 
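  // For example (illustrative, assuming VF = 4, factor = 3 and i32 members):
  // the member-0 address computed above is bitcast to a pointer to
  // <12 x i32> (in the original address space), so a single wide memory
  // operation can cover all four R,G,B tuples.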
2845 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2846 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2847 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2848 } 2849 2850 setDebugLocFromInst(Instr); 2851 Value *PoisonVec = PoisonValue::get(VecTy); 2852 2853 Value *MaskForGaps = nullptr; 2854 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2855 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2856 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2857 } 2858 2859 // Vectorize the interleaved load group. 2860 if (isa<LoadInst>(Instr)) { 2861 // For each unroll part, create a wide load for the group. 2862 SmallVector<Value *, 2> NewLoads; 2863 for (unsigned Part = 0; Part < UF; Part++) { 2864 Instruction *NewLoad; 2865 if (BlockInMask || MaskForGaps) { 2866 assert(useMaskedInterleavedAccesses(*TTI) && 2867 "masked interleaved groups are not allowed."); 2868 Value *GroupMask = MaskForGaps; 2869 if (BlockInMask) { 2870 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2871 Value *ShuffledMask = Builder.CreateShuffleVector( 2872 BlockInMaskPart, 2873 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2874 "interleaved.mask"); 2875 GroupMask = MaskForGaps 2876 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2877 MaskForGaps) 2878 : ShuffledMask; 2879 } 2880 NewLoad = 2881 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), 2882 GroupMask, PoisonVec, "wide.masked.vec"); 2883 } 2884 else 2885 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2886 Group->getAlign(), "wide.vec"); 2887 Group->addMetadata(NewLoad); 2888 NewLoads.push_back(NewLoad); 2889 } 2890 2891 // For each member in the group, shuffle out the appropriate data from the 2892 // wide loads. 2893 unsigned J = 0; 2894 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2895 Instruction *Member = Group->getMember(I); 2896 2897 // Skip the gaps in the group. 2898 if (!Member) 2899 continue; 2900 2901 auto StrideMask = 2902 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2903 for (unsigned Part = 0; Part < UF; Part++) { 2904 Value *StridedVec = Builder.CreateShuffleVector( 2905 NewLoads[Part], StrideMask, "strided.vec"); 2906 2907 // If this member has different type, cast the result type. 2908 if (Member->getType() != ScalarTy) { 2909 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2910 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2911 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2912 } 2913 2914 if (Group->isReverse()) 2915 StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse"); 2916 2917 State.set(VPDefs[J], StridedVec, Part); 2918 } 2919 ++J; 2920 } 2921 return; 2922 } 2923 2924 // The sub vector type for current instruction. 2925 auto *SubVT = VectorType::get(ScalarTy, VF); 2926 2927 // Vectorize the interleaved store group. 2928 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2929 assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) && 2930 "masked interleaved groups are not allowed."); 2931 assert((!MaskForGaps || !VF.isScalable()) && 2932 "masking gaps for scalable vectors is not yet supported."); 2933 for (unsigned Part = 0; Part < UF; Part++) { 2934 // Collect the stored vector from each member. 
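    // For example (illustrative, factor = 3 with no member at index 1):
    // StoredVecs becomes { R.vec, poison, B.vec }; the interleave shuffle
    // below still builds a full-width vector, and MaskForGaps disables the
    // store lanes that would have written the missing member.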
2935 SmallVector<Value *, 4> StoredVecs; 2936 for (unsigned i = 0; i < InterleaveFactor; i++) { 2937 assert((Group->getMember(i) || MaskForGaps) && 2938 "Fail to get a member from an interleaved store group"); 2939 Instruction *Member = Group->getMember(i); 2940 2941 // Skip the gaps in the group. 2942 if (!Member) { 2943 Value *Undef = PoisonValue::get(SubVT); 2944 StoredVecs.push_back(Undef); 2945 continue; 2946 } 2947 2948 Value *StoredVec = State.get(StoredValues[i], Part); 2949 2950 if (Group->isReverse()) 2951 StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse"); 2952 2953 // If this member has different type, cast it to a unified type. 2954 2955 if (StoredVec->getType() != SubVT) 2956 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2957 2958 StoredVecs.push_back(StoredVec); 2959 } 2960 2961 // Concatenate all vectors into a wide vector. 2962 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2963 2964 // Interleave the elements in the wide vector. 2965 Value *IVec = Builder.CreateShuffleVector( 2966 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2967 "interleaved.vec"); 2968 2969 Instruction *NewStoreInstr; 2970 if (BlockInMask || MaskForGaps) { 2971 Value *GroupMask = MaskForGaps; 2972 if (BlockInMask) { 2973 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2974 Value *ShuffledMask = Builder.CreateShuffleVector( 2975 BlockInMaskPart, 2976 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2977 "interleaved.mask"); 2978 GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And, 2979 ShuffledMask, MaskForGaps) 2980 : ShuffledMask; 2981 } 2982 NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part], 2983 Group->getAlign(), GroupMask); 2984 } else 2985 NewStoreInstr = 2986 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2987 2988 Group->addMetadata(NewStoreInstr); 2989 } 2990 } 2991 2992 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2993 VPReplicateRecipe *RepRecipe, 2994 const VPIteration &Instance, 2995 bool IfPredicateInstr, 2996 VPTransformState &State) { 2997 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2998 2999 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 3000 // the first lane and part. 3001 if (isa<NoAliasScopeDeclInst>(Instr)) 3002 if (!Instance.isFirstIteration()) 3003 return; 3004 3005 setDebugLocFromInst(Instr); 3006 3007 // Does this instruction return a value ? 3008 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 3009 3010 Instruction *Cloned = Instr->clone(); 3011 if (!IsVoidRetTy) 3012 Cloned->setName(Instr->getName() + ".cloned"); 3013 3014 // If the scalarized instruction contributes to the address computation of a 3015 // widen masked load/store which was in a basic block that needed predication 3016 // and is not predicated after vectorization, we can't propagate 3017 // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized 3018 // instruction could feed a poison value to the base address of the widen 3019 // load/store. 3020 if (State.MayGeneratePoisonRecipes.contains(RepRecipe)) 3021 Cloned->dropPoisonGeneratingFlags(); 3022 3023 State.Builder.SetInsertPoint(Builder.GetInsertBlock(), 3024 Builder.GetInsertPoint()); 3025 // Replace the operands of the cloned instructions with their scalar 3026 // equivalents in the new loop. 
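  // For example (illustrative): when replicating "add i32 %x, %y" for
  // Instance (Part 1, Lane 2), both operands are replaced with the (1, 2)
  // scalar values of the corresponding VPValues; operands defined by uniform
  // replicate recipes always take lane 0 instead.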
3027 for (auto &I : enumerate(RepRecipe->operands())) { 3028 auto InputInstance = Instance; 3029 VPValue *Operand = I.value(); 3030 VPReplicateRecipe *OperandR = dyn_cast<VPReplicateRecipe>(Operand); 3031 if (OperandR && OperandR->isUniform()) 3032 InputInstance.Lane = VPLane::getFirstLane(); 3033 Cloned->setOperand(I.index(), State.get(Operand, InputInstance)); 3034 } 3035 addNewMetadata(Cloned, Instr); 3036 3037 // Place the cloned scalar in the new loop. 3038 Builder.Insert(Cloned); 3039 3040 State.set(RepRecipe, Cloned, Instance); 3041 3042 // If we just cloned a new assumption, add it the assumption cache. 3043 if (auto *II = dyn_cast<AssumeInst>(Cloned)) 3044 AC->registerAssumption(II); 3045 3046 // End if-block. 3047 if (IfPredicateInstr) 3048 PredicatedInstructions.push_back(Cloned); 3049 } 3050 3051 void InnerLoopVectorizer::createHeaderBranch(Loop *L) { 3052 BasicBlock *Header = L->getHeader(); 3053 assert(!L->getLoopLatch() && "loop should not have a latch at this point"); 3054 3055 IRBuilder<> B(Header->getTerminator()); 3056 Instruction *OldInst = 3057 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()); 3058 setDebugLocFromInst(OldInst, &B); 3059 3060 // Connect the header to the exit and header blocks and replace the old 3061 // terminator. 3062 B.CreateCondBr(B.getTrue(), L->getUniqueExitBlock(), Header); 3063 3064 // Now we have two terminators. Remove the old one from the block. 3065 Header->getTerminator()->eraseFromParent(); 3066 } 3067 3068 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 3069 if (TripCount) 3070 return TripCount; 3071 3072 assert(L && "Create Trip Count for null loop."); 3073 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3074 // Find the loop boundaries. 3075 ScalarEvolution *SE = PSE.getSE(); 3076 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 3077 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 3078 "Invalid loop count"); 3079 3080 Type *IdxTy = Legal->getWidestInductionType(); 3081 assert(IdxTy && "No type for induction"); 3082 3083 // The exit count might have the type of i64 while the phi is i32. This can 3084 // happen if we have an induction variable that is sign extended before the 3085 // compare. The only way that we get a backedge taken count is that the 3086 // induction variable was signed and as such will not overflow. In such a case 3087 // truncation is legal. 3088 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 3089 IdxTy->getPrimitiveSizeInBits()) 3090 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3091 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3092 3093 // Get the total trip count from the count by adding 1. 3094 const SCEV *ExitCount = SE->getAddExpr( 3095 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3096 3097 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3098 3099 // Expand the trip count and place the new instructions in the preheader. 3100 // Notice that the pre-header does not change, only the loop body. 3101 SCEVExpander Exp(*SE, DL, "induction"); 3102 3103 // Count holds the overall loop count (N). 
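  // For example (illustrative): for "for (i64 i = 0; i != n; ++i)" the
  // backedge-taken count is n - 1 and the trip count expanded below is n; if
  // the widest induction type is only i32, the backedge-taken count is first
  // truncated to i32 (legal because the induction cannot overflow, see above).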
3104 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3105 L->getLoopPreheader()->getTerminator()); 3106 3107 if (TripCount->getType()->isPointerTy()) 3108 TripCount = 3109 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3110 L->getLoopPreheader()->getTerminator()); 3111 3112 return TripCount; 3113 } 3114 3115 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3116 if (VectorTripCount) 3117 return VectorTripCount; 3118 3119 Value *TC = getOrCreateTripCount(L); 3120 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3121 3122 Type *Ty = TC->getType(); 3123 // This is where we can make the step a runtime constant. 3124 Value *Step = createStepForVF(Builder, Ty, VF, UF); 3125 3126 // If the tail is to be folded by masking, round the number of iterations N 3127 // up to a multiple of Step instead of rounding down. This is done by first 3128 // adding Step-1 and then rounding down. Note that it's ok if this addition 3129 // overflows: the vector induction variable will eventually wrap to zero given 3130 // that it starts at zero and its Step is a power of two; the loop will then 3131 // exit, with the last early-exit vector comparison also producing all-true. 3132 if (Cost->foldTailByMasking()) { 3133 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 3134 "VF*UF must be a power of 2 when folding tail by masking"); 3135 Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF); 3136 TC = Builder.CreateAdd( 3137 TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up"); 3138 } 3139 3140 // Now we need to generate the expression for the part of the loop that the 3141 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3142 // iterations are not required for correctness, or N - Step, otherwise. Step 3143 // is equal to the vectorization factor (number of SIMD elements) times the 3144 // unroll factor (number of SIMD instructions). 3145 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3146 3147 // There are cases where we *must* run at least one iteration in the remainder 3148 // loop. See the cost model for when this can happen. If the step evenly 3149 // divides the trip count, we set the remainder to be equal to the step. If 3150 // the step does not evenly divide the trip count, no adjustment is necessary 3151 // since there will already be scalar iterations. Note that the minimum 3152 // iterations check ensures that N >= Step. 3153 if (Cost->requiresScalarEpilogue(VF)) { 3154 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3155 R = Builder.CreateSelect(IsZero, Step, R); 3156 } 3157 3158 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3159 3160 return VectorTripCount; 3161 } 3162 3163 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 3164 const DataLayout &DL) { 3165 // Verify that V is a vector type with same number of elements as DstVTy. 3166 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 3167 unsigned VF = DstFVTy->getNumElements(); 3168 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 3169 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 3170 Type *SrcElemTy = SrcVecTy->getElementType(); 3171 Type *DstElemTy = DstFVTy->getElementType(); 3172 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 3173 "Vector elements must have same size"); 3174 3175 // Do a direct cast if element types are castable. 
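  // For example (illustrative): <2 x i32> -> <2 x float> needs only the
  // single bitcast below, whereas <2 x double> -> <2 x i8*> (with 64-bit
  // pointers) is not directly castable and goes through the intermediate type
  // <2 x i64>.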
3176 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 3177 return Builder.CreateBitOrPointerCast(V, DstFVTy); 3178 } 3179 // V cannot be directly casted to desired vector type. 3180 // May happen when V is a floating point vector but DstVTy is a vector of 3181 // pointers or vice-versa. Handle this using a two-step bitcast using an 3182 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 3183 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 3184 "Only one type should be a pointer type"); 3185 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 3186 "Only one type should be a floating point type"); 3187 Type *IntTy = 3188 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 3189 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 3190 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 3191 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 3192 } 3193 3194 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3195 BasicBlock *Bypass) { 3196 Value *Count = getOrCreateTripCount(L); 3197 // Reuse existing vector loop preheader for TC checks. 3198 // Note that new preheader block is generated for vector loop. 3199 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 3200 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 3201 3202 // Generate code to check if the loop's trip count is less than VF * UF, or 3203 // equal to it in case a scalar epilogue is required; this implies that the 3204 // vector trip count is zero. This check also covers the case where adding one 3205 // to the backedge-taken count overflowed leading to an incorrect trip count 3206 // of zero. In this case we will also jump to the scalar loop. 3207 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE 3208 : ICmpInst::ICMP_ULT; 3209 3210 // If tail is to be folded, vector loop takes care of all iterations. 3211 Value *CheckMinIters = Builder.getFalse(); 3212 if (!Cost->foldTailByMasking()) { 3213 Value *Step = createStepForVF(Builder, Count->getType(), VF, UF); 3214 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 3215 } 3216 // Create new preheader for vector loop. 3217 LoopVectorPreHeader = 3218 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 3219 "vector.ph"); 3220 3221 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 3222 DT->getNode(Bypass)->getIDom()) && 3223 "TC check is expected to dominate Bypass"); 3224 3225 // Update dominator for Bypass & LoopExit (if needed). 3226 DT->changeImmediateDominator(Bypass, TCCheckBlock); 3227 if (!Cost->requiresScalarEpilogue(VF)) 3228 // If there is an epilogue which must run, there's no edge from the 3229 // middle block to exit blocks and thus no need to update the immediate 3230 // dominator of the exit blocks. 
3231 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 3232 3233 ReplaceInstWithInst( 3234 TCCheckBlock->getTerminator(), 3235 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3236 LoopBypassBlocks.push_back(TCCheckBlock); 3237 } 3238 3239 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 3240 3241 BasicBlock *const SCEVCheckBlock = 3242 RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock); 3243 if (!SCEVCheckBlock) 3244 return nullptr; 3245 3246 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3247 (OptForSizeBasedOnProfile && 3248 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3249 "Cannot SCEV check stride or overflow when optimizing for size"); 3250 3251 3252 // Update dominator only if this is first RT check. 3253 if (LoopBypassBlocks.empty()) { 3254 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3255 if (!Cost->requiresScalarEpilogue(VF)) 3256 // If there is an epilogue which must run, there's no edge from the 3257 // middle block to exit blocks and thus no need to update the immediate 3258 // dominator of the exit blocks. 3259 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3260 } 3261 3262 LoopBypassBlocks.push_back(SCEVCheckBlock); 3263 AddedSafetyChecks = true; 3264 return SCEVCheckBlock; 3265 } 3266 3267 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, 3268 BasicBlock *Bypass) { 3269 // VPlan-native path does not do any analysis for runtime checks currently. 3270 if (EnableVPlanNativePath) 3271 return nullptr; 3272 3273 BasicBlock *const MemCheckBlock = 3274 RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader); 3275 3276 // Check if we generated code that checks in runtime if arrays overlap. We put 3277 // the checks into a separate block to make the more common case of few 3278 // elements faster. 3279 if (!MemCheckBlock) 3280 return nullptr; 3281 3282 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3283 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3284 "Cannot emit memory checks when optimizing for size, unless forced " 3285 "to vectorize."); 3286 ORE->emit([&]() { 3287 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3288 L->getStartLoc(), L->getHeader()) 3289 << "Code-size may be reduced by not forcing " 3290 "vectorization, or by source-code modifications " 3291 "eliminating the need for runtime checks " 3292 "(e.g., adding 'restrict')."; 3293 }); 3294 } 3295 3296 LoopBypassBlocks.push_back(MemCheckBlock); 3297 3298 AddedSafetyChecks = true; 3299 3300 // We currently don't use LoopVersioning for the actual loop cloning but we 3301 // still use it to add the noalias metadata. 
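  // For example (illustrative): if the emitted checks prove at runtime that
  // %a and %b do not overlap, prepareNoAliasMetadata() annotates the loads
  // and stores with !alias.scope/!noalias metadata, letting later passes
  // treat the two access groups as independent inside the vectorized loop.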
  LVer = std::make_unique<LoopVersioning>(
      *Legal->getLAI(),
      Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
      DT, PSE.getSE());
  LVer->prepareNoAliasMetadata();
  return MemCheckBlock;
}

Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
  LoopScalarBody = OrigLoop->getHeader();
  LoopVectorPreHeader = OrigLoop->getLoopPreheader();
  assert(LoopVectorPreHeader && "Invalid loop structure");
  LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
  assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
         "multiple exit loop without required epilogue?");

  LoopMiddleBlock =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, Twine(Prefix) + "middle.block");
  LoopScalarPreHeader =
      SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
                 nullptr, Twine(Prefix) + "scalar.ph");

  auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();

  // Set up the middle block terminator. Two cases:
  // 1) If we know that we must execute the scalar epilogue, emit an
  //    unconditional branch.
  // 2) Otherwise, we must have a single unique exit block (due to how we
  //    implement the multiple exit case). In this case, set up a conditional
  //    branch from the middle block to the loop scalar preheader, and the
  //    exit block. completeLoopSkeleton will update the condition to use an
  //    iteration check, if required to decide whether to execute the remainder.
  BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
    BranchInst::Create(LoopScalarPreHeader) :
    BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
                       Builder.getTrue());
  BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
  ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);

  // We intentionally don't let SplitBlock update LoopInfo since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
  LoopVectorBody =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 nullptr, nullptr, Twine(Prefix) + "vector.body");

  // Update dominator for loop exit.
  if (!Cost->requiresScalarEpilogue(VF))
    // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
    // dominator of the exit blocks.
    DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);

  // Create and register the new vector loop.
  Loop *Lp = LI->AllocateLoop();
  Loop *ParentLoop = OrigLoop->getParentLoop();

  // Insert the new loop into the loop nest and register the new basic blocks
  // before calling any utilities such as SCEV that require valid LoopInfo.
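  // For example (illustrative): when the original loop is the inner loop of a
  // two-deep nest, Lp becomes a child of the outer loop; for a top-level loop
  // it is registered as a new top-level loop. In both cases only the vector
  // body block is added to Lp here.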
3362 if (ParentLoop) { 3363 ParentLoop->addChildLoop(Lp); 3364 } else { 3365 LI->addTopLevelLoop(Lp); 3366 } 3367 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3368 return Lp; 3369 } 3370 3371 void InnerLoopVectorizer::createInductionResumeValues( 3372 Loop *L, std::pair<BasicBlock *, Value *> AdditionalBypass) { 3373 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3374 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3375 "Inconsistent information about additional bypass."); 3376 3377 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3378 assert(VectorTripCount && L && "Expected valid arguments"); 3379 // We are going to resume the execution of the scalar loop. 3380 // Go over all of the induction variables that we found and fix the 3381 // PHIs that are left in the scalar version of the loop. 3382 // The starting values of PHI nodes depend on the counter of the last 3383 // iteration in the vectorized loop. 3384 // If we come from a bypass edge then we need to start from the original 3385 // start value. 3386 Instruction *OldInduction = Legal->getPrimaryInduction(); 3387 for (auto &InductionEntry : Legal->getInductionVars()) { 3388 PHINode *OrigPhi = InductionEntry.first; 3389 InductionDescriptor II = InductionEntry.second; 3390 3391 // Create phi nodes to merge from the backedge-taken check block. 3392 PHINode *BCResumeVal = 3393 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3394 LoopScalarPreHeader->getTerminator()); 3395 // Copy original phi DL over to the new one. 3396 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3397 Value *&EndValue = IVEndValues[OrigPhi]; 3398 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3399 if (OrigPhi == OldInduction) { 3400 // We know what the end value is. 3401 EndValue = VectorTripCount; 3402 } else { 3403 IRBuilder<> B(L->getLoopPreheader()->getTerminator()); 3404 3405 // Fast-math-flags propagate from the original induction instruction. 3406 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3407 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3408 3409 Type *StepType = II.getStep()->getType(); 3410 Instruction::CastOps CastOp = 3411 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3412 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3413 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3414 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, *LI, 3415 LoopVectorBody); 3416 EndValue->setName("ind.end"); 3417 3418 // Compute the end value for the additional bypass (if applicable). 3419 if (AdditionalBypass.first) { 3420 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3421 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3422 StepType, true); 3423 CRD = 3424 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3425 EndValueFromAdditionalBypass = emitTransformedIndex( 3426 B, CRD, PSE.getSE(), DL, II, *LI, LoopVectorBody); 3427 EndValueFromAdditionalBypass->setName("ind.end"); 3428 } 3429 } 3430 // The new PHI merges the original incoming value, in case of a bypass, 3431 // or the value at the end of the vectorized loop. 3432 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3433 3434 // Fix the scalar body counter (PHI node). 3435 // The old induction's phi node in the scalar body needs the truncated 3436 // value. 
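  // For example (illustrative): for a secondary induction { 5, +, 3 },
  // bc.resume.val receives ind.end = 5 + 3 * n.vec when entering the scalar
  // loop from the middle block, and the original start value 5 when entering
  // from any of the bypass blocks added below.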
3437 for (BasicBlock *BB : LoopBypassBlocks) 3438 BCResumeVal->addIncoming(II.getStartValue(), BB); 3439 3440 if (AdditionalBypass.first) 3441 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3442 EndValueFromAdditionalBypass); 3443 3444 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3445 } 3446 } 3447 3448 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3449 MDNode *OrigLoopID) { 3450 assert(L && "Expected valid loop."); 3451 3452 // The trip counts should be cached by now. 3453 Value *Count = getOrCreateTripCount(L); 3454 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3455 3456 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3457 3458 // Add a check in the middle block to see if we have completed 3459 // all of the iterations in the first vector loop. Three cases: 3460 // 1) If we require a scalar epilogue, there is no conditional branch as 3461 // we unconditionally branch to the scalar preheader. Do nothing. 3462 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. 3463 // Thus if tail is to be folded, we know we don't need to run the 3464 // remainder and we can use the previous value for the condition (true). 3465 // 3) Otherwise, construct a runtime check. 3466 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { 3467 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3468 Count, VectorTripCount, "cmp.n", 3469 LoopMiddleBlock->getTerminator()); 3470 3471 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3472 // of the corresponding compare because they may have ended up with 3473 // different line numbers and we want to avoid awkward line stepping while 3474 // debugging. Eg. if the compare has got a line number inside the loop. 3475 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3476 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3477 } 3478 3479 // Get ready to start creating new instructions into the vectorized body. 3480 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3481 "Inconsistent vector loop preheader"); 3482 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3483 3484 #ifdef EXPENSIVE_CHECKS 3485 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3486 LI->verify(*DT); 3487 #endif 3488 3489 return LoopVectorPreHeader; 3490 } 3491 3492 std::pair<BasicBlock *, Value *> 3493 InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3494 /* 3495 In this function we generate a new loop. The new loop will contain 3496 the vectorized instructions while the old loop will continue to run the 3497 scalar remainder. 3498 3499 [ ] <-- loop iteration number check. 3500 / | 3501 / v 3502 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3503 | / | 3504 | / v 3505 || [ ] <-- vector pre header. 3506 |/ | 3507 | v 3508 | [ ] \ 3509 | [ ]_| <-- vector loop. 3510 | | 3511 | v 3512 \ -[ ] <--- middle-block. 3513 \/ | 3514 /\ v 3515 | ->[ ] <--- new preheader. 3516 | | 3517 (opt) v <-- edge from middle to exit iff epilogue is not required. 3518 | [ ] \ 3519 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). 3520 \ | 3521 \ v 3522 >[ ] <-- exit block(s). 3523 ... 3524 */ 3525 3526 // Get the metadata of the original loop before it gets modified. 3527 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3528 3529 // Workaround! Compute the trip count of the original loop and cache it 3530 // before we start modifying the CFG. 
This code has a systemic problem 3531 // wherein it tries to run analysis over partially constructed IR; this is 3532 // wrong, and not simply for SCEV. The trip count of the original loop 3533 // simply happens to be prone to hitting this in practice. In theory, we 3534 // can hit the same issue for any SCEV, or ValueTracking query done during 3535 // mutation. See PR49900. 3536 getOrCreateTripCount(OrigLoop); 3537 3538 // Create an empty vector loop, and prepare basic blocks for the runtime 3539 // checks. 3540 Loop *Lp = createVectorLoopSkeleton(""); 3541 3542 // Now, compare the new count to zero. If it is zero skip the vector loop and 3543 // jump to the scalar loop. This check also covers the case where the 3544 // backedge-taken count is uint##_max: adding one to it will overflow leading 3545 // to an incorrect trip count of zero. In this (rare) case we will also jump 3546 // to the scalar loop. 3547 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3548 3549 // Generate the code to check any assumptions that we've made for SCEV 3550 // expressions. 3551 emitSCEVChecks(Lp, LoopScalarPreHeader); 3552 3553 // Generate the code that checks in runtime if arrays overlap. We put the 3554 // checks into a separate block to make the more common case of few elements 3555 // faster. 3556 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3557 3558 createHeaderBranch(Lp); 3559 3560 // Emit phis for the new starting index of the scalar loop. 3561 createInductionResumeValues(Lp); 3562 3563 return {completeLoopSkeleton(Lp, OrigLoopID), nullptr}; 3564 } 3565 3566 // Fix up external users of the induction variable. At this point, we are 3567 // in LCSSA form, with all external PHIs that use the IV having one input value, 3568 // coming from the remainder loop. We need those PHIs to also have a correct 3569 // value for the IV when arriving directly from the middle block. 3570 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3571 const InductionDescriptor &II, 3572 Value *CountRoundDown, Value *EndValue, 3573 BasicBlock *MiddleBlock) { 3574 // There are two kinds of external IV usages - those that use the value 3575 // computed in the last iteration (the PHI) and those that use the penultimate 3576 // value (the value that feeds into the phi from the loop latch). 3577 // We allow both, but they, obviously, have different values. 3578 3579 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3580 3581 DenseMap<Value *, Value *> MissingVals; 3582 3583 // An external user of the last iteration's value should see the value that 3584 // the remainder loop uses to initialize its own IV. 3585 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3586 for (User *U : PostInc->users()) { 3587 Instruction *UI = cast<Instruction>(U); 3588 if (!OrigLoop->contains(UI)) { 3589 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3590 MissingVals[UI] = EndValue; 3591 } 3592 } 3593 3594 // An external user of the penultimate value need to see EndValue - Step. 3595 // The simplest way to get this is to recompute it from the constituent SCEVs, 3596 // that is Start + (Step * (CRD - 1)). 
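  // For example (illustrative): for an induction { 0, +, 2 } with
  // CountRoundDown = 8, an external user of the penultimate value sees
  // 0 + 2 * (8 - 1) = 14, one step before the end value 16 seen by users of
  // the post-increment.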
3597 for (User *U : OrigPhi->users()) { 3598 auto *UI = cast<Instruction>(U); 3599 if (!OrigLoop->contains(UI)) { 3600 const DataLayout &DL = 3601 OrigLoop->getHeader()->getModule()->getDataLayout(); 3602 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3603 3604 IRBuilder<> B(MiddleBlock->getTerminator()); 3605 3606 // Fast-math-flags propagate from the original induction instruction. 3607 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3608 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3609 3610 Value *CountMinusOne = B.CreateSub( 3611 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3612 Value *CMO = 3613 !II.getStep()->getType()->isIntegerTy() 3614 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3615 II.getStep()->getType()) 3616 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3617 CMO->setName("cast.cmo"); 3618 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II, *LI, 3619 LoopVectorBody); 3620 Escape->setName("ind.escape"); 3621 MissingVals[UI] = Escape; 3622 } 3623 } 3624 3625 for (auto &I : MissingVals) { 3626 PHINode *PHI = cast<PHINode>(I.first); 3627 // One corner case we have to handle is two IVs "chasing" each-other, 3628 // that is %IV2 = phi [...], [ %IV1, %latch ] 3629 // In this case, if IV1 has an external use, we need to avoid adding both 3630 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3631 // don't already have an incoming value for the middle block. 3632 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3633 PHI->addIncoming(I.second, MiddleBlock); 3634 } 3635 } 3636 3637 namespace { 3638 3639 struct CSEDenseMapInfo { 3640 static bool canHandle(const Instruction *I) { 3641 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3642 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3643 } 3644 3645 static inline Instruction *getEmptyKey() { 3646 return DenseMapInfo<Instruction *>::getEmptyKey(); 3647 } 3648 3649 static inline Instruction *getTombstoneKey() { 3650 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3651 } 3652 3653 static unsigned getHashValue(const Instruction *I) { 3654 assert(canHandle(I) && "Unknown instruction!"); 3655 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3656 I->value_op_end())); 3657 } 3658 3659 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3660 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3661 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3662 return LHS == RHS; 3663 return LHS->isIdenticalTo(RHS); 3664 } 3665 }; 3666 3667 } // end anonymous namespace 3668 3669 ///Perform cse of induction variable instructions. 3670 static void cse(BasicBlock *BB) { 3671 // Perform simple cse. 3672 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3673 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 3674 if (!CSEDenseMapInfo::canHandle(&In)) 3675 continue; 3676 3677 // Check if we can replace this instruction with any of the 3678 // visited instructions. 
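    // For example (illustrative): two identical
    // "shufflevector %v, poison, zeroinitializer" broadcasts emitted for
    // different recipes hash to the same key, so the second one is replaced
    // by the first and erased.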
3679 if (Instruction *V = CSEMap.lookup(&In)) { 3680 In.replaceAllUsesWith(V); 3681 In.eraseFromParent(); 3682 continue; 3683 } 3684 3685 CSEMap[&In] = &In; 3686 } 3687 } 3688 3689 InstructionCost 3690 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3691 bool &NeedToScalarize) const { 3692 Function *F = CI->getCalledFunction(); 3693 Type *ScalarRetTy = CI->getType(); 3694 SmallVector<Type *, 4> Tys, ScalarTys; 3695 for (auto &ArgOp : CI->args()) 3696 ScalarTys.push_back(ArgOp->getType()); 3697 3698 // Estimate cost of scalarized vector call. The source operands are assumed 3699 // to be vectors, so we need to extract individual elements from there, 3700 // execute VF scalar calls, and then gather the result into the vector return 3701 // value. 3702 InstructionCost ScalarCallCost = 3703 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3704 if (VF.isScalar()) 3705 return ScalarCallCost; 3706 3707 // Compute corresponding vector type for return value and arguments. 3708 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3709 for (Type *ScalarTy : ScalarTys) 3710 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3711 3712 // Compute costs of unpacking argument values for the scalar calls and 3713 // packing the return values to a vector. 3714 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3715 3716 InstructionCost Cost = 3717 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3718 3719 // If we can't emit a vector call for this function, then the currently found 3720 // cost is the cost we need to return. 3721 NeedToScalarize = true; 3722 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3723 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3724 3725 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3726 return Cost; 3727 3728 // If the corresponding vector cost is cheaper, return its cost. 
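  // For example (illustrative numbers): with VF = 4, a scalar call cost of 10
  // and a scalarization overhead of 12, the scalarized estimate is
  // 4 * 10 + 12 = 52; if the VFDatabase provides a vector variant costing 20,
  // NeedToScalarize is cleared and 20 is returned instead.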
3729 InstructionCost VectorCallCost = 3730 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3731 if (VectorCallCost < Cost) { 3732 NeedToScalarize = false; 3733 Cost = VectorCallCost; 3734 } 3735 return Cost; 3736 } 3737 3738 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3739 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3740 return Elt; 3741 return VectorType::get(Elt, VF); 3742 } 3743 3744 InstructionCost 3745 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3746 ElementCount VF) const { 3747 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3748 assert(ID && "Expected intrinsic call!"); 3749 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3750 FastMathFlags FMF; 3751 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3752 FMF = FPMO->getFastMathFlags(); 3753 3754 SmallVector<const Value *> Arguments(CI->args()); 3755 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3756 SmallVector<Type *> ParamTys; 3757 std::transform(FTy->param_begin(), FTy->param_end(), 3758 std::back_inserter(ParamTys), 3759 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3760 3761 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3762 dyn_cast<IntrinsicInst>(CI)); 3763 return TTI.getIntrinsicInstrCost(CostAttrs, 3764 TargetTransformInfo::TCK_RecipThroughput); 3765 } 3766 3767 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3768 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3769 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3770 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3771 } 3772 3773 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3774 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3775 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3776 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3777 } 3778 3779 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3780 // For every instruction `I` in MinBWs, truncate the operands, create a 3781 // truncated version of `I` and reextend its result. InstCombine runs 3782 // later and will remove any ext/trunc pairs. 3783 SmallPtrSet<Value *, 4> Erased; 3784 for (const auto &KV : Cost->getMinimalBitwidths()) { 3785 // If the value wasn't vectorized, we must maintain the original scalar 3786 // type. The absence of the value from State indicates that it 3787 // wasn't vectorized. 3788 // FIXME: Should not rely on getVPValue at this point. 3789 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3790 if (!State.hasAnyVectorValue(Def)) 3791 continue; 3792 for (unsigned Part = 0; Part < UF; ++Part) { 3793 Value *I = State.get(Def, Part); 3794 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3795 continue; 3796 Type *OriginalTy = I->getType(); 3797 Type *ScalarTruncatedTy = 3798 IntegerType::get(OriginalTy->getContext(), KV.second); 3799 auto *TruncatedTy = VectorType::get( 3800 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); 3801 if (TruncatedTy == OriginalTy) 3802 continue; 3803 3804 IRBuilder<> B(cast<Instruction>(I)); 3805 auto ShrinkOperand = [&](Value *V) -> Value * { 3806 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3807 if (ZI->getSrcTy() == TruncatedTy) 3808 return ZI->getOperand(0); 3809 return B.CreateZExtOrTrunc(V, TruncatedTy); 3810 }; 3811 3812 // The actual instruction modification depends on the instruction type, 3813 // unfortunately. 
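      // For example (illustrative): if MinBWs records that an "add <4 x i32>"
      // only needs 8 bits, each operand is truncated to <4 x i8> (or an
      // existing zext from <4 x i8> is peeled off), the add is recreated on
      // <4 x i8>, and the result is zero-extended back to <4 x i32> below.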
3814 Value *NewI = nullptr; 3815 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3816 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3817 ShrinkOperand(BO->getOperand(1))); 3818 3819 // Any wrapping introduced by shrinking this operation shouldn't be 3820 // considered undefined behavior. So, we can't unconditionally copy 3821 // arithmetic wrapping flags to NewI. 3822 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3823 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3824 NewI = 3825 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3826 ShrinkOperand(CI->getOperand(1))); 3827 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3828 NewI = B.CreateSelect(SI->getCondition(), 3829 ShrinkOperand(SI->getTrueValue()), 3830 ShrinkOperand(SI->getFalseValue())); 3831 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3832 switch (CI->getOpcode()) { 3833 default: 3834 llvm_unreachable("Unhandled cast!"); 3835 case Instruction::Trunc: 3836 NewI = ShrinkOperand(CI->getOperand(0)); 3837 break; 3838 case Instruction::SExt: 3839 NewI = B.CreateSExtOrTrunc( 3840 CI->getOperand(0), 3841 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3842 break; 3843 case Instruction::ZExt: 3844 NewI = B.CreateZExtOrTrunc( 3845 CI->getOperand(0), 3846 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3847 break; 3848 } 3849 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3850 auto Elements0 = 3851 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); 3852 auto *O0 = B.CreateZExtOrTrunc( 3853 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3854 auto Elements1 = 3855 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); 3856 auto *O1 = B.CreateZExtOrTrunc( 3857 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3858 3859 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3860 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3861 // Don't do anything with the operands, just extend the result. 3862 continue; 3863 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3864 auto Elements = 3865 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); 3866 auto *O0 = B.CreateZExtOrTrunc( 3867 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3868 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3869 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3870 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3871 auto Elements = 3872 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); 3873 auto *O0 = B.CreateZExtOrTrunc( 3874 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3875 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3876 } else { 3877 // If we don't know what to do, be conservative and don't do anything. 3878 continue; 3879 } 3880 3881 // Lastly, extend the result. 3882 NewI->takeName(cast<Instruction>(I)); 3883 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3884 I->replaceAllUsesWith(Res); 3885 cast<Instruction>(I)->eraseFromParent(); 3886 Erased.insert(I); 3887 State.reset(Def, Res, Part); 3888 } 3889 } 3890 3891 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3892 for (const auto &KV : Cost->getMinimalBitwidths()) { 3893 // If the value wasn't vectorized, we must maintain the original scalar 3894 // type. The absence of the value from State indicates that it 3895 // wasn't vectorized. 3896 // FIXME: Should not rely on getVPValue at this point. 
3897 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3898 if (!State.hasAnyVectorValue(Def)) 3899 continue; 3900 for (unsigned Part = 0; Part < UF; ++Part) { 3901 Value *I = State.get(Def, Part); 3902 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3903 if (Inst && Inst->use_empty()) { 3904 Value *NewI = Inst->getOperand(0); 3905 Inst->eraseFromParent(); 3906 State.reset(Def, NewI, Part); 3907 } 3908 } 3909 } 3910 } 3911 3912 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { 3913 // Insert truncates and extends for any truncated instructions as hints to 3914 // InstCombine. 3915 if (VF.isVector()) 3916 truncateToMinimalBitwidths(State); 3917 3918 // Fix widened non-induction PHIs by setting up the PHI operands. 3919 if (OrigPHIsToFix.size()) { 3920 assert(EnableVPlanNativePath && 3921 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 3922 fixNonInductionPHIs(State); 3923 } 3924 3925 // At this point every instruction in the original loop is widened to a 3926 // vector form. Now we need to fix the recurrences in the loop. These PHI 3927 // nodes are currently empty because we did not want to introduce cycles. 3928 // This is the second stage of vectorizing recurrences. 3929 fixCrossIterationPHIs(State); 3930 3931 // Forget the original basic block. 3932 PSE.getSE()->forgetLoop(OrigLoop); 3933 3934 // If we inserted an edge from the middle block to the unique exit block, 3935 // update uses outside the loop (phis) to account for the newly inserted 3936 // edge. 3937 if (!Cost->requiresScalarEpilogue(VF)) { 3938 // Fix-up external users of the induction variables. 3939 for (auto &Entry : Legal->getInductionVars()) 3940 fixupIVUsers(Entry.first, Entry.second, 3941 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 3942 IVEndValues[Entry.first], LoopMiddleBlock); 3943 3944 fixLCSSAPHIs(State); 3945 } 3946 3947 for (Instruction *PI : PredicatedInstructions) 3948 sinkScalarOperands(&*PI); 3949 3950 // Remove redundant induction instructions. 3951 cse(LoopVectorBody); 3952 3953 // Set/update profile weights for the vector and remainder loops as original 3954 // loop iterations are now distributed among them. Note that original loop 3955 // represented by LoopScalarBody becomes remainder loop after vectorization. 3956 // 3957 // For cases like foldTailByMasking() and requiresScalarEpiloque() we may 3958 // end up getting slightly roughened result but that should be OK since 3959 // profile is not inherently precise anyway. Note also possible bypass of 3960 // vector code caused by legality checks is ignored, assigning all the weight 3961 // to the vector loop, optimistically. 3962 // 3963 // For scalable vectorization we can't know at compile time how many iterations 3964 // of the loop are handled in one vector iteration, so instead assume a pessimistic 3965 // vscale of '1'. 3966 setProfileInfoAfterUnrolling( 3967 LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody), 3968 LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF); 3969 } 3970 3971 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) { 3972 // In order to support recurrences we need to be able to vectorize Phi nodes. 3973 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3974 // stage #2: We now need to fix the recurrences by adding incoming edges to 3975 // the currently empty PHI nodes. 
At this point every instruction in the 3976 // original loop is widened to a vector form so we can use them to construct 3977 // the incoming edges. 3978 VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock(); 3979 for (VPRecipeBase &R : Header->phis()) { 3980 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) 3981 fixReduction(ReductionPhi, State); 3982 else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) 3983 fixFirstOrderRecurrence(FOR, State); 3984 } 3985 } 3986 3987 void InnerLoopVectorizer::fixFirstOrderRecurrence( 3988 VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) { 3989 // This is the second phase of vectorizing first-order recurrences. An 3990 // overview of the transformation is described below. Suppose we have the 3991 // following loop. 3992 // 3993 // for (int i = 0; i < n; ++i) 3994 // b[i] = a[i] - a[i - 1]; 3995 // 3996 // There is a first-order recurrence on "a". For this loop, the shorthand 3997 // scalar IR looks like: 3998 // 3999 // scalar.ph: 4000 // s_init = a[-1] 4001 // br scalar.body 4002 // 4003 // scalar.body: 4004 // i = phi [0, scalar.ph], [i+1, scalar.body] 4005 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 4006 // s2 = a[i] 4007 // b[i] = s2 - s1 4008 // br cond, scalar.body, ... 4009 // 4010 // In this example, s1 is a recurrence because it's value depends on the 4011 // previous iteration. In the first phase of vectorization, we created a 4012 // vector phi v1 for s1. We now complete the vectorization and produce the 4013 // shorthand vector IR shown below (for VF = 4, UF = 1). 4014 // 4015 // vector.ph: 4016 // v_init = vector(..., ..., ..., a[-1]) 4017 // br vector.body 4018 // 4019 // vector.body 4020 // i = phi [0, vector.ph], [i+4, vector.body] 4021 // v1 = phi [v_init, vector.ph], [v2, vector.body] 4022 // v2 = a[i, i+1, i+2, i+3]; 4023 // v3 = vector(v1(3), v2(0, 1, 2)) 4024 // b[i, i+1, i+2, i+3] = v2 - v3 4025 // br cond, vector.body, middle.block 4026 // 4027 // middle.block: 4028 // x = v2(3) 4029 // br scalar.ph 4030 // 4031 // scalar.ph: 4032 // s_init = phi [x, middle.block], [a[-1], otherwise] 4033 // br scalar.body 4034 // 4035 // After execution completes the vector loop, we extract the next value of 4036 // the recurrence (x) to use as the initial value in the scalar loop. 4037 4038 // Extract the last vector element in the middle block. This will be the 4039 // initial value for the recurrence when jumping to the scalar loop. 4040 VPValue *PreviousDef = PhiR->getBackedgeValue(); 4041 Value *Incoming = State.get(PreviousDef, UF - 1); 4042 auto *ExtractForScalar = Incoming; 4043 auto *IdxTy = Builder.getInt32Ty(); 4044 if (VF.isVector()) { 4045 auto *One = ConstantInt::get(IdxTy, 1); 4046 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4047 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4048 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 4049 ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, 4050 "vector.recur.extract"); 4051 } 4052 // Extract the second last element in the middle block if the 4053 // Phi is used outside the loop. We need to extract the phi itself 4054 // and not the last element (the phi update in the current iteration). This 4055 // will be the value when jumping to the exit block from the LoopMiddleBlock, 4056 // when the scalar loop is not run at all. 
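  // For example (illustrative, VF = 4, UF = 1): the scalar resume value
  // "vector.recur.extract" is lane 3 of v2, while LCSSA phis in the exit
  // block use lane 2 ("vector.recur.extract.for.phi"), i.e. the value of the
  // recurrence phi itself rather than its update.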
  Value *ExtractForPhiUsedOutsideLoop = nullptr;
  if (VF.isVector()) {
    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
    auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
    ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
        Incoming, Idx, "vector.recur.extract.for.phi");
  } else if (UF > 1)
    // When the loop is unrolled without vectorizing, initialize
    // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
    // value of `Incoming`. This is analogous to the vectorized case above:
    // extracting the second last element when VF > 1.
    ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);

  // Fix the initial value of the original recurrence in the scalar loop.
  Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
  PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
  auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
  auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
  for (auto *BB : predecessors(LoopScalarPreHeader)) {
    auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
    Start->addIncoming(Incoming, BB);
  }

  Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
  Phi->setName("scalar.recur");

  // Finally, fix users of the recurrence outside the loop. The users will need
  // either the last value of the scalar recurrence or the last value of the
  // vector recurrence we extracted in the middle block. Since the loop is in
  // LCSSA form, we just need to find all the phi nodes for the original scalar
  // recurrence in the exit block, and then add an edge for the middle block.
  // Note that LCSSA does not imply single entry when the original scalar loop
  // had multiple exiting edges (as we always run the last iteration in the
  // scalar epilogue); in that case, there is no edge from middle to exit and
  // thus no phis which need to be updated.
  if (!Cost->requiresScalarEpilogue(VF))
    for (PHINode &LCSSAPhi : LoopExitBlock->phis())
      if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
        LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
}

void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
                                       VPTransformState &State) {
  PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
  assert(Legal->isReductionVariable(OrigPhi) &&
         "Unable to find the reduction variable");
  const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();

  RecurKind RK = RdxDesc.getRecurrenceKind();
  TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
  Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
  setDebugLocFromInst(ReductionStartValue);

  VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
  // This is the vector-clone of the value that leaves the loop.
  Type *VecTy = State.get(LoopExitInstDef, 0)->getType();

  // Wrap flags are in general invalid after vectorization, clear them.
  clearReductionWrapFlags(RdxDesc, State);

  // Before each round, move the insertion point right between
  // the PHIs and the values we are going to write.
  // This allows us to write both PHINodes and the extractelement
  // instructions.
4122 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4123 4124 setDebugLocFromInst(LoopExitInst); 4125 4126 Type *PhiTy = OrigPhi->getType(); 4127 // If tail is folded by masking, the vector value to leave the loop should be 4128 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 4129 // instead of the former. For an inloop reduction the reduction will already 4130 // be predicated, and does not need to be handled here. 4131 if (Cost->foldTailByMasking() && !PhiR->isInLoop()) { 4132 for (unsigned Part = 0; Part < UF; ++Part) { 4133 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part); 4134 Value *Sel = nullptr; 4135 for (User *U : VecLoopExitInst->users()) { 4136 if (isa<SelectInst>(U)) { 4137 assert(!Sel && "Reduction exit feeding two selects"); 4138 Sel = U; 4139 } else 4140 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 4141 } 4142 assert(Sel && "Reduction exit feeds no select"); 4143 State.reset(LoopExitInstDef, Sel, Part); 4144 4145 // If the target can create a predicated operator for the reduction at no 4146 // extra cost in the loop (for example a predicated vadd), it can be 4147 // cheaper for the select to remain in the loop than be sunk out of it, 4148 // and so use the select value for the phi instead of the old 4149 // LoopExitValue. 4150 if (PreferPredicatedReductionSelect || 4151 TTI->preferPredicatedReductionSelect( 4152 RdxDesc.getOpcode(), PhiTy, 4153 TargetTransformInfo::ReductionFlags())) { 4154 auto *VecRdxPhi = 4155 cast<PHINode>(State.get(PhiR, Part)); 4156 VecRdxPhi->setIncomingValueForBlock( 4157 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 4158 } 4159 } 4160 } 4161 4162 // If the vector reduction can be performed in a smaller type, we truncate 4163 // then extend the loop exit value to enable InstCombine to evaluate the 4164 // entire expression in the smaller type. 4165 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 4166 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); 4167 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4168 Builder.SetInsertPoint( 4169 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4170 VectorParts RdxParts(UF); 4171 for (unsigned Part = 0; Part < UF; ++Part) { 4172 RdxParts[Part] = State.get(LoopExitInstDef, Part); 4173 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4174 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 4175 : Builder.CreateZExt(Trunc, VecTy); 4176 for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users())) 4177 if (U != Trunc) { 4178 U->replaceUsesOfWith(RdxParts[Part], Extnd); 4179 RdxParts[Part] = Extnd; 4180 } 4181 } 4182 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4183 for (unsigned Part = 0; Part < UF; ++Part) { 4184 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4185 State.reset(LoopExitInstDef, RdxParts[Part], Part); 4186 } 4187 } 4188 4189 // Reduce all of the unrolled parts into a single vector. 4190 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 4191 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 4192 4193 // The middle block terminator has already been assigned a DebugLoc here (the 4194 // OrigLoop's single latch terminator). 
We want the whole middle block to 4195 // appear to execute on this line because: (a) it is all compiler generated, 4196 // (b) these instructions are always executed after evaluating the latch 4197 // conditional branch, and (c) other passes may add new predecessors which 4198 // terminate on this line. This is the easiest way to ensure we don't 4199 // accidentally cause an extra step back into the loop while debugging. 4200 setDebugLocFromInst(LoopMiddleBlock->getTerminator()); 4201 if (PhiR->isOrdered()) 4202 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); 4203 else { 4204 // Floating-point operations should have some FMF to enable the reduction. 4205 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 4206 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 4207 for (unsigned Part = 1; Part < UF; ++Part) { 4208 Value *RdxPart = State.get(LoopExitInstDef, Part); 4209 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 4210 ReducedPartRdx = Builder.CreateBinOp( 4211 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 4212 } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK)) 4213 ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK, 4214 ReducedPartRdx, RdxPart); 4215 else 4216 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4217 } 4218 } 4219 4220 // Create the reduction after the loop. Note that inloop reductions create the 4221 // target reduction in the loop using a Reduction recipe. 4222 if (VF.isVector() && !PhiR->isInLoop()) { 4223 ReducedPartRdx = 4224 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi); 4225 // If the reduction can be performed in a smaller type, we need to extend 4226 // the reduction to the wider type before we branch to the original loop. 4227 if (PhiTy != RdxDesc.getRecurrenceType()) 4228 ReducedPartRdx = RdxDesc.isSigned() 4229 ? Builder.CreateSExt(ReducedPartRdx, PhiTy) 4230 : Builder.CreateZExt(ReducedPartRdx, PhiTy); 4231 } 4232 4233 PHINode *ResumePhi = 4234 dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue()); 4235 4236 // Create a phi node that merges control-flow from the backedge-taken check 4237 // block and the middle block. 4238 PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx", 4239 LoopScalarPreHeader->getTerminator()); 4240 4241 // If we are fixing reductions in the epilogue loop then we should already 4242 // have created a bc.merge.rdx Phi after the main vector body. Ensure that 4243 // we carry over the incoming values correctly. 4244 for (auto *Incoming : predecessors(LoopScalarPreHeader)) { 4245 if (Incoming == LoopMiddleBlock) 4246 BCBlockPhi->addIncoming(ReducedPartRdx, Incoming); 4247 else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming)) 4248 BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming), 4249 Incoming); 4250 else 4251 BCBlockPhi->addIncoming(ReductionStartValue, Incoming); 4252 } 4253 4254 // Set the resume value for this reduction 4255 ReductionResumeValues.insert({&RdxDesc, BCBlockPhi}); 4256 4257 // Now, we need to fix the users of the reduction variable 4258 // inside and outside of the scalar remainder loop. 4259 4260 // We know that the loop is in LCSSA form. We need to update the PHI nodes 4261 // in the exit blocks. See comment on analogous loop in 4262 // fixFirstOrderRecurrence for a more complete explaination of the logic. 
4263 if (!Cost->requiresScalarEpilogue(VF)) 4264 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4265 if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst)) 4266 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4267 4268 // Fix the scalar loop reduction variable with the incoming reduction sum 4269 // from the vector body and from the backedge value. 4270 int IncomingEdgeBlockIdx = 4271 OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4272 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4273 // Pick the other block. 4274 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 4275 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4276 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4277 } 4278 4279 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc, 4280 VPTransformState &State) { 4281 RecurKind RK = RdxDesc.getRecurrenceKind(); 4282 if (RK != RecurKind::Add && RK != RecurKind::Mul) 4283 return; 4284 4285 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 4286 assert(LoopExitInstr && "null loop exit instruction"); 4287 SmallVector<Instruction *, 8> Worklist; 4288 SmallPtrSet<Instruction *, 8> Visited; 4289 Worklist.push_back(LoopExitInstr); 4290 Visited.insert(LoopExitInstr); 4291 4292 while (!Worklist.empty()) { 4293 Instruction *Cur = Worklist.pop_back_val(); 4294 if (isa<OverflowingBinaryOperator>(Cur)) 4295 for (unsigned Part = 0; Part < UF; ++Part) { 4296 // FIXME: Should not rely on getVPValue at this point. 4297 Value *V = State.get(State.Plan->getVPValue(Cur, true), Part); 4298 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4299 } 4300 4301 for (User *U : Cur->users()) { 4302 Instruction *UI = cast<Instruction>(U); 4303 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4304 Visited.insert(UI).second) 4305 Worklist.push_back(UI); 4306 } 4307 } 4308 } 4309 4310 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) { 4311 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4312 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) 4313 // Some phis were already hand updated by the reduction and recurrence 4314 // code above, leave them alone. 4315 continue; 4316 4317 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4318 // Non-instruction incoming values will have only one value. 4319 4320 VPLane Lane = VPLane::getFirstLane(); 4321 if (isa<Instruction>(IncomingValue) && 4322 !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue), 4323 VF)) 4324 Lane = VPLane::getLastLaneForVF(VF); 4325 4326 // Can be a loop invariant incoming value or the last scalar value to be 4327 // extracted from the vectorized loop. 4328 // FIXME: Should not rely on getVPValue at this point. 4329 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4330 Value *lastIncomingValue = 4331 OrigLoop->isLoopInvariant(IncomingValue) 4332 ? IncomingValue 4333 : State.get(State.Plan->getVPValue(IncomingValue, true), 4334 VPIteration(UF - 1, Lane)); 4335 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4336 } 4337 } 4338 4339 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4340 // The basic block and loop containing the predicated instruction. 4341 auto *PredBB = PredInst->getParent(); 4342 auto *VectorLoop = LI->getLoopFor(PredBB); 4343 4344 // Initialize a worklist with the operands of the predicated instruction. 
  SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());

  // Holds instructions that we need to analyze again. An instruction may be
  // reanalyzed if we don't yet know if we can sink it or not.
  SmallVector<Instruction *, 8> InstsToReanalyze;

  // Returns true if a given use occurs in the predicated block. Phi nodes use
  // their operands in their corresponding predecessor blocks.
  auto isBlockOfUsePredicated = [&](Use &U) -> bool {
    auto *I = cast<Instruction>(U.getUser());
    BasicBlock *BB = I->getParent();
    if (auto *Phi = dyn_cast<PHINode>(I))
      BB = Phi->getIncomingBlock(
          PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
    return BB == PredBB;
  };

  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when a pass
  // through the worklist doesn't sink a single instruction.
  bool Changed;
  do {
    // Add the instructions that need to be reanalyzed to the worklist, and
    // reset the changed indicator.
    Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
    InstsToReanalyze.clear();
    Changed = false;

    while (!Worklist.empty()) {
      auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());

      // We can't sink an instruction if it is a phi node, is not in the loop,
      // or may have side effects.
      if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
          I->mayHaveSideEffects())
        continue;

      // If the instruction is already in PredBB, check if we can sink its
      // operands. In that case, VPlan's sinkScalarOperands() succeeded in
      // sinking the scalar instruction I, hence it appears in PredBB; but it
      // may have failed to sink I's operands (recursively), which we try
      // (again) here.
      if (I->getParent() == PredBB) {
        Worklist.insert(I->op_begin(), I->op_end());
        continue;
      }

      // It's legal to sink the instruction if all its uses occur in the
      // predicated block. Otherwise, there's nothing to do yet, and we may
      // need to reanalyze the instruction.
      if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
        InstsToReanalyze.push_back(I);
        continue;
      }

      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
      I->moveBefore(&*PredBB->getFirstInsertionPt());
      Worklist.insert(I->op_begin(), I->op_end());

      // The sinking may have enabled other instructions to be sunk, so we will
      // need to iterate.
      Changed = true;
    }
  } while (Changed);
}

void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
  for (PHINode *OrigPhi : OrigPHIsToFix) {
    VPWidenPHIRecipe *VPPhi =
        cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
    PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
    // Make sure the builder has a valid insert point.
4419 Builder.SetInsertPoint(NewPhi); 4420 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4421 VPValue *Inc = VPPhi->getIncomingValue(i); 4422 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4423 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4424 } 4425 } 4426 } 4427 4428 bool InnerLoopVectorizer::useOrderedReductions( 4429 const RecurrenceDescriptor &RdxDesc) { 4430 return Cost->useOrderedReductions(RdxDesc); 4431 } 4432 4433 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4434 VPWidenPHIRecipe *PhiR, 4435 VPTransformState &State) { 4436 PHINode *P = cast<PHINode>(PN); 4437 if (EnableVPlanNativePath) { 4438 // Currently we enter here in the VPlan-native path for non-induction 4439 // PHIs where all control flow is uniform. We simply widen these PHIs. 4440 // Create a vector phi with no operands - the vector phi operands will be 4441 // set at the end of vector code generation. 4442 Type *VecTy = (State.VF.isScalar()) 4443 ? PN->getType() 4444 : VectorType::get(PN->getType(), State.VF); 4445 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4446 State.set(PhiR, VecPhi, 0); 4447 OrigPHIsToFix.push_back(P); 4448 4449 return; 4450 } 4451 4452 assert(PN->getParent() == OrigLoop->getHeader() && 4453 "Non-header phis should have been handled elsewhere"); 4454 4455 // In order to support recurrences we need to be able to vectorize Phi nodes. 4456 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4457 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4458 // this value when we vectorize all of the instructions that use the PHI. 4459 4460 assert(!Legal->isReductionVariable(P) && 4461 "reductions should be handled elsewhere"); 4462 4463 setDebugLocFromInst(P); 4464 4465 // This PHINode must be an induction variable. 4466 // Make sure that we know about it. 4467 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4468 4469 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4470 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4471 4472 auto *IVR = PhiR->getParent()->getPlan()->getCanonicalIV(); 4473 PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0)); 4474 4475 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4476 // which can be found from the original scalar operations. 4477 switch (II.getKind()) { 4478 case InductionDescriptor::IK_NoInduction: 4479 llvm_unreachable("Unknown induction"); 4480 case InductionDescriptor::IK_IntInduction: 4481 case InductionDescriptor::IK_FpInduction: 4482 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4483 case InductionDescriptor::IK_PtrInduction: { 4484 // Handle the pointer induction variable case. 4485 assert(P->getType()->isPointerTy() && "Unexpected type."); 4486 4487 if (Cost->isScalarAfterVectorization(P, State.VF)) { 4488 // This is the normalized GEP that starts counting at zero. 4489 Value *PtrInd = 4490 Builder.CreateSExtOrTrunc(CanonicalIV, II.getStep()->getType()); 4491 // Determine the number of scalars we need to generate for each unroll 4492 // iteration. If the instruction is uniform, we only need to generate the 4493 // first lane. Otherwise, we generate all VF values. 4494 bool IsUniform = vputils::onlyFirstLaneUsed(PhiR); 4495 assert((IsUniform || !State.VF.isScalable()) && 4496 "Cannot scalarize a scalable VF"); 4497 unsigned Lanes = IsUniform ? 
1 : State.VF.getFixedValue(); 4498 4499 for (unsigned Part = 0; Part < UF; ++Part) { 4500 Value *PartStart = 4501 createStepForVF(Builder, PtrInd->getType(), VF, Part); 4502 4503 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4504 Value *Idx = Builder.CreateAdd( 4505 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 4506 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4507 Value *SclrGep = 4508 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II, 4509 *State.LI, State.CFG.PrevBB); 4510 SclrGep->setName("next.gep"); 4511 State.set(PhiR, SclrGep, VPIteration(Part, Lane)); 4512 } 4513 } 4514 return; 4515 } 4516 assert(isa<SCEVConstant>(II.getStep()) && 4517 "Induction step not a SCEV constant!"); 4518 Type *PhiType = II.getStep()->getType(); 4519 4520 // Build a pointer phi 4521 Value *ScalarStartValue = PhiR->getStartValue()->getLiveInIRValue(); 4522 Type *ScStValueType = ScalarStartValue->getType(); 4523 PHINode *NewPointerPhi = 4524 PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV); 4525 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4526 4527 // A pointer induction, performed by using a gep 4528 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4529 Instruction *InductionLoc = LoopLatch->getTerminator(); 4530 const SCEV *ScalarStep = II.getStep(); 4531 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4532 Value *ScalarStepValue = 4533 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4534 Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF); 4535 Value *NumUnrolledElems = 4536 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 4537 Value *InductionGEP = GetElementPtrInst::Create( 4538 II.getElementType(), NewPointerPhi, 4539 Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 4540 InductionLoc); 4541 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4542 4543 // Create UF many actual address geps that use the pointer 4544 // phi as base and a vectorized version of the step value 4545 // (<step*0, ..., step*N>) as offset. 4546 for (unsigned Part = 0; Part < State.UF; ++Part) { 4547 Type *VecPhiType = VectorType::get(PhiType, State.VF); 4548 Value *StartOffsetScalar = 4549 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 4550 Value *StartOffset = 4551 Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 4552 // Create a vector of consecutive numbers from zero to VF. 4553 StartOffset = 4554 Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType)); 4555 4556 Value *GEP = Builder.CreateGEP( 4557 II.getElementType(), NewPointerPhi, 4558 Builder.CreateMul( 4559 StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue), 4560 "vector.gep")); 4561 State.set(PhiR, GEP, Part); 4562 } 4563 } 4564 } 4565 } 4566 4567 /// A helper function for checking whether an integer division-related 4568 /// instruction may divide by zero (in which case it must be predicated if 4569 /// executed conditionally in the scalar code). 4570 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4571 /// Non-zero divisors that are non compile-time constants will not be 4572 /// converted into multiplication, so we will still end up scalarizing 4573 /// the division, but can do so w/o predication. 
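/// For example (an illustrative note, not from the original comment): a
/// scalarized 'udiv i32 %x, 7' needs no predication because its divisor is a
/// non-zero constant, whereas 'udiv i32 %x, %n' must remain predicated when it
/// only executes conditionally, since %n may be zero at run time.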
4574 static bool mayDivideByZero(Instruction &I) { 4575 assert((I.getOpcode() == Instruction::UDiv || 4576 I.getOpcode() == Instruction::SDiv || 4577 I.getOpcode() == Instruction::URem || 4578 I.getOpcode() == Instruction::SRem) && 4579 "Unexpected instruction"); 4580 Value *Divisor = I.getOperand(1); 4581 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4582 return !CInt || CInt->isZero(); 4583 } 4584 4585 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4586 VPUser &ArgOperands, 4587 VPTransformState &State) { 4588 assert(!isa<DbgInfoIntrinsic>(I) && 4589 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4590 setDebugLocFromInst(&I); 4591 4592 Module *M = I.getParent()->getParent()->getParent(); 4593 auto *CI = cast<CallInst>(&I); 4594 4595 SmallVector<Type *, 4> Tys; 4596 for (Value *ArgOperand : CI->args()) 4597 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4598 4599 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4600 4601 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4602 // version of the instruction. 4603 // Is it beneficial to perform intrinsic call compared to lib call? 4604 bool NeedToScalarize = false; 4605 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4606 InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; 4607 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4608 assert((UseVectorIntrinsic || !NeedToScalarize) && 4609 "Instruction should be scalarized elsewhere."); 4610 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4611 "Either the intrinsic cost or vector call cost must be valid"); 4612 4613 for (unsigned Part = 0; Part < UF; ++Part) { 4614 SmallVector<Type *, 2> TysForDecl = {CI->getType()}; 4615 SmallVector<Value *, 4> Args; 4616 for (auto &I : enumerate(ArgOperands.operands())) { 4617 // Some intrinsics have a scalar argument - don't replace it with a 4618 // vector. 4619 Value *Arg; 4620 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 4621 Arg = State.get(I.value(), Part); 4622 else { 4623 Arg = State.get(I.value(), VPIteration(0, 0)); 4624 if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) 4625 TysForDecl.push_back(Arg->getType()); 4626 } 4627 Args.push_back(Arg); 4628 } 4629 4630 Function *VectorF; 4631 if (UseVectorIntrinsic) { 4632 // Use vector version of the intrinsic. 4633 if (VF.isVector()) 4634 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4635 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4636 assert(VectorF && "Can't retrieve vector intrinsic."); 4637 } else { 4638 // Use vector version of the function call. 4639 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 4640 #ifndef NDEBUG 4641 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 4642 "Can't create vector function."); 4643 #endif 4644 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 4645 } 4646 SmallVector<OperandBundleDef, 1> OpBundles; 4647 CI->getOperandBundlesAsDefs(OpBundles); 4648 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4649 4650 if (isa<FPMathOperator>(V)) 4651 V->copyFastMathFlags(CI); 4652 4653 State.set(Def, V, Part); 4654 addMetadata(V, &I); 4655 } 4656 } 4657 4658 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 4659 // We should not collect Scalars more than once per VF. 
Right now, this 4660 // function is called from collectUniformsAndScalars(), which already does 4661 // this check. Collecting Scalars for VF=1 does not make any sense. 4662 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 4663 "This function should not be visited twice for the same VF"); 4664 4665 SmallSetVector<Instruction *, 8> Worklist; 4666 4667 // These sets are used to seed the analysis with pointers used by memory 4668 // accesses that will remain scalar. 4669 SmallSetVector<Instruction *, 8> ScalarPtrs; 4670 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4671 auto *Latch = TheLoop->getLoopLatch(); 4672 4673 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4674 // The pointer operands of loads and stores will be scalar as long as the 4675 // memory access is not a gather or scatter operation. The value operand of a 4676 // store will remain scalar if the store is scalarized. 4677 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4678 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4679 assert(WideningDecision != CM_Unknown && 4680 "Widening decision should be ready at this moment"); 4681 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4682 if (Ptr == Store->getValueOperand()) 4683 return WideningDecision == CM_Scalarize; 4684 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4685 "Ptr is neither a value or pointer operand"); 4686 return WideningDecision != CM_GatherScatter; 4687 }; 4688 4689 // A helper that returns true if the given value is a bitcast or 4690 // getelementptr instruction contained in the loop. 4691 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4692 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4693 isa<GetElementPtrInst>(V)) && 4694 !TheLoop->isLoopInvariant(V); 4695 }; 4696 4697 // A helper that evaluates a memory access's use of a pointer. If the use will 4698 // be a scalar use and the pointer is only used by memory accesses, we place 4699 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in 4700 // PossibleNonScalarPtrs. 4701 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4702 // We only care about bitcast and getelementptr instructions contained in 4703 // the loop. 4704 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4705 return; 4706 4707 // If the pointer has already been identified as scalar (e.g., if it was 4708 // also identified as uniform), there's nothing to do. 4709 auto *I = cast<Instruction>(Ptr); 4710 if (Worklist.count(I)) 4711 return; 4712 4713 // If the use of the pointer will be a scalar use, and all users of the 4714 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 4715 // place the pointer in PossibleNonScalarPtrs. 4716 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 4717 return isa<LoadInst>(U) || isa<StoreInst>(U); 4718 })) 4719 ScalarPtrs.insert(I); 4720 else 4721 PossibleNonScalarPtrs.insert(I); 4722 }; 4723 4724 // We seed the scalars analysis with three classes of instructions: (1) 4725 // instructions marked uniform-after-vectorization and (2) bitcast, 4726 // getelementptr and (pointer) phi instructions used by memory accesses 4727 // requiring a scalar use. 4728 // 4729 // (1) Add to the worklist all instructions that have been identified as 4730 // uniform-after-vectorization. 
4731 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4732 4733 // (2) Add to the worklist all bitcast and getelementptr instructions used by 4734 // memory accesses requiring a scalar use. The pointer operands of loads and 4735 // stores will be scalar as long as the memory accesses is not a gather or 4736 // scatter operation. The value operand of a store will remain scalar if the 4737 // store is scalarized. 4738 for (auto *BB : TheLoop->blocks()) 4739 for (auto &I : *BB) { 4740 if (auto *Load = dyn_cast<LoadInst>(&I)) { 4741 evaluatePtrUse(Load, Load->getPointerOperand()); 4742 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 4743 evaluatePtrUse(Store, Store->getPointerOperand()); 4744 evaluatePtrUse(Store, Store->getValueOperand()); 4745 } 4746 } 4747 for (auto *I : ScalarPtrs) 4748 if (!PossibleNonScalarPtrs.count(I)) { 4749 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 4750 Worklist.insert(I); 4751 } 4752 4753 // Insert the forced scalars. 4754 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4755 // induction variable when the PHI user is scalarized. 4756 auto ForcedScalar = ForcedScalars.find(VF); 4757 if (ForcedScalar != ForcedScalars.end()) 4758 for (auto *I : ForcedScalar->second) 4759 Worklist.insert(I); 4760 4761 // Expand the worklist by looking through any bitcasts and getelementptr 4762 // instructions we've already identified as scalar. This is similar to the 4763 // expansion step in collectLoopUniforms(); however, here we're only 4764 // expanding to include additional bitcasts and getelementptr instructions. 4765 unsigned Idx = 0; 4766 while (Idx != Worklist.size()) { 4767 Instruction *Dst = Worklist[Idx++]; 4768 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4769 continue; 4770 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4771 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4772 auto *J = cast<Instruction>(U); 4773 return !TheLoop->contains(J) || Worklist.count(J) || 4774 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4775 isScalarUse(J, Src)); 4776 })) { 4777 Worklist.insert(Src); 4778 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4779 } 4780 } 4781 4782 // An induction variable will remain scalar if all users of the induction 4783 // variable and induction variable update remain scalar. 4784 for (auto &Induction : Legal->getInductionVars()) { 4785 auto *Ind = Induction.first; 4786 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4787 4788 // If tail-folding is applied, the primary induction variable will be used 4789 // to feed a vector compare. 4790 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 4791 continue; 4792 4793 // Returns true if \p Indvar is a pointer induction that is used directly by 4794 // load/store instruction \p I. 4795 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar, 4796 Instruction *I) { 4797 return Induction.second.getKind() == 4798 InductionDescriptor::IK_PtrInduction && 4799 (isa<LoadInst>(I) || isa<StoreInst>(I)) && 4800 Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar); 4801 }; 4802 4803 // Determine if all users of the induction variable are scalar after 4804 // vectorization. 
4805 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4806 auto *I = cast<Instruction>(U); 4807 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4808 IsDirectLoadStoreFromPtrIndvar(Ind, I); 4809 }); 4810 if (!ScalarInd) 4811 continue; 4812 4813 // Determine if all users of the induction variable update instruction are 4814 // scalar after vectorization. 4815 auto ScalarIndUpdate = 4816 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4817 auto *I = cast<Instruction>(U); 4818 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4819 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I); 4820 }); 4821 if (!ScalarIndUpdate) 4822 continue; 4823 4824 // The induction variable and its update instruction will remain scalar. 4825 Worklist.insert(Ind); 4826 Worklist.insert(IndUpdate); 4827 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4828 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4829 << "\n"); 4830 } 4831 4832 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4833 } 4834 4835 bool LoopVectorizationCostModel::isScalarWithPredication( 4836 Instruction *I, ElementCount VF) const { 4837 if (!blockNeedsPredicationForAnyReason(I->getParent())) 4838 return false; 4839 switch(I->getOpcode()) { 4840 default: 4841 break; 4842 case Instruction::Load: 4843 case Instruction::Store: { 4844 if (!Legal->isMaskRequired(I)) 4845 return false; 4846 auto *Ptr = getLoadStorePointerOperand(I); 4847 auto *Ty = getLoadStoreType(I); 4848 Type *VTy = Ty; 4849 if (VF.isVector()) 4850 VTy = VectorType::get(Ty, VF); 4851 const Align Alignment = getLoadStoreAlignment(I); 4852 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 4853 TTI.isLegalMaskedGather(VTy, Alignment)) 4854 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 4855 TTI.isLegalMaskedScatter(VTy, Alignment)); 4856 } 4857 case Instruction::UDiv: 4858 case Instruction::SDiv: 4859 case Instruction::SRem: 4860 case Instruction::URem: 4861 return mayDivideByZero(*I); 4862 } 4863 return false; 4864 } 4865 4866 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 4867 Instruction *I, ElementCount VF) { 4868 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4869 assert(getWideningDecision(I, VF) == CM_Unknown && 4870 "Decision should not be set yet."); 4871 auto *Group = getInterleavedAccessGroup(I); 4872 assert(Group && "Must have a group."); 4873 4874 // If the instruction's allocated size doesn't equal it's type size, it 4875 // requires padding and will be scalarized. 4876 auto &DL = I->getModule()->getDataLayout(); 4877 auto *ScalarTy = getLoadStoreType(I); 4878 if (hasIrregularType(ScalarTy, DL)) 4879 return false; 4880 4881 // Check if masking is required. 4882 // A Group may need masking for one of two reasons: it resides in a block that 4883 // needs predication, or it was decided to use masking to deal with gaps 4884 // (either a gap at the end of a load-access that may result in a speculative 4885 // load, or any gaps in a store-access). 
4886 bool PredicatedAccessRequiresMasking = 4887 blockNeedsPredicationForAnyReason(I->getParent()) && 4888 Legal->isMaskRequired(I); 4889 bool LoadAccessWithGapsRequiresEpilogMasking = 4890 isa<LoadInst>(I) && Group->requiresScalarEpilogue() && 4891 !isScalarEpilogueAllowed(); 4892 bool StoreAccessWithGapsRequiresMasking = 4893 isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()); 4894 if (!PredicatedAccessRequiresMasking && 4895 !LoadAccessWithGapsRequiresEpilogMasking && 4896 !StoreAccessWithGapsRequiresMasking) 4897 return true; 4898 4899 // If masked interleaving is required, we expect that the user/target had 4900 // enabled it, because otherwise it either wouldn't have been created or 4901 // it should have been invalidated by the CostModel. 4902 assert(useMaskedInterleavedAccesses(TTI) && 4903 "Masked interleave-groups for predicated accesses are not enabled."); 4904 4905 if (Group->isReverse()) 4906 return false; 4907 4908 auto *Ty = getLoadStoreType(I); 4909 const Align Alignment = getLoadStoreAlignment(I); 4910 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 4911 : TTI.isLegalMaskedStore(Ty, Alignment); 4912 } 4913 4914 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 4915 Instruction *I, ElementCount VF) { 4916 // Get and ensure we have a valid memory instruction. 4917 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction"); 4918 4919 auto *Ptr = getLoadStorePointerOperand(I); 4920 auto *ScalarTy = getLoadStoreType(I); 4921 4922 // In order to be widened, the pointer should be consecutive, first of all. 4923 if (!Legal->isConsecutivePtr(ScalarTy, Ptr)) 4924 return false; 4925 4926 // If the instruction is a store located in a predicated block, it will be 4927 // scalarized. 4928 if (isScalarWithPredication(I, VF)) 4929 return false; 4930 4931 // If the instruction's allocated size doesn't equal it's type size, it 4932 // requires padding and will be scalarized. 4933 auto &DL = I->getModule()->getDataLayout(); 4934 if (hasIrregularType(ScalarTy, DL)) 4935 return false; 4936 4937 return true; 4938 } 4939 4940 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 4941 // We should not collect Uniforms more than once per VF. Right now, 4942 // this function is called from collectUniformsAndScalars(), which 4943 // already does this check. Collecting Uniforms for VF=1 does not make any 4944 // sense. 4945 4946 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 4947 "This function should not be visited twice for the same VF"); 4948 4949 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 4950 // not analyze again. Uniforms.count(VF) will return 1. 4951 Uniforms[VF].clear(); 4952 4953 // We now know that the loop is vectorizable! 4954 // Collect instructions inside the loop that will remain uniform after 4955 // vectorization. 4956 4957 // Global values, params and instructions outside of current loop are out of 4958 // scope. 4959 auto isOutOfScope = [&](Value *V) -> bool { 4960 Instruction *I = dyn_cast<Instruction>(V); 4961 return (!I || !TheLoop->contains(I)); 4962 }; 4963 4964 // Worklist containing uniform instructions demanding lane 0. 4965 SetVector<Instruction *> Worklist; 4966 BasicBlock *Latch = TheLoop->getLoopLatch(); 4967 4968 // Add uniform instructions demanding lane 0 to the worklist. 
Instructions 4969 // that are scalar with predication must not be considered uniform after 4970 // vectorization, because that would create an erroneous replicating region 4971 // where only a single instance out of VF should be formed. 4972 // TODO: optimize such seldom cases if found important, see PR40816. 4973 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 4974 if (isOutOfScope(I)) { 4975 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 4976 << *I << "\n"); 4977 return; 4978 } 4979 if (isScalarWithPredication(I, VF)) { 4980 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 4981 << *I << "\n"); 4982 return; 4983 } 4984 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 4985 Worklist.insert(I); 4986 }; 4987 4988 // Start with the conditional branch. If the branch condition is an 4989 // instruction contained in the loop that is only used by the branch, it is 4990 // uniform. 4991 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4992 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 4993 addToWorklistIfAllowed(Cmp); 4994 4995 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 4996 InstWidening WideningDecision = getWideningDecision(I, VF); 4997 assert(WideningDecision != CM_Unknown && 4998 "Widening decision should be ready at this moment"); 4999 5000 // A uniform memory op is itself uniform. We exclude uniform stores 5001 // here as they demand the last lane, not the first one. 5002 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 5003 assert(WideningDecision == CM_Scalarize); 5004 return true; 5005 } 5006 5007 return (WideningDecision == CM_Widen || 5008 WideningDecision == CM_Widen_Reverse || 5009 WideningDecision == CM_Interleave); 5010 }; 5011 5012 5013 // Returns true if Ptr is the pointer operand of a memory access instruction 5014 // I, and I is known to not require scalarization. 5015 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 5016 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 5017 }; 5018 5019 // Holds a list of values which are known to have at least one uniform use. 5020 // Note that there may be other uses which aren't uniform. A "uniform use" 5021 // here is something which only demands lane 0 of the unrolled iterations; 5022 // it does not imply that all lanes produce the same value (e.g. this is not 5023 // the usual meaning of uniform) 5024 SetVector<Value *> HasUniformUse; 5025 5026 // Scan the loop for instructions which are either a) known to have only 5027 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 5028 for (auto *BB : TheLoop->blocks()) 5029 for (auto &I : *BB) { 5030 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) { 5031 switch (II->getIntrinsicID()) { 5032 case Intrinsic::sideeffect: 5033 case Intrinsic::experimental_noalias_scope_decl: 5034 case Intrinsic::assume: 5035 case Intrinsic::lifetime_start: 5036 case Intrinsic::lifetime_end: 5037 if (TheLoop->hasLoopInvariantOperands(&I)) 5038 addToWorklistIfAllowed(&I); 5039 break; 5040 default: 5041 break; 5042 } 5043 } 5044 5045 // ExtractValue instructions must be uniform, because the operands are 5046 // known to be loop-invariant. 
5047 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) { 5048 assert(isOutOfScope(EVI->getAggregateOperand()) && 5049 "Expected aggregate value to be loop invariant"); 5050 addToWorklistIfAllowed(EVI); 5051 continue; 5052 } 5053 5054 // If there's no pointer operand, there's nothing to do. 5055 auto *Ptr = getLoadStorePointerOperand(&I); 5056 if (!Ptr) 5057 continue; 5058 5059 // A uniform memory op is itself uniform. We exclude uniform stores 5060 // here as they demand the last lane, not the first one. 5061 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 5062 addToWorklistIfAllowed(&I); 5063 5064 if (isUniformDecision(&I, VF)) { 5065 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 5066 HasUniformUse.insert(Ptr); 5067 } 5068 } 5069 5070 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 5071 // demanding) users. Since loops are assumed to be in LCSSA form, this 5072 // disallows uses outside the loop as well. 5073 for (auto *V : HasUniformUse) { 5074 if (isOutOfScope(V)) 5075 continue; 5076 auto *I = cast<Instruction>(V); 5077 auto UsersAreMemAccesses = 5078 llvm::all_of(I->users(), [&](User *U) -> bool { 5079 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 5080 }); 5081 if (UsersAreMemAccesses) 5082 addToWorklistIfAllowed(I); 5083 } 5084 5085 // Expand Worklist in topological order: whenever a new instruction 5086 // is added , its users should be already inside Worklist. It ensures 5087 // a uniform instruction will only be used by uniform instructions. 5088 unsigned idx = 0; 5089 while (idx != Worklist.size()) { 5090 Instruction *I = Worklist[idx++]; 5091 5092 for (auto OV : I->operand_values()) { 5093 // isOutOfScope operands cannot be uniform instructions. 5094 if (isOutOfScope(OV)) 5095 continue; 5096 // First order recurrence Phi's should typically be considered 5097 // non-uniform. 5098 auto *OP = dyn_cast<PHINode>(OV); 5099 if (OP && Legal->isFirstOrderRecurrence(OP)) 5100 continue; 5101 // If all the users of the operand are uniform, then add the 5102 // operand into the uniform worklist. 5103 auto *OI = cast<Instruction>(OV); 5104 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 5105 auto *J = cast<Instruction>(U); 5106 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 5107 })) 5108 addToWorklistIfAllowed(OI); 5109 } 5110 } 5111 5112 // For an instruction to be added into Worklist above, all its users inside 5113 // the loop should also be in Worklist. However, this condition cannot be 5114 // true for phi nodes that form a cyclic dependence. We must process phi 5115 // nodes separately. An induction variable will remain uniform if all users 5116 // of the induction variable and induction variable update remain uniform. 5117 // The code below handles both pointer and non-pointer induction variables. 5118 for (auto &Induction : Legal->getInductionVars()) { 5119 auto *Ind = Induction.first; 5120 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5121 5122 // Determine if all users of the induction variable are uniform after 5123 // vectorization. 5124 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5125 auto *I = cast<Instruction>(U); 5126 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 5127 isVectorizedMemAccessUse(I, Ind); 5128 }); 5129 if (!UniformInd) 5130 continue; 5131 5132 // Determine if all users of the induction variable update instruction are 5133 // uniform after vectorization. 
5134 auto UniformIndUpdate = 5135 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5136 auto *I = cast<Instruction>(U); 5137 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 5138 isVectorizedMemAccessUse(I, IndUpdate); 5139 }); 5140 if (!UniformIndUpdate) 5141 continue; 5142 5143 // The induction variable and its update instruction will remain uniform. 5144 addToWorklistIfAllowed(Ind); 5145 addToWorklistIfAllowed(IndUpdate); 5146 } 5147 5148 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 5149 } 5150 5151 bool LoopVectorizationCostModel::runtimeChecksRequired() { 5152 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 5153 5154 if (Legal->getRuntimePointerChecking()->Need) { 5155 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 5156 "runtime pointer checks needed. Enable vectorization of this " 5157 "loop with '#pragma clang loop vectorize(enable)' when " 5158 "compiling with -Os/-Oz", 5159 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5160 return true; 5161 } 5162 5163 if (!PSE.getUnionPredicate().getPredicates().empty()) { 5164 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 5165 "runtime SCEV checks needed. Enable vectorization of this " 5166 "loop with '#pragma clang loop vectorize(enable)' when " 5167 "compiling with -Os/-Oz", 5168 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5169 return true; 5170 } 5171 5172 // FIXME: Avoid specializing for stride==1 instead of bailing out. 5173 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 5174 reportVectorizationFailure("Runtime stride check for small trip count", 5175 "runtime stride == 1 checks needed. Enable vectorization of " 5176 "this loop without such check by compiling with -Os/-Oz", 5177 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5178 return true; 5179 } 5180 5181 return false; 5182 } 5183 5184 ElementCount 5185 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 5186 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) 5187 return ElementCount::getScalable(0); 5188 5189 if (Hints->isScalableVectorizationDisabled()) { 5190 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 5191 "ScalableVectorizationDisabled", ORE, TheLoop); 5192 return ElementCount::getScalable(0); 5193 } 5194 5195 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n"); 5196 5197 auto MaxScalableVF = ElementCount::getScalable( 5198 std::numeric_limits<ElementCount::ScalarTy>::max()); 5199 5200 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 5201 // FIXME: While for scalable vectors this is currently sufficient, this should 5202 // be replaced by a more detailed mechanism that filters out specific VFs, 5203 // instead of invalidating vectorization for a whole set of VFs based on the 5204 // MaxVF. 5205 5206 // Disable scalable vectorization if the loop contains unsupported reductions. 5207 if (!canVectorizeReductions(MaxScalableVF)) { 5208 reportVectorizationInfo( 5209 "Scalable vectorization not supported for the reduction " 5210 "operations found in this loop.", 5211 "ScalableVFUnfeasible", ORE, TheLoop); 5212 return ElementCount::getScalable(0); 5213 } 5214 5215 // Disable scalable vectorization if the loop contains any instructions 5216 // with element types not supported for scalable vectors. 
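  // (Illustrative example, not from the original comment: on a target whose
  // scalable vectors only support element types such as i8..i64 and
  // half/float/double, a loop computing on fp128 values would be rejected here
  // and could then only be vectorized with fixed-width VFs.)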
5217 if (any_of(ElementTypesInLoop, [&](Type *Ty) { 5218 return !Ty->isVoidTy() && 5219 !this->TTI.isElementTypeLegalForScalableVector(Ty); 5220 })) { 5221 reportVectorizationInfo("Scalable vectorization is not supported " 5222 "for all element types found in this loop.", 5223 "ScalableVFUnfeasible", ORE, TheLoop); 5224 return ElementCount::getScalable(0); 5225 } 5226 5227 if (Legal->isSafeForAnyVectorWidth()) 5228 return MaxScalableVF; 5229 5230 // Limit MaxScalableVF by the maximum safe dependence distance. 5231 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 5232 if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) 5233 MaxVScale = 5234 TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax(); 5235 MaxScalableVF = ElementCount::getScalable( 5236 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); 5237 if (!MaxScalableVF) 5238 reportVectorizationInfo( 5239 "Max legal vector width too small, scalable vectorization " 5240 "unfeasible.", 5241 "ScalableVFUnfeasible", ORE, TheLoop); 5242 5243 return MaxScalableVF; 5244 } 5245 5246 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF( 5247 unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) { 5248 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5249 unsigned SmallestType, WidestType; 5250 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5251 5252 // Get the maximum safe dependence distance in bits computed by LAA. 5253 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 5254 // the memory accesses that is most restrictive (involved in the smallest 5255 // dependence distance). 5256 unsigned MaxSafeElements = 5257 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 5258 5259 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 5260 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 5261 5262 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 5263 << ".\n"); 5264 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 5265 << ".\n"); 5266 5267 // First analyze the UserVF, fall back if the UserVF should be ignored. 5268 if (UserVF) { 5269 auto MaxSafeUserVF = 5270 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; 5271 5272 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { 5273 // If `VF=vscale x N` is safe, then so is `VF=N` 5274 if (UserVF.isScalable()) 5275 return FixedScalableVFPair( 5276 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); 5277 else 5278 return UserVF; 5279 } 5280 5281 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 5282 5283 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 5284 // is better to ignore the hint and let the compiler choose a suitable VF. 
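  // For instance (illustrative): a fixed user hint of VF=16 with a maximum
  // safe fixed VF of 8 is clamped to 8 below, whereas an unsafe scalable hint
  // such as VF=vscale x 16 is dropped entirely and the cost model picks the
  // vectorization factor instead.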
5285 if (!UserVF.isScalable()) { 5286 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5287 << " is unsafe, clamping to max safe VF=" 5288 << MaxSafeFixedVF << ".\n"); 5289 ORE->emit([&]() { 5290 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5291 TheLoop->getStartLoc(), 5292 TheLoop->getHeader()) 5293 << "User-specified vectorization factor " 5294 << ore::NV("UserVectorizationFactor", UserVF) 5295 << " is unsafe, clamping to maximum safe vectorization factor " 5296 << ore::NV("VectorizationFactor", MaxSafeFixedVF); 5297 }); 5298 return MaxSafeFixedVF; 5299 } 5300 5301 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) { 5302 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5303 << " is ignored because scalable vectors are not " 5304 "available.\n"); 5305 ORE->emit([&]() { 5306 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5307 TheLoop->getStartLoc(), 5308 TheLoop->getHeader()) 5309 << "User-specified vectorization factor " 5310 << ore::NV("UserVectorizationFactor", UserVF) 5311 << " is ignored because the target does not support scalable " 5312 "vectors. The compiler will pick a more suitable value."; 5313 }); 5314 } else { 5315 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5316 << " is unsafe. Ignoring scalable UserVF.\n"); 5317 ORE->emit([&]() { 5318 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5319 TheLoop->getStartLoc(), 5320 TheLoop->getHeader()) 5321 << "User-specified vectorization factor " 5322 << ore::NV("UserVectorizationFactor", UserVF) 5323 << " is unsafe. Ignoring the hint to let the compiler pick a " 5324 "more suitable value."; 5325 }); 5326 } 5327 } 5328 5329 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 5330 << " / " << WidestType << " bits.\n"); 5331 5332 FixedScalableVFPair Result(ElementCount::getFixed(1), 5333 ElementCount::getScalable(0)); 5334 if (auto MaxVF = 5335 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType, 5336 MaxSafeFixedVF, FoldTailByMasking)) 5337 Result.FixedVF = MaxVF; 5338 5339 if (auto MaxVF = 5340 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType, 5341 MaxSafeScalableVF, FoldTailByMasking)) 5342 if (MaxVF.isScalable()) { 5343 Result.ScalableVF = MaxVF; 5344 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF 5345 << "\n"); 5346 } 5347 5348 return Result; 5349 } 5350 5351 FixedScalableVFPair 5352 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { 5353 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 5354 // TODO: It may by useful to do since it's still likely to be dynamically 5355 // uniform if the target can skip. 5356 reportVectorizationFailure( 5357 "Not inserting runtime ptr check for divergent target", 5358 "runtime pointer checks needed. 
Not enabled for divergent target",
        "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
    return FixedScalableVFPair::getNone();
  }

  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
  if (TC == 1) {
    reportVectorizationFailure("Single iteration (non) loop",
        "loop trip count is one, irrelevant for vectorization",
        "SingleIterationLoop", ORE, TheLoop);
    return FixedScalableVFPair::getNone();
  }

  switch (ScalarEpilogueStatus) {
  case CM_ScalarEpilogueAllowed:
    return computeFeasibleMaxVF(TC, UserVF, false);
  case CM_ScalarEpilogueNotAllowedUsePredicate:
    LLVM_FALLTHROUGH;
  case CM_ScalarEpilogueNotNeededUsePredicate:
    LLVM_DEBUG(
        dbgs() << "LV: vector predicate hint/switch found.\n"
               << "LV: Not allowing scalar epilogue, creating predicated "
               << "vector loop.\n");
    break;
  case CM_ScalarEpilogueNotAllowedLowTripLoop:
    // fallthrough as a special case of OptForSize
  case CM_ScalarEpilogueNotAllowedOptSize:
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
      LLVM_DEBUG(
          dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
    else
      LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
                        << "count.\n");

    // Bail if runtime checks are required, which are not good when optimising
    // for size.
    if (runtimeChecksRequired())
      return FixedScalableVFPair::getNone();

    break;
  }

  // The only loops we can vectorize without a scalar epilogue are loops with
  // a bottom-test and a single exiting block. We'd have to handle the fact
  // that not every instruction executes on the last iteration. This will
  // require a lane mask which varies through the vector loop body. (TODO)
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    // If there was a tail-folding hint/switch, but we can't fold the tail by
    // masking, fall back to a vectorization with a scalar epilogue.
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
      LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                           "scalar epilogue instead.\n");
      ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
      return computeFeasibleMaxVF(TC, UserVF, false);
    }
    return FixedScalableVFPair::getNone();
  }

  // Now try the tail folding.

  // Invalidate interleave groups that require an epilogue if we can't mask
  // the interleave-group.
  if (!useMaskedInterleavedAccesses(TTI)) {
    assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
           "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
    InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
  }

  FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
  // Avoid tail folding if the trip count is known to be a multiple of any VF
  // we chose.
  // FIXME: The condition below pessimises the case for fixed-width vectors,
  // when scalable VFs are also candidates for vectorization.
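  // For example (illustrative): with a known trip count of 64, MaxFixedVF = 8
  // and no user-specified interleave count, the remainder 64 urem 8 is zero,
  // so no scalar tail remains and tail folding can be skipped for fixed VFs.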
5434 if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) { 5435 ElementCount MaxFixedVF = MaxFactors.FixedVF; 5436 assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) && 5437 "MaxFixedVF must be a power of 2"); 5438 unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC 5439 : MaxFixedVF.getFixedValue(); 5440 ScalarEvolution *SE = PSE.getSE(); 5441 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5442 const SCEV *ExitCount = SE->getAddExpr( 5443 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5444 const SCEV *Rem = SE->getURemExpr( 5445 SE->applyLoopGuards(ExitCount, TheLoop), 5446 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5447 if (Rem->isZero()) { 5448 // Accept MaxFixedVF if we do not have a tail. 5449 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5450 return MaxFactors; 5451 } 5452 } 5453 5454 // For scalable vectors don't use tail folding for low trip counts or 5455 // optimizing for code size. We only permit this if the user has explicitly 5456 // requested it. 5457 if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate && 5458 ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate && 5459 MaxFactors.ScalableVF.isVector()) 5460 MaxFactors.ScalableVF = ElementCount::getScalable(0); 5461 5462 // If we don't know the precise trip count, or if the trip count that we 5463 // found modulo the vectorization factor is not zero, try to fold the tail 5464 // by masking. 5465 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5466 if (Legal->prepareToFoldTailByMasking()) { 5467 FoldTailByMasking = true; 5468 return MaxFactors; 5469 } 5470 5471 // If there was a tail-folding hint/switch, but we can't fold the tail by 5472 // masking, fallback to a vectorization with a scalar epilogue. 5473 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5474 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5475 "scalar epilogue instead.\n"); 5476 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5477 return MaxFactors; 5478 } 5479 5480 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5481 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5482 return FixedScalableVFPair::getNone(); 5483 } 5484 5485 if (TC == 0) { 5486 reportVectorizationFailure( 5487 "Unable to calculate the loop count due to complex control flow", 5488 "unable to calculate the loop count due to complex control flow", 5489 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5490 return FixedScalableVFPair::getNone(); 5491 } 5492 5493 reportVectorizationFailure( 5494 "Cannot optimize for size and vectorize at the same time.", 5495 "cannot optimize for size and vectorize at the same time. " 5496 "Enable vectorization of this loop with '#pragma clang loop " 5497 "vectorize(enable)' when compiling with -Os/-Oz", 5498 "NoTailLoopWithOptForSize", ORE, TheLoop); 5499 return FixedScalableVFPair::getNone(); 5500 } 5501 5502 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( 5503 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, 5504 const ElementCount &MaxSafeVF, bool FoldTailByMasking) { 5505 bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); 5506 TypeSize WidestRegister = TTI.getRegisterBitWidth( 5507 ComputeScalableMaxVF ? 
TargetTransformInfo::RGK_ScalableVector 5508 : TargetTransformInfo::RGK_FixedWidthVector); 5509 5510 // Convenience function to return the minimum of two ElementCounts. 5511 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) { 5512 assert((LHS.isScalable() == RHS.isScalable()) && 5513 "Scalable flags must match"); 5514 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS; 5515 }; 5516 5517 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 5518 // Note that both WidestRegister and WidestType may not be powers of 2. 5519 auto MaxVectorElementCount = ElementCount::get( 5520 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), 5521 ComputeScalableMaxVF); 5522 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); 5523 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5524 << (MaxVectorElementCount * WidestType) << " bits.\n"); 5525 5526 if (!MaxVectorElementCount) { 5527 LLVM_DEBUG(dbgs() << "LV: The target has no " 5528 << (ComputeScalableMaxVF ? "scalable" : "fixed") 5529 << " vector registers.\n"); 5530 return ElementCount::getFixed(1); 5531 } 5532 5533 const auto TripCountEC = ElementCount::getFixed(ConstTripCount); 5534 if (ConstTripCount && 5535 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && 5536 (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) { 5537 // If the loop trip count (TC) is known at compile time, there is no point 5538 // in choosing a VF greater than TC (as done in the loop below). Select the 5539 // maximum power of two which doesn't exceed TC. 5540 // If MaxVectorElementCount is scalable, we only fall back on a fixed VF 5541 // when the TC is less than or equal to the known number of lanes. 5542 auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount); 5543 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not " 5544 "exceeding the constant trip count: " 5545 << ClampedConstTripCount << "\n"); 5546 return ElementCount::getFixed(ClampedConstTripCount); 5547 } 5548 5549 ElementCount MaxVF = MaxVectorElementCount; 5550 if (TTI.shouldMaximizeVectorBandwidth() || 5551 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 5552 auto MaxVectorElementCountMaxBW = ElementCount::get( 5553 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), 5554 ComputeScalableMaxVF); 5555 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); 5556 5557 // Collect all viable vectorization factors larger than the default MaxVF 5558 // (i.e. MaxVectorElementCount). 5559 SmallVector<ElementCount, 8> VFs; 5560 for (ElementCount VS = MaxVectorElementCount * 2; 5561 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) 5562 VFs.push_back(VS); 5563 5564 // For each VF calculate its register usage. 5565 auto RUs = calculateRegisterUsage(VFs); 5566 5567 // Select the largest VF which doesn't require more registers than existing 5568 // ones.
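// Illustrative example (added for exposition, not in the original source):
// if the candidate VFs are {8, 16} and the register-usage estimate for
// VF = 16 exceeds the available registers in some class while VF = 8 fits,
// the loop below walks the candidates from the largest to the smallest and
// settles on VF = 8.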
5569 for (int i = RUs.size() - 1; i >= 0; --i) { 5570 bool Selected = true; 5571 for (auto &pair : RUs[i].MaxLocalUsers) { 5572 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5573 if (pair.second > TargetNumRegisters) 5574 Selected = false; 5575 } 5576 if (Selected) { 5577 MaxVF = VFs[i]; 5578 break; 5579 } 5580 } 5581 if (ElementCount MinVF = 5582 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { 5583 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 5584 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5585 << ") with target's minimum: " << MinVF << '\n'); 5586 MaxVF = MinVF; 5587 } 5588 } 5589 } 5590 return MaxVF; 5591 } 5592 5593 Optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const { 5594 if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) { 5595 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange); 5596 auto Min = Attr.getVScaleRangeMin(); 5597 auto Max = Attr.getVScaleRangeMax(); 5598 if (Max && Min == Max) 5599 return Max; 5600 } 5601 5602 return TTI.getVScaleForTuning(); 5603 } 5604 5605 bool LoopVectorizationCostModel::isMoreProfitable( 5606 const VectorizationFactor &A, const VectorizationFactor &B) const { 5607 InstructionCost CostA = A.Cost; 5608 InstructionCost CostB = B.Cost; 5609 5610 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 5611 5612 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 5613 MaxTripCount) { 5614 // If we are folding the tail and the trip count is a known (possibly small) 5615 // constant, the trip count will be rounded up to an integer number of 5616 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 5617 // which we compare directly. When not folding the tail, the total cost will 5618 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 5619 // approximated with the per-lane cost below instead of using the tripcount 5620 // as here. 5621 auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 5622 auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 5623 return RTCostA < RTCostB; 5624 } 5625 5626 // Improve estimate for the vector width if it is scalable. 5627 unsigned EstimatedWidthA = A.Width.getKnownMinValue(); 5628 unsigned EstimatedWidthB = B.Width.getKnownMinValue(); 5629 if (Optional<unsigned> VScale = getVScaleForTuning()) { 5630 if (A.Width.isScalable()) 5631 EstimatedWidthA *= VScale.getValue(); 5632 if (B.Width.isScalable()) 5633 EstimatedWidthB *= VScale.getValue(); 5634 } 5635 5636 // Assume vscale may be larger than 1 (or the value being tuned for), 5637 // so that scalable vectorization is slightly favorable over fixed-width 5638 // vectorization. 
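// Illustrative example (added for exposition, not in the original source):
// with A.Width = vscale x 4, CostA = 10, a tuning vscale of 2 (so
// EstimatedWidthA = 8) and B.Width = 8 fixed with CostB = 10, the comparison
// below evaluates 10 * 8 <= 10 * 8, and the tie is resolved in favor of the
// scalable factor.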
5639 if (A.Width.isScalable() && !B.Width.isScalable()) 5640 return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA); 5641 5642 // To avoid the need for FP division: 5643 // (CostA / A.Width) < (CostB / B.Width) 5644 // <=> (CostA * B.Width) < (CostB * A.Width) 5645 return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA); 5646 } 5647 5648 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 5649 const ElementCountSet &VFCandidates) { 5650 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 5651 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 5652 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 5653 assert(VFCandidates.count(ElementCount::getFixed(1)) && 5654 "Expected Scalar VF to be a candidate"); 5655 5656 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); 5657 VectorizationFactor ChosenFactor = ScalarCost; 5658 5659 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5660 if (ForceVectorization && VFCandidates.size() > 1) { 5661 // Ignore scalar width, because the user explicitly wants vectorization. 5662 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5663 // evaluation. 5664 ChosenFactor.Cost = InstructionCost::getMax(); 5665 } 5666 5667 SmallVector<InstructionVFPair> InvalidCosts; 5668 for (const auto &i : VFCandidates) { 5669 // The cost for scalar VF=1 is already calculated, so ignore it. 5670 if (i.isScalar()) 5671 continue; 5672 5673 VectorizationCostTy C = expectedCost(i, &InvalidCosts); 5674 VectorizationFactor Candidate(i, C.first); 5675 5676 #ifndef NDEBUG 5677 unsigned AssumedMinimumVscale = 1; 5678 if (Optional<unsigned> VScale = getVScaleForTuning()) 5679 AssumedMinimumVscale = VScale.getValue(); 5680 unsigned Width = 5681 Candidate.Width.isScalable() 5682 ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale 5683 : Candidate.Width.getFixedValue(); 5684 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 5685 << " costs: " << (Candidate.Cost / Width)); 5686 if (i.isScalable()) 5687 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of " 5688 << AssumedMinimumVscale << ")"); 5689 LLVM_DEBUG(dbgs() << ".\n"); 5690 #endif 5691 5692 if (!C.second && !ForceVectorization) { 5693 LLVM_DEBUG( 5694 dbgs() << "LV: Not considering vector loop of width " << i 5695 << " because it will not generate any vector instructions.\n"); 5696 continue; 5697 } 5698 5699 // If profitable add it to ProfitableVF list. 5700 if (isMoreProfitable(Candidate, ScalarCost)) 5701 ProfitableVFs.push_back(Candidate); 5702 5703 if (isMoreProfitable(Candidate, ChosenFactor)) 5704 ChosenFactor = Candidate; 5705 } 5706 5707 // Emit a report of VFs with invalid costs in the loop. 5708 if (!InvalidCosts.empty()) { 5709 // Group the remarks per instruction, keeping the instruction order from 5710 // InvalidCosts. 5711 std::map<Instruction *, unsigned> Numbering; 5712 unsigned I = 0; 5713 for (auto &Pair : InvalidCosts) 5714 if (!Numbering.count(Pair.first)) 5715 Numbering[Pair.first] = I++; 5716 5717 // Sort the list, first on instruction(number) then on VF. 
5718 llvm::sort(InvalidCosts, 5719 [&Numbering](InstructionVFPair &A, InstructionVFPair &B) { 5720 if (Numbering[A.first] != Numbering[B.first]) 5721 return Numbering[A.first] < Numbering[B.first]; 5722 ElementCountComparator ECC; 5723 return ECC(A.second, B.second); 5724 }); 5725 5726 // For a list of ordered instruction-vf pairs: 5727 // [(load, vf1), (load, vf2), (store, vf1)] 5728 // Group the instructions together to emit separate remarks for: 5729 // load (vf1, vf2) 5730 // store (vf1) 5731 auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts); 5732 auto Subset = ArrayRef<InstructionVFPair>(); 5733 do { 5734 if (Subset.empty()) 5735 Subset = Tail.take_front(1); 5736 5737 Instruction *I = Subset.front().first; 5738 5739 // If the next instruction is different, or if there are no other pairs, 5740 // emit a remark for the collated subset. e.g. 5741 // [(load, vf1), (load, vf2)] 5742 // to emit: 5743 // remark: invalid costs for 'load' at VF=(vf1, vf2) 5744 if (Subset == Tail || Tail[Subset.size()].first != I) { 5745 std::string OutString; 5746 raw_string_ostream OS(OutString); 5747 assert(!Subset.empty() && "Unexpected empty range"); 5748 OS << "Instruction with invalid costs prevented vectorization at VF=("; 5749 for (auto &Pair : Subset) 5750 OS << (Pair.second == Subset.front().second ? "" : ", ") 5751 << Pair.second; 5752 OS << "):"; 5753 if (auto *CI = dyn_cast<CallInst>(I)) 5754 OS << " call to " << CI->getCalledFunction()->getName(); 5755 else 5756 OS << " " << I->getOpcodeName(); 5757 OS.flush(); 5758 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); 5759 Tail = Tail.drop_front(Subset.size()); 5760 Subset = {}; 5761 } else 5762 // Grow the subset by one element. 5763 Subset = Tail.take_front(Subset.size() + 1); 5764 } while (!Tail.empty()); 5765 } 5766 5767 if (!EnableCondStoresVectorization && NumPredStores) { 5768 reportVectorizationFailure("There are conditional stores.", 5769 "store that is conditionally executed prevents vectorization", 5770 "ConditionalStore", ORE, TheLoop); 5771 ChosenFactor = ScalarCost; 5772 } 5773 5774 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 5775 ChosenFactor.Cost >= ScalarCost.Cost) dbgs() 5776 << "LV: Vectorization seems to be not beneficial, " 5777 << "but was forced by a user.\n"); 5778 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 5779 return ChosenFactor; 5780 } 5781 5782 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 5783 const Loop &L, ElementCount VF) const { 5784 // Cross-iteration phis such as reductions need special handling and are 5785 // currently unsupported. 5786 if (any_of(L.getHeader()->phis(), 5787 [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); })) 5788 return false; 5789 5790 // Phis with uses outside of the loop require special handling and are 5791 // currently unsupported. 5792 for (auto &Entry : Legal->getInductionVars()) { 5793 // Look for uses of the value of the induction at the last iteration. 5794 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 5795 for (User *U : PostInc->users()) 5796 if (!L.contains(cast<Instruction>(U))) 5797 return false; 5798 // Look for uses of the penultimate value of the induction. 5799 for (User *U : Entry.first->users()) 5800 if (!L.contains(cast<Instruction>(U))) 5801 return false; 5802 } 5803 5804 // Induction variables that are widened require special handling that is 5805 // currently not supported.
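// Clarifying note (added for exposition, not in the original source): an
// induction is treated as "widened" here when it is neither known to remain
// scalar after vectorization nor profitable to scalarize, so the check below
// rejects the loop if any induction would be kept as a genuinely vector
// value.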
5806 if (any_of(Legal->getInductionVars(), [&](auto &Entry) { 5807 return !(this->isScalarAfterVectorization(Entry.first, VF) || 5808 this->isProfitableToScalarize(Entry.first, VF)); 5809 })) 5810 return false; 5811 5812 // Epilogue vectorization code has not been audited to ensure it handles 5813 // non-latch exits properly. It may be fine, but it needs to be audited and 5814 // tested. 5815 if (L.getExitingBlock() != L.getLoopLatch()) 5816 return false; 5817 5818 return true; 5819 } 5820 5821 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( 5822 const ElementCount VF) const { 5823 // FIXME: We need a much better cost-model to take different parameters such 5824 // as register pressure, code size increase and cost of extra branches into 5825 // account. For now we apply a very crude heuristic and only consider loops 5826 // with vectorization factors larger than a certain value. 5827 // We also consider epilogue vectorization unprofitable for targets that don't 5828 // consider interleaving beneficial (e.g. MVE). 5829 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1) 5830 return false; 5831 // FIXME: We should consider changing the threshold for scalable 5832 // vectors to take VScaleForTuning into account. 5833 if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF) 5834 return true; 5835 return false; 5836 } 5837 5838 VectorizationFactor 5839 LoopVectorizationCostModel::selectEpilogueVectorizationFactor( 5840 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) { 5841 VectorizationFactor Result = VectorizationFactor::Disabled(); 5842 if (!EnableEpilogueVectorization) { 5843 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";); 5844 return Result; 5845 } 5846 5847 if (!isScalarEpilogueAllowed()) { 5848 LLVM_DEBUG( 5849 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is " 5850 "allowed.\n";); 5851 return Result; 5852 } 5853 5854 // Not really a cost consideration, but check for unsupported cases here to 5855 // simplify the logic. 5856 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { 5857 LLVM_DEBUG( 5858 dbgs() << "LEV: Unable to vectorize epilogue because the loop is " 5859 "not a supported candidate.\n";); 5860 return Result; 5861 } 5862 5863 if (EpilogueVectorizationForceVF > 1) { 5864 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";); 5865 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF); 5866 if (LVP.hasPlanWithVF(ForcedEC)) 5867 return {ForcedEC, 0}; 5868 else { 5869 LLVM_DEBUG( 5870 dbgs() 5871 << "LEV: Epilogue vectorization forced factor is not viable.\n";); 5872 return Result; 5873 } 5874 } 5875 5876 if (TheLoop->getHeader()->getParent()->hasOptSize() || 5877 TheLoop->getHeader()->getParent()->hasMinSize()) { 5878 LLVM_DEBUG( 5879 dbgs() 5880 << "LEV: Epilogue vectorization skipped due to opt for size.\n";); 5881 return Result; 5882 } 5883 5884 if (!isEpilogueVectorizationProfitable(MainLoopVF)) { 5885 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for " 5886 "this loop\n"); 5887 return Result; 5888 } 5889 5890 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know 5891 // the main loop handles 8 lanes per iteration. We could still benefit from 5892 // vectorizing the epilogue loop with VF=4.
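// Illustrative note (added for exposition, not in the original source): for
// MainLoopVF = vscale x 2 and a tuning vscale of 4, EstimatedRuntimeVF below
// becomes a fixed count of 8, so a previously collected profitable fixed VF
// such as 4 can be picked for the epilogue by the selection loop that
// follows, provided a VPlan exists for it.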
5893 ElementCount EstimatedRuntimeVF = MainLoopVF; 5894 if (MainLoopVF.isScalable()) { 5895 EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue()); 5896 if (Optional<unsigned> VScale = getVScaleForTuning()) 5897 EstimatedRuntimeVF *= VScale.getValue(); 5898 } 5899 5900 for (auto &NextVF : ProfitableVFs) 5901 if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() && 5902 ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) || 5903 ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) && 5904 (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) && 5905 LVP.hasPlanWithVF(NextVF.Width)) 5906 Result = NextVF; 5907 5908 if (Result != VectorizationFactor::Disabled()) 5909 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 5910 << Result.Width << "\n";); 5911 return Result; 5912 } 5913 5914 std::pair<unsigned, unsigned> 5915 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5916 unsigned MinWidth = -1U; 5917 unsigned MaxWidth = 8; 5918 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5919 // For in-loop reductions, no element types are added to ElementTypesInLoop 5920 // if there are no loads/stores in the loop. In this case, check through the 5921 // reduction variables to determine the maximum width. 5922 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) { 5923 // Reset MaxWidth so that we can find the smallest type used by recurrences 5924 // in the loop. 5925 MaxWidth = -1U; 5926 for (auto &PhiDescriptorPair : Legal->getReductionVars()) { 5927 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second; 5928 // When finding the min width used by the recurrence we need to account 5929 // for casts on the input operands of the recurrence. 5930 MaxWidth = std::min<unsigned>( 5931 MaxWidth, std::min<unsigned>( 5932 RdxDesc.getMinWidthCastToRecurrenceTypeInBits(), 5933 RdxDesc.getRecurrenceType()->getScalarSizeInBits())); 5934 } 5935 } else { 5936 for (Type *T : ElementTypesInLoop) { 5937 MinWidth = std::min<unsigned>( 5938 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5939 MaxWidth = std::max<unsigned>( 5940 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5941 } 5942 } 5943 return {MinWidth, MaxWidth}; 5944 } 5945 5946 void LoopVectorizationCostModel::collectElementTypesForWidening() { 5947 ElementTypesInLoop.clear(); 5948 // For each block. 5949 for (BasicBlock *BB : TheLoop->blocks()) { 5950 // For each instruction in the loop. 5951 for (Instruction &I : BB->instructionsWithoutDebug()) { 5952 Type *T = I.getType(); 5953 5954 // Skip ignored values. 5955 if (ValuesToIgnore.count(&I)) 5956 continue; 5957 5958 // Only examine Loads, Stores and PHINodes. 5959 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5960 continue; 5961 5962 // Examine PHI nodes that are reduction variables. Update the type to 5963 // account for the recurrence type. 5964 if (auto *PN = dyn_cast<PHINode>(&I)) { 5965 if (!Legal->isReductionVariable(PN)) 5966 continue; 5967 const RecurrenceDescriptor &RdxDesc = 5968 Legal->getReductionVars().find(PN)->second; 5969 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 5970 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 5971 RdxDesc.getRecurrenceType(), 5972 TargetTransformInfo::ReductionFlags())) 5973 continue; 5974 T = RdxDesc.getRecurrenceType(); 5975 } 5976 5977 // Examine the stored values. 
5978 if (auto *ST = dyn_cast<StoreInst>(&I)) 5979 T = ST->getValueOperand()->getType(); 5980 5981 assert(T->isSized() && 5982 "Expected the load/store/recurrence type to be sized"); 5983 5984 ElementTypesInLoop.insert(T); 5985 } 5986 } 5987 } 5988 5989 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 5990 unsigned LoopCost) { 5991 // -- The interleave heuristics -- 5992 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5993 // There are many micro-architectural considerations that we can't predict 5994 // at this level. For example, frontend pressure (on decode or fetch) due to 5995 // code size, or the number and capabilities of the execution ports. 5996 // 5997 // We use the following heuristics to select the interleave count: 5998 // 1. If the code has reductions, then we interleave to break the cross 5999 // iteration dependency. 6000 // 2. If the loop is really small, then we interleave to reduce the loop 6001 // overhead. 6002 // 3. We don't interleave if we think that we will spill registers to memory 6003 // due to the increased register pressure. 6004 6005 if (!isScalarEpilogueAllowed()) 6006 return 1; 6007 6008 // We used the distance for the interleave count. 6009 if (Legal->getMaxSafeDepDistBytes() != -1U) 6010 return 1; 6011 6012 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 6013 const bool HasReductions = !Legal->getReductionVars().empty(); 6014 // Do not interleave loops with a relatively small known or estimated trip 6015 // count. But we will interleave when InterleaveSmallLoopScalarReduction is 6016 // enabled, and the code has scalar reductions(HasReductions && VF = 1), 6017 // because with the above conditions interleaving can expose ILP and break 6018 // cross iteration dependences for reductions. 6019 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 6020 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 6021 return 1; 6022 6023 RegisterUsage R = calculateRegisterUsage({VF})[0]; 6024 // We divide by these constants so assume that we have at least one 6025 // instruction that uses at least one register. 6026 for (auto& pair : R.MaxLocalUsers) { 6027 pair.second = std::max(pair.second, 1U); 6028 } 6029 6030 // We calculate the interleave count using the following formula. 6031 // Subtract the number of loop invariants from the number of available 6032 // registers. These registers are used by all of the interleaved instances. 6033 // Next, divide the remaining registers by the number of registers that is 6034 // required by the loop, in order to estimate how many parallel instances 6035 // fit without causing spills. All of this is rounded down if necessary to be 6036 // a power of two. We want power of two interleave count to simplify any 6037 // addressing operations or alignment considerations. 6038 // We also want power of two interleave counts to ensure that the induction 6039 // variable of the vector loop wraps to zero, when tail is folded by masking; 6040 // this currently happens when OptForSize, in which case IC is set to 1 above. 
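// Illustrative example (added for exposition, not in the original source):
// with 32 registers in a class, 2 of them tied up by loop-invariant values
// and at most 5 values live per iteration, the induction-variable-aware
// formula below yields PowerOf2Floor((32 - 2 - 1) / (5 - 1)) =
// PowerOf2Floor(7) = 4, i.e. at most 4 interleaved copies are assumed to fit
// without spilling.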
6041 unsigned IC = UINT_MAX; 6042 6043 for (auto& pair : R.MaxLocalUsers) { 6044 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 6045 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 6046 << " registers of " 6047 << TTI.getRegisterClassName(pair.first) << " register class\n"); 6048 if (VF.isScalar()) { 6049 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 6050 TargetNumRegisters = ForceTargetNumScalarRegs; 6051 } else { 6052 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 6053 TargetNumRegisters = ForceTargetNumVectorRegs; 6054 } 6055 unsigned MaxLocalUsers = pair.second; 6056 unsigned LoopInvariantRegs = 0; 6057 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 6058 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 6059 6060 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 6061 // Don't count the induction variable as interleaved. 6062 if (EnableIndVarRegisterHeur) { 6063 TmpIC = 6064 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 6065 std::max(1U, (MaxLocalUsers - 1))); 6066 } 6067 6068 IC = std::min(IC, TmpIC); 6069 } 6070 6071 // Clamp the interleave ranges to reasonable counts. 6072 unsigned MaxInterleaveCount = 6073 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 6074 6075 // Check if the user has overridden the max. 6076 if (VF.isScalar()) { 6077 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 6078 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 6079 } else { 6080 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 6081 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 6082 } 6083 6084 // If trip count is known or estimated compile time constant, limit the 6085 // interleave count to be less than the trip count divided by VF, provided it 6086 // is at least 1. 6087 // 6088 // For scalable vectors we can't know if interleaving is beneficial. It may 6089 // not be beneficial for small loops if none of the lanes in the second vector 6090 // iterations is enabled. However, for larger loops, there is likely to be a 6091 // similar benefit as for fixed-width vectors. For now, we choose to leave 6092 // the InterleaveCount as if vscale is '1', although if some information about 6093 // the vector is known (e.g. min vector size), we can make a better decision. 6094 if (BestKnownTC) { 6095 MaxInterleaveCount = 6096 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 6097 // Make sure MaxInterleaveCount is greater than 0. 6098 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 6099 } 6100 6101 assert(MaxInterleaveCount > 0 && 6102 "Maximum interleave count must be greater than 0"); 6103 6104 // Clamp the calculated IC to be between the 1 and the max interleave count 6105 // that the target and trip count allows. 6106 if (IC > MaxInterleaveCount) 6107 IC = MaxInterleaveCount; 6108 else 6109 // Make sure IC is greater than 0. 6110 IC = std::max(1u, IC); 6111 6112 assert(IC > 0 && "Interleave count must be greater than 0."); 6113 6114 // If we did not calculate the cost for VF (because the user selected the VF) 6115 // then we calculate the cost of VF here. 
6116 if (LoopCost == 0) { 6117 InstructionCost C = expectedCost(VF).first; 6118 assert(C.isValid() && "Expected to have chosen a VF with valid cost"); 6119 LoopCost = *C.getValue(); 6120 } 6121 6122 assert(LoopCost && "Non-zero loop cost expected"); 6123 6124 // Interleave if we vectorized this loop and there is a reduction that could 6125 // benefit from interleaving. 6126 if (VF.isVector() && HasReductions) { 6127 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6128 return IC; 6129 } 6130 6131 // For any scalar loop that either requires runtime checks or predication we 6132 // are better off leaving this to the unroller. Note that if we've already 6133 // vectorized the loop we will have done the runtime check and so interleaving 6134 // won't require further checks. 6135 bool ScalarInterleavingRequiresPredication = 6136 (VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) { 6137 return Legal->blockNeedsPredication(BB); 6138 })); 6139 bool ScalarInterleavingRequiresRuntimePointerCheck = 6140 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 6141 6142 // We want to interleave small loops in order to reduce the loop overhead and 6143 // potentially expose ILP opportunities. 6144 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 6145 << "LV: IC is " << IC << '\n' 6146 << "LV: VF is " << VF << '\n'); 6147 const bool AggressivelyInterleaveReductions = 6148 TTI.enableAggressiveInterleaving(HasReductions); 6149 if (!ScalarInterleavingRequiresRuntimePointerCheck && 6150 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) { 6151 // We assume that the cost overhead is 1 and we use the cost model 6152 // to estimate the cost of the loop and interleave until the cost of the 6153 // loop overhead is about 5% of the cost of the loop. 6154 unsigned SmallIC = 6155 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 6156 6157 // Interleave until store/load ports (estimated by max interleave count) are 6158 // saturated. 6159 unsigned NumStores = Legal->getNumStores(); 6160 unsigned NumLoads = Legal->getNumLoads(); 6161 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 6162 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 6163 6164 // There is little point in interleaving for reductions containing selects 6165 // and compares when VF=1 since it may just create more overhead than it's 6166 // worth for loops with small trip counts. This is because we still have to 6167 // do the final reduction after the loop. 6168 bool HasSelectCmpReductions = 6169 HasReductions && 6170 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 6171 const RecurrenceDescriptor &RdxDesc = Reduction.second; 6172 return RecurrenceDescriptor::isSelectCmpRecurrenceKind( 6173 RdxDesc.getRecurrenceKind()); 6174 }); 6175 if (HasSelectCmpReductions) { 6176 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n"); 6177 return 1; 6178 } 6179 6180 // If we have a scalar reduction (vector reductions are already dealt with 6181 // by this point), we can increase the critical path length if the loop 6182 // we're interleaving is inside another loop. For tree-wise reductions 6183 // set the limit to 2, and for ordered reductions it's best to disable 6184 // interleaving entirely. 
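// Clarifying note (added for exposition, not in the original source): for a
// tree-wise scalar reduction in a nested loop, SmallIC, StoresIC and LoadsIC
// are clamped below to MaxNestedScalarReductionIC (the limit of 2 mentioned
// above) so the extra partial sums do not stretch the critical path, while
// ordered (strict FP) reductions give up on interleaving entirely.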
6185 if (HasReductions && TheLoop->getLoopDepth() > 1) { 6186 bool HasOrderedReductions = 6187 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 6188 const RecurrenceDescriptor &RdxDesc = Reduction.second; 6189 return RdxDesc.isOrdered(); 6190 }); 6191 if (HasOrderedReductions) { 6192 LLVM_DEBUG( 6193 dbgs() << "LV: Not interleaving scalar ordered reductions.\n"); 6194 return 1; 6195 } 6196 6197 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 6198 SmallIC = std::min(SmallIC, F); 6199 StoresIC = std::min(StoresIC, F); 6200 LoadsIC = std::min(LoadsIC, F); 6201 } 6202 6203 if (EnableLoadStoreRuntimeInterleave && 6204 std::max(StoresIC, LoadsIC) > SmallIC) { 6205 LLVM_DEBUG( 6206 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 6207 return std::max(StoresIC, LoadsIC); 6208 } 6209 6210 // If there are scalar reductions and TTI has enabled aggressive 6211 // interleaving for reductions, we will interleave to expose ILP. 6212 if (InterleaveSmallLoopScalarReduction && VF.isScalar() && 6213 AggressivelyInterleaveReductions) { 6214 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6215 // Interleave no less than SmallIC but not as aggressive as the normal IC 6216 // to satisfy the rare situation when resources are too limited. 6217 return std::max(IC / 2, SmallIC); 6218 } else { 6219 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 6220 return SmallIC; 6221 } 6222 } 6223 6224 // Interleave if this is a large loop (small loops are already dealt with by 6225 // this point) that could benefit from interleaving. 6226 if (AggressivelyInterleaveReductions) { 6227 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6228 return IC; 6229 } 6230 6231 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 6232 return 1; 6233 } 6234 6235 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 6236 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) { 6237 // This function calculates the register usage by measuring the highest number 6238 // of values that are alive at a single location. Obviously, this is a very 6239 // rough estimation. We scan the loop in topological order and 6240 // assign a number to each instruction. We use RPO to ensure that defs are 6241 // met before their users. We assume that each instruction that has in-loop 6242 // users starts an interval. We record every time that an in-loop value is 6243 // used, so we have a list of the first and last occurrences of each 6244 // instruction. Next, we transpose this data structure into a multi-map that 6245 // holds the list of intervals that *end* at a specific location. This multi- 6246 // map allows us to perform a linear search. We scan the instructions linearly 6247 // and record each time that a new interval starts, by placing it in a set. 6248 // If we find this value in the multi-map then we remove it from the set. 6249 // The max register usage is the maximum size of the set. 6250 // We also search for instructions that are defined outside the loop, but are 6251 // used inside the loop. We need this number separately from the max-interval 6252 // usage number because when we unroll, loop-invariant values do not take 6253 // more registers. 6254 LoopBlocksDFS DFS(TheLoop); 6255 DFS.perform(LI); 6256 6257 RegisterUsage RU; 6258 6259 // Each 'key' in the map opens a new interval. The values 6260 // of the map are the index of the 'last seen' usage of the 6261 // instruction that is the key.
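// Illustrative example (added for exposition, not in the original source):
// in a body like
//   %a = load ...; %b = load ...; %c = add %a, %b; store %c
// the intervals of %a and %b both end at the add, so at most two values are
// live at once and the estimated usage for that register class at VF = 1 is
// 2.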
6262 using IntervalMap = DenseMap<Instruction *, unsigned>; 6263 6264 // Maps instruction to its index. 6265 SmallVector<Instruction *, 64> IdxToInstr; 6266 // Marks the end of each interval. 6267 IntervalMap EndPoint; 6268 // Saves the list of instruction indices that are used in the loop. 6269 SmallPtrSet<Instruction *, 8> Ends; 6270 // Saves the list of values that are used in the loop but are 6271 // defined outside the loop, such as arguments and constants. 6272 SmallPtrSet<Value *, 8> LoopInvariants; 6273 6274 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6275 for (Instruction &I : BB->instructionsWithoutDebug()) { 6276 IdxToInstr.push_back(&I); 6277 6278 // Save the end location of each USE. 6279 for (Value *U : I.operands()) { 6280 auto *Instr = dyn_cast<Instruction>(U); 6281 6282 // Ignore non-instruction values such as arguments, constants, etc. 6283 if (!Instr) 6284 continue; 6285 6286 // If this instruction is outside the loop then record it and continue. 6287 if (!TheLoop->contains(Instr)) { 6288 LoopInvariants.insert(Instr); 6289 continue; 6290 } 6291 6292 // Overwrite previous end points. 6293 EndPoint[Instr] = IdxToInstr.size(); 6294 Ends.insert(Instr); 6295 } 6296 } 6297 } 6298 6299 // Saves the list of intervals that end with the index in 'key'. 6300 using InstrList = SmallVector<Instruction *, 2>; 6301 DenseMap<unsigned, InstrList> TransposeEnds; 6302 6303 // Transpose the EndPoints to a list of values that end at each index. 6304 for (auto &Interval : EndPoint) 6305 TransposeEnds[Interval.second].push_back(Interval.first); 6306 6307 SmallPtrSet<Instruction *, 8> OpenIntervals; 6308 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6309 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 6310 6311 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6312 6313 // A lambda that gets the register usage for the given type and VF. 6314 const auto &TTICapture = TTI; 6315 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned { 6316 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 6317 return 0; 6318 InstructionCost::CostType RegUsage = 6319 *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue(); 6320 assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() && 6321 "Nonsensical values for register usage."); 6322 return RegUsage; 6323 }; 6324 6325 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 6326 Instruction *I = IdxToInstr[i]; 6327 6328 // Remove all of the instructions that end at this location. 6329 InstrList &List = TransposeEnds[i]; 6330 for (Instruction *ToRemove : List) 6331 OpenIntervals.erase(ToRemove); 6332 6333 // Ignore instructions that are never used within the loop. 6334 if (!Ends.count(I)) 6335 continue; 6336 6337 // Skip ignored values. 6338 if (ValuesToIgnore.count(I)) 6339 continue; 6340 6341 // For each VF find the maximum usage of registers. 6342 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6343 // Count the number of live intervals. 6344 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6345 6346 if (VFs[j].isScalar()) { 6347 for (auto Inst : OpenIntervals) { 6348 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6349 if (RegUsage.find(ClassID) == RegUsage.end()) 6350 RegUsage[ClassID] = 1; 6351 else 6352 RegUsage[ClassID] += 1; 6353 } 6354 } else { 6355 collectUniformsAndScalars(VFs[j]); 6356 for (auto Inst : OpenIntervals) { 6357 // Skip ignored values for VF > 1. 
6358 if (VecValuesToIgnore.count(Inst)) 6359 continue; 6360 if (isScalarAfterVectorization(Inst, VFs[j])) { 6361 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6362 if (RegUsage.find(ClassID) == RegUsage.end()) 6363 RegUsage[ClassID] = 1; 6364 else 6365 RegUsage[ClassID] += 1; 6366 } else { 6367 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6368 if (RegUsage.find(ClassID) == RegUsage.end()) 6369 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6370 else 6371 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6372 } 6373 } 6374 } 6375 6376 for (auto& pair : RegUsage) { 6377 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6378 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6379 else 6380 MaxUsages[j][pair.first] = pair.second; 6381 } 6382 } 6383 6384 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6385 << OpenIntervals.size() << '\n'); 6386 6387 // Add the current instruction to the list of open intervals. 6388 OpenIntervals.insert(I); 6389 } 6390 6391 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6392 SmallMapVector<unsigned, unsigned, 4> Invariant; 6393 6394 for (auto Inst : LoopInvariants) { 6395 unsigned Usage = 6396 VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]); 6397 unsigned ClassID = 6398 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6399 if (Invariant.find(ClassID) == Invariant.end()) 6400 Invariant[ClassID] = Usage; 6401 else 6402 Invariant[ClassID] += Usage; 6403 } 6404 6405 LLVM_DEBUG({ 6406 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6407 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6408 << " item\n"; 6409 for (const auto &pair : MaxUsages[i]) { 6410 dbgs() << "LV(REG): RegisterClass: " 6411 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6412 << " registers\n"; 6413 } 6414 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6415 << " item\n"; 6416 for (const auto &pair : Invariant) { 6417 dbgs() << "LV(REG): RegisterClass: " 6418 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6419 << " registers\n"; 6420 } 6421 }); 6422 6423 RU.LoopInvariantRegs = Invariant; 6424 RU.MaxLocalUsers = MaxUsages[i]; 6425 RUs[i] = RU; 6426 } 6427 6428 return RUs; 6429 } 6430 6431 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6432 // If we aren't vectorizing the loop, or if we've already collected the 6433 // instructions to scalarize, there's nothing to do. Collection may already 6434 // have occurred if we have a user-selected VF and are now computing the 6435 // expected cost for interleaving. 6436 if (VF.isScalar() || VF.isZero() || 6437 InstsToScalarize.find(VF) != InstsToScalarize.end()) 6438 return; 6439 6440 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 6441 // not profitable to scalarize any instructions, the presence of VF in the 6442 // map will indicate that we've analyzed it already. 6443 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6444 6445 // Find all the instructions that are scalar with predication in the loop and 6446 // determine if it would be better to not if-convert the blocks they are in. 6447 // If so, we also record the instructions to scalarize. 
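// Clarifying note (added for exposition, not in the original source): the
// discount computed below accumulates VectorCost - ScalarCost over the
// single-use chain feeding each predicated instruction, with the scalar side
// scaled down by the block probability; a non-negative total means the
// scalarized, predicated form is expected to be no more expensive than the
// vector form.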
6448 for (BasicBlock *BB : TheLoop->blocks()) { 6449 if (!blockNeedsPredicationForAnyReason(BB)) 6450 continue; 6451 for (Instruction &I : *BB) 6452 if (isScalarWithPredication(&I, VF)) { 6453 ScalarCostsTy ScalarCosts; 6454 // Do not apply discount if scalable, because that would lead to 6455 // invalid scalarization costs. 6456 if (!VF.isScalable() && 6457 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6458 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6459 // Remember that BB will remain after vectorization. 6460 PredicatedBBsAfterVectorization.insert(BB); 6461 } 6462 } 6463 } 6464 6465 int LoopVectorizationCostModel::computePredInstDiscount( 6466 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6467 assert(!isUniformAfterVectorization(PredInst, VF) && 6468 "Instruction marked uniform-after-vectorization will be predicated"); 6469 6470 // Initialize the discount to zero, meaning that the scalar version and the 6471 // vector version cost the same. 6472 InstructionCost Discount = 0; 6473 6474 // Holds instructions to analyze. The instructions we visit are mapped in 6475 // ScalarCosts. Those instructions are the ones that would be scalarized if 6476 // we find that the scalar version costs less. 6477 SmallVector<Instruction *, 8> Worklist; 6478 6479 // Returns true if the given instruction can be scalarized. 6480 auto canBeScalarized = [&](Instruction *I) -> bool { 6481 // We only attempt to scalarize instructions forming a single-use chain 6482 // from the original predicated block that would otherwise be vectorized. 6483 // Although not strictly necessary, we give up on instructions we know will 6484 // already be scalar to avoid traversing chains that are unlikely to be 6485 // beneficial. 6486 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6487 isScalarAfterVectorization(I, VF)) 6488 return false; 6489 6490 // If the instruction is scalar with predication, it will be analyzed 6491 // separately. We ignore it within the context of PredInst. 6492 if (isScalarWithPredication(I, VF)) 6493 return false; 6494 6495 // If any of the instruction's operands are uniform after vectorization, 6496 // the instruction cannot be scalarized. This prevents, for example, a 6497 // masked load from being scalarized. 6498 // 6499 // We assume we will only emit a value for lane zero of an instruction 6500 // marked uniform after vectorization, rather than VF identical values. 6501 // Thus, if we scalarize an instruction that uses a uniform, we would 6502 // create uses of values corresponding to the lanes we aren't emitting code 6503 // for. This behavior can be changed by allowing getScalarValue to clone 6504 // the lane zero values for uniforms rather than asserting. 6505 for (Use &U : I->operands()) 6506 if (auto *J = dyn_cast<Instruction>(U.get())) 6507 if (isUniformAfterVectorization(J, VF)) 6508 return false; 6509 6510 // Otherwise, we can scalarize the instruction. 6511 return true; 6512 }; 6513 6514 // Compute the expected cost discount from scalarizing the entire expression 6515 // feeding the predicated instruction. We currently only consider expressions 6516 // that are single-use instruction chains. 6517 Worklist.push_back(PredInst); 6518 while (!Worklist.empty()) { 6519 Instruction *I = Worklist.pop_back_val(); 6520 6521 // If we've already analyzed the instruction, there's nothing to do. 6522 if (ScalarCosts.find(I) != ScalarCosts.end()) 6523 continue; 6524 6525 // Compute the cost of the vector instruction. 
Note that this cost already 6526 // includes the scalarization overhead of the predicated instruction. 6527 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6528 6529 // Compute the cost of the scalarized instruction. This cost is the cost of 6530 // the instruction as if it wasn't if-converted and instead remained in the 6531 // predicated block. We will scale this cost by block probability after 6532 // computing the scalarization overhead. 6533 InstructionCost ScalarCost = 6534 VF.getFixedValue() * 6535 getInstructionCost(I, ElementCount::getFixed(1)).first; 6536 6537 // Compute the scalarization overhead of needed insertelement instructions 6538 // and phi nodes. 6539 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) { 6540 ScalarCost += TTI.getScalarizationOverhead( 6541 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6542 APInt::getAllOnes(VF.getFixedValue()), true, false); 6543 ScalarCost += 6544 VF.getFixedValue() * 6545 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6546 } 6547 6548 // Compute the scalarization overhead of needed extractelement 6549 // instructions. For each of the instruction's operands, if the operand can 6550 // be scalarized, add it to the worklist; otherwise, account for the 6551 // overhead. 6552 for (Use &U : I->operands()) 6553 if (auto *J = dyn_cast<Instruction>(U.get())) { 6554 assert(VectorType::isValidElementType(J->getType()) && 6555 "Instruction has non-scalar type"); 6556 if (canBeScalarized(J)) 6557 Worklist.push_back(J); 6558 else if (needsExtract(J, VF)) { 6559 ScalarCost += TTI.getScalarizationOverhead( 6560 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6561 APInt::getAllOnes(VF.getFixedValue()), false, true); 6562 } 6563 } 6564 6565 // Scale the total scalar cost by block probability. 6566 ScalarCost /= getReciprocalPredBlockProb(); 6567 6568 // Compute the discount. A non-negative discount means the vector version 6569 // of the instruction costs more, and scalarizing would be beneficial. 6570 Discount += VectorCost - ScalarCost; 6571 ScalarCosts[I] = ScalarCost; 6572 } 6573 6574 return *Discount.getValue(); 6575 } 6576 6577 LoopVectorizationCostModel::VectorizationCostTy 6578 LoopVectorizationCostModel::expectedCost( 6579 ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { 6580 VectorizationCostTy Cost; 6581 6582 // For each block. 6583 for (BasicBlock *BB : TheLoop->blocks()) { 6584 VectorizationCostTy BlockCost; 6585 6586 // For each instruction in the old loop. 6587 for (Instruction &I : BB->instructionsWithoutDebug()) { 6588 // Skip ignored values. 6589 if (ValuesToIgnore.count(&I) || 6590 (VF.isVector() && VecValuesToIgnore.count(&I))) 6591 continue; 6592 6593 VectorizationCostTy C = getInstructionCost(&I, VF); 6594 6595 // Check if we should override the cost. 6596 if (C.first.isValid() && 6597 ForceTargetInstructionCost.getNumOccurrences() > 0) 6598 C.first = InstructionCost(ForceTargetInstructionCost); 6599 6600 // Keep a list of instructions with invalid costs. 6601 if (Invalid && !C.first.isValid()) 6602 Invalid->emplace_back(&I, VF); 6603 6604 BlockCost.first += C.first; 6605 BlockCost.second |= C.second; 6606 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6607 << " for VF " << VF << " For instruction: " << I 6608 << '\n'); 6609 } 6610 6611 // If we are vectorizing a predicated block, it will have been 6612 // if-converted. 
This means that the block's instructions (aside from 6613 // stores and instructions that may divide by zero) will now be 6614 // unconditionally executed. For the scalar case, we may not always execute 6615 // the predicated block, if it is an if-else block. Thus, scale the block's 6616 // cost by the probability of executing it. blockNeedsPredication from 6617 // Legal is used so as to not include all blocks in tail folded loops. 6618 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6619 BlockCost.first /= getReciprocalPredBlockProb(); 6620 6621 Cost.first += BlockCost.first; 6622 Cost.second |= BlockCost.second; 6623 } 6624 6625 return Cost; 6626 } 6627 6628 /// Gets Address Access SCEV after verifying that the access pattern 6629 /// is loop invariant except the induction variable dependence. 6630 /// 6631 /// This SCEV can be sent to the Target in order to estimate the address 6632 /// calculation cost. 6633 static const SCEV *getAddressAccessSCEV( 6634 Value *Ptr, 6635 LoopVectorizationLegality *Legal, 6636 PredicatedScalarEvolution &PSE, 6637 const Loop *TheLoop) { 6638 6639 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6640 if (!Gep) 6641 return nullptr; 6642 6643 // We are looking for a gep with all loop invariant indices except for one 6644 // which should be an induction variable. 6645 auto SE = PSE.getSE(); 6646 unsigned NumOperands = Gep->getNumOperands(); 6647 for (unsigned i = 1; i < NumOperands; ++i) { 6648 Value *Opd = Gep->getOperand(i); 6649 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6650 !Legal->isInductionVariable(Opd)) 6651 return nullptr; 6652 } 6653 6654 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6655 return PSE.getSCEV(Ptr); 6656 } 6657 6658 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6659 return Legal->hasStride(I->getOperand(0)) || 6660 Legal->hasStride(I->getOperand(1)); 6661 } 6662 6663 InstructionCost 6664 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6665 ElementCount VF) { 6666 assert(VF.isVector() && 6667 "Scalarization cost of instruction implies vectorization."); 6668 if (VF.isScalable()) 6669 return InstructionCost::getInvalid(); 6670 6671 Type *ValTy = getLoadStoreType(I); 6672 auto SE = PSE.getSE(); 6673 6674 unsigned AS = getLoadStoreAddressSpace(I); 6675 Value *Ptr = getLoadStorePointerOperand(I); 6676 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6677 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost` 6678 // that it is being called from this specific place. 6679 6680 // Figure out whether the access is strided and get the stride value 6681 // if it's known in compile time 6682 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6683 6684 // Get the cost of the scalar memory instruction and address computation. 6685 InstructionCost Cost = 6686 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6687 6688 // Don't pass *I here, since it is scalar but will actually be part of a 6689 // vectorized loop where the user of it is a vectorized instruction. 6690 const Align Alignment = getLoadStoreAlignment(I); 6691 Cost += VF.getKnownMinValue() * 6692 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6693 AS, TTI::TCK_RecipThroughput); 6694 6695 // Get the overhead of the extractelement and insertelement instructions 6696 // we might create due to scalarization. 
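// Clarifying note (added for exposition, not in the original source): for a
// predicated access, the cost accumulated below is scaled down by the
// reciprocal of the assumed block execution probability (a 50% chance, i.e.
// a factor of 2), after which the per-lane i1 extracts and branches are
// added back, since they are emitted whether or not a given lane is active.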
6697 Cost += getScalarizationOverhead(I, VF); 6698 6699 // If we have a predicated load/store, it will need extra i1 extracts and 6700 // conditional branches, but may not be executed for each vector lane. Scale 6701 // the cost by the probability of executing the predicated block. 6702 if (isPredicatedInst(I, VF)) { 6703 Cost /= getReciprocalPredBlockProb(); 6704 6705 // Add the cost of an i1 extract and a branch 6706 auto *Vec_i1Ty = 6707 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 6708 Cost += TTI.getScalarizationOverhead( 6709 Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()), 6710 /*Insert=*/false, /*Extract=*/true); 6711 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 6712 } 6713 6714 return Cost; 6715 } 6716 6717 InstructionCost 6718 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6719 ElementCount VF) { 6720 Type *ValTy = getLoadStoreType(I); 6721 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6722 Value *Ptr = getLoadStorePointerOperand(I); 6723 unsigned AS = getLoadStoreAddressSpace(I); 6724 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr); 6725 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6726 6727 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6728 "Stride should be 1 or -1 for consecutive memory access"); 6729 const Align Alignment = getLoadStoreAlignment(I); 6730 InstructionCost Cost = 0; 6731 if (Legal->isMaskRequired(I)) 6732 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6733 CostKind); 6734 else 6735 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6736 CostKind, I); 6737 6738 bool Reverse = ConsecutiveStride < 0; 6739 if (Reverse) 6740 Cost += 6741 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6742 return Cost; 6743 } 6744 6745 InstructionCost 6746 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6747 ElementCount VF) { 6748 assert(Legal->isUniformMemOp(*I)); 6749 6750 Type *ValTy = getLoadStoreType(I); 6751 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6752 const Align Alignment = getLoadStoreAlignment(I); 6753 unsigned AS = getLoadStoreAddressSpace(I); 6754 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6755 if (isa<LoadInst>(I)) { 6756 return TTI.getAddressComputationCost(ValTy) + 6757 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6758 CostKind) + 6759 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6760 } 6761 StoreInst *SI = cast<StoreInst>(I); 6762 6763 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6764 return TTI.getAddressComputationCost(ValTy) + 6765 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6766 CostKind) + 6767 (isLoopInvariantStoreValue 6768 ? 
0 6769 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 6770 VF.getKnownMinValue() - 1)); 6771 } 6772 6773 InstructionCost 6774 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 6775 ElementCount VF) { 6776 Type *ValTy = getLoadStoreType(I); 6777 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6778 const Align Alignment = getLoadStoreAlignment(I); 6779 const Value *Ptr = getLoadStorePointerOperand(I); 6780 6781 return TTI.getAddressComputationCost(VectorTy) + 6782 TTI.getGatherScatterOpCost( 6783 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 6784 TargetTransformInfo::TCK_RecipThroughput, I); 6785 } 6786 6787 InstructionCost 6788 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 6789 ElementCount VF) { 6790 // TODO: Once we have support for interleaving with scalable vectors 6791 // we can calculate the cost properly here. 6792 if (VF.isScalable()) 6793 return InstructionCost::getInvalid(); 6794 6795 Type *ValTy = getLoadStoreType(I); 6796 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6797 unsigned AS = getLoadStoreAddressSpace(I); 6798 6799 auto Group = getInterleavedAccessGroup(I); 6800 assert(Group && "Fail to get an interleaved access group."); 6801 6802 unsigned InterleaveFactor = Group->getFactor(); 6803 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 6804 6805 // Holds the indices of existing members in the interleaved group. 6806 SmallVector<unsigned, 4> Indices; 6807 for (unsigned IF = 0; IF < InterleaveFactor; IF++) 6808 if (Group->getMember(IF)) 6809 Indices.push_back(IF); 6810 6811 // Calculate the cost of the whole interleaved group. 6812 bool UseMaskForGaps = 6813 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) || 6814 (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor())); 6815 InstructionCost Cost = TTI.getInterleavedMemoryOpCost( 6816 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 6817 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 6818 6819 if (Group->isReverse()) { 6820 // TODO: Add support for reversed masked interleaved access. 6821 assert(!Legal->isMaskRequired(I) && 6822 "Reverse masked interleaved access not supported."); 6823 Cost += 6824 Group->getNumMembers() * 6825 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6826 } 6827 return Cost; 6828 } 6829 6830 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost( 6831 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { 6832 using namespace llvm::PatternMatch; 6833 // Early exit for no inloop reductions 6834 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) 6835 return None; 6836 auto *VectorTy = cast<VectorType>(Ty); 6837 6838 // We are looking for a pattern of, and finding the minimal acceptable cost: 6839 // reduce(mul(ext(A), ext(B))) or 6840 // reduce(mul(A, B)) or 6841 // reduce(ext(A)) or 6842 // reduce(A). 6843 // The basic idea is that we walk down the tree to do that, finding the root 6844 // reduction instruction in InLoopReductionImmediateChains. From there we find 6845 // the pattern of mul/ext and test the cost of the entire pattern vs the cost 6846 // of the components. If the reduction cost is lower then we return it for the 6847 // reduction instruction and 0 for the other instructions in the pattern. If 6848 // it is not we return an invalid cost specifying the orignal cost method 6849 // should be used. 
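// Illustrative example (added for exposition, not in the original source):
// for an in-loop add reduction of the form
//   reduce.add(mul(sext i8 %a to i32, sext i8 %b to i32))
// the walk below locates the add at the root of the chain and compares the
// cost of a single extended multiply-accumulate reduction against the summed
// costs of the two extends, the mul and a plain add reduction, preferring
// whichever is cheaper.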
6850 Instruction *RetI = I; 6851 if (match(RetI, m_ZExtOrSExt(m_Value()))) { 6852 if (!RetI->hasOneUser()) 6853 return None; 6854 RetI = RetI->user_back(); 6855 } 6856 if (match(RetI, m_Mul(m_Value(), m_Value())) && 6857 RetI->user_back()->getOpcode() == Instruction::Add) { 6858 if (!RetI->hasOneUser()) 6859 return None; 6860 RetI = RetI->user_back(); 6861 } 6862 6863 // Test if the found instruction is a reduction, and if not return an invalid 6864 // cost specifying the parent to use the original cost modelling. 6865 if (!InLoopReductionImmediateChains.count(RetI)) 6866 return None; 6867 6868 // Find the reduction this chain is a part of and calculate the basic cost of 6869 // the reduction on its own. 6870 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 6871 Instruction *ReductionPhi = LastChain; 6872 while (!isa<PHINode>(ReductionPhi)) 6873 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 6874 6875 const RecurrenceDescriptor &RdxDesc = 6876 Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second; 6877 6878 InstructionCost BaseCost = TTI.getArithmeticReductionCost( 6879 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind); 6880 6881 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a 6882 // normal fmul instruction to the cost of the fadd reduction. 6883 if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd) 6884 BaseCost += 6885 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind); 6886 6887 // If we're using ordered reductions then we can just return the base cost 6888 // here, since getArithmeticReductionCost calculates the full ordered 6889 // reduction cost when FP reassociation is not allowed. 6890 if (useOrderedReductions(RdxDesc)) 6891 return BaseCost; 6892 6893 // Get the operand that was not the reduction chain and match it to one of the 6894 // patterns, returning the better cost if it is found. 6895 Instruction *RedOp = RetI->getOperand(1) == LastChain 6896 ? dyn_cast<Instruction>(RetI->getOperand(0)) 6897 : dyn_cast<Instruction>(RetI->getOperand(1)); 6898 6899 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 6900 6901 Instruction *Op0, *Op1; 6902 if (RedOp && 6903 match(RedOp, 6904 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) && 6905 match(Op0, m_ZExtOrSExt(m_Value())) && 6906 Op0->getOpcode() == Op1->getOpcode() && 6907 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 6908 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) && 6909 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) { 6910 6911 // Matched reduce(ext(mul(ext(A), ext(B))) 6912 // Note that the extend opcodes need to all match, or if A==B they will have 6913 // been converted to zext(mul(sext(A), sext(A))) as it is known positive, 6914 // which is equally fine. 
6915 bool IsUnsigned = isa<ZExtInst>(Op0); 6916 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 6917 auto *MulType = VectorType::get(Op0->getType(), VectorTy); 6918 6919 InstructionCost ExtCost = 6920 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType, 6921 TTI::CastContextHint::None, CostKind, Op0); 6922 InstructionCost MulCost = 6923 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind); 6924 InstructionCost Ext2Cost = 6925 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType, 6926 TTI::CastContextHint::None, CostKind, RedOp); 6927 6928 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6929 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6930 CostKind); 6931 6932 if (RedCost.isValid() && 6933 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost) 6934 return I == RetI ? RedCost : 0; 6935 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && 6936 !TheLoop->isLoopInvariant(RedOp)) { 6937 // Matched reduce(ext(A)) 6938 bool IsUnsigned = isa<ZExtInst>(RedOp); 6939 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 6940 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6941 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6942 CostKind); 6943 6944 InstructionCost ExtCost = 6945 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 6946 TTI::CastContextHint::None, CostKind, RedOp); 6947 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 6948 return I == RetI ? RedCost : 0; 6949 } else if (RedOp && 6950 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { 6951 if (match(Op0, m_ZExtOrSExt(m_Value())) && 6952 Op0->getOpcode() == Op1->getOpcode() && 6953 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 6954 bool IsUnsigned = isa<ZExtInst>(Op0); 6955 Type *Op0Ty = Op0->getOperand(0)->getType(); 6956 Type *Op1Ty = Op1->getOperand(0)->getType(); 6957 Type *LargestOpTy = 6958 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty 6959 : Op0Ty; 6960 auto *ExtType = VectorType::get(LargestOpTy, VectorTy); 6961 6962 // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of 6963 // different sizes. We take the largest type as the ext to reduce, and add 6964 // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))). 6965 InstructionCost ExtCost0 = TTI.getCastInstrCost( 6966 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy), 6967 TTI::CastContextHint::None, CostKind, Op0); 6968 InstructionCost ExtCost1 = TTI.getCastInstrCost( 6969 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy), 6970 TTI::CastContextHint::None, CostKind, Op1); 6971 InstructionCost MulCost = 6972 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6973 6974 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6975 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6976 CostKind); 6977 InstructionCost ExtraExtCost = 0; 6978 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) { 6979 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1; 6980 ExtraExtCost = TTI.getCastInstrCost( 6981 ExtraExtOp->getOpcode(), ExtType, 6982 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy), 6983 TTI::CastContextHint::None, CostKind, ExtraExtOp); 6984 } 6985 6986 if (RedCost.isValid() && 6987 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost)) 6988 return I == RetI ? 
RedCost : 0; 6989 } else if (!match(I, m_ZExtOrSExt(m_Value()))) { 6990 // Matched reduce(mul()) 6991 InstructionCost MulCost = 6992 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6993 6994 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6995 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 6996 CostKind); 6997 6998 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 6999 return I == RetI ? RedCost : 0; 7000 } 7001 } 7002 7003 return I == RetI ? Optional<InstructionCost>(BaseCost) : None; 7004 } 7005 7006 InstructionCost 7007 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 7008 ElementCount VF) { 7009 // Calculate scalar cost only. Vectorization cost should be ready at this 7010 // moment. 7011 if (VF.isScalar()) { 7012 Type *ValTy = getLoadStoreType(I); 7013 const Align Alignment = getLoadStoreAlignment(I); 7014 unsigned AS = getLoadStoreAddressSpace(I); 7015 7016 return TTI.getAddressComputationCost(ValTy) + 7017 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 7018 TTI::TCK_RecipThroughput, I); 7019 } 7020 return getWideningCost(I, VF); 7021 } 7022 7023 LoopVectorizationCostModel::VectorizationCostTy 7024 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 7025 ElementCount VF) { 7026 // If we know that this instruction will remain uniform, check the cost of 7027 // the scalar version. 7028 if (isUniformAfterVectorization(I, VF)) 7029 VF = ElementCount::getFixed(1); 7030 7031 if (VF.isVector() && isProfitableToScalarize(I, VF)) 7032 return VectorizationCostTy(InstsToScalarize[VF][I], false); 7033 7034 // Forced scalars do not have any scalarization overhead. 7035 auto ForcedScalar = ForcedScalars.find(VF); 7036 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 7037 auto InstSet = ForcedScalar->second; 7038 if (InstSet.count(I)) 7039 return VectorizationCostTy( 7040 (getInstructionCost(I, ElementCount::getFixed(1)).first * 7041 VF.getKnownMinValue()), 7042 false); 7043 } 7044 7045 Type *VectorTy; 7046 InstructionCost C = getInstructionCost(I, VF, VectorTy); 7047 7048 bool TypeNotScalarized = false; 7049 if (VF.isVector() && VectorTy->isVectorTy()) { 7050 unsigned NumParts = TTI.getNumberOfParts(VectorTy); 7051 if (NumParts) 7052 TypeNotScalarized = NumParts < VF.getKnownMinValue(); 7053 else 7054 C = InstructionCost::getInvalid(); 7055 } 7056 return VectorizationCostTy(C, TypeNotScalarized); 7057 } 7058 7059 InstructionCost 7060 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 7061 ElementCount VF) const { 7062 7063 // There is no mechanism yet to create a scalable scalarization loop, 7064 // so this is currently Invalid. 7065 if (VF.isScalable()) 7066 return InstructionCost::getInvalid(); 7067 7068 if (VF.isScalar()) 7069 return 0; 7070 7071 InstructionCost Cost = 0; 7072 Type *RetTy = ToVectorTy(I->getType(), VF); 7073 if (!RetTy->isVoidTy() && 7074 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 7075 Cost += TTI.getScalarizationOverhead( 7076 cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true, 7077 false); 7078 7079 // Some targets keep addresses scalar. 7080 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 7081 return Cost; 7082 7083 // Some targets support efficient element stores. 7084 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 7085 return Cost; 7086 7087 // Collect operands to consider. 7088 CallInst *CI = dyn_cast<CallInst>(I); 7089 Instruction::op_range Ops = CI ? 
CI->args() : I->operands(); 7090 7091 // Skip operands that do not require extraction/scalarization and do not incur 7092 // any overhead. 7093 SmallVector<Type *> Tys; 7094 for (auto *V : filterExtractingOperands(Ops, VF)) 7095 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 7096 return Cost + TTI.getOperandsScalarizationOverhead( 7097 filterExtractingOperands(Ops, VF), Tys); 7098 } 7099 7100 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 7101 if (VF.isScalar()) 7102 return; 7103 NumPredStores = 0; 7104 for (BasicBlock *BB : TheLoop->blocks()) { 7105 // For each instruction in the old loop. 7106 for (Instruction &I : *BB) { 7107 Value *Ptr = getLoadStorePointerOperand(&I); 7108 if (!Ptr) 7109 continue; 7110 7111 // TODO: We should generate better code and update the cost model for 7112 // predicated uniform stores. Today they are treated as any other 7113 // predicated store (see added test cases in 7114 // invariant-store-vectorization.ll). 7115 if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF)) 7116 NumPredStores++; 7117 7118 if (Legal->isUniformMemOp(I)) { 7119 // TODO: Avoid replicating loads and stores instead of 7120 // relying on instcombine to remove them. 7121 // Load: Scalar load + broadcast 7122 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 7123 InstructionCost Cost; 7124 if (isa<StoreInst>(&I) && VF.isScalable() && 7125 isLegalGatherOrScatter(&I, VF)) { 7126 Cost = getGatherScatterCost(&I, VF); 7127 setWideningDecision(&I, VF, CM_GatherScatter, Cost); 7128 } else { 7129 assert((isa<LoadInst>(&I) || !VF.isScalable()) && 7130 "Cannot yet scalarize uniform stores"); 7131 Cost = getUniformMemOpCost(&I, VF); 7132 setWideningDecision(&I, VF, CM_Scalarize, Cost); 7133 } 7134 continue; 7135 } 7136 7137 // We assume that widening is the best solution when possible. 7138 if (memoryInstructionCanBeWidened(&I, VF)) { 7139 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 7140 int ConsecutiveStride = Legal->isConsecutivePtr( 7141 getLoadStoreType(&I), getLoadStorePointerOperand(&I)); 7142 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7143 "Expected consecutive stride."); 7144 InstWidening Decision = 7145 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 7146 setWideningDecision(&I, VF, Decision, Cost); 7147 continue; 7148 } 7149 7150 // Choose between Interleaving, Gather/Scatter or Scalarization. 7151 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 7152 unsigned NumAccesses = 1; 7153 if (isAccessInterleaved(&I)) { 7154 auto Group = getInterleavedAccessGroup(&I); 7155 assert(Group && "Fail to get an interleaved access group."); 7156 7157 // Make one decision for the whole group. 7158 if (getWideningDecision(&I, VF) != CM_Unknown) 7159 continue; 7160 7161 NumAccesses = Group->getNumMembers(); 7162 if (interleavedAccessCanBeWidened(&I, VF)) 7163 InterleaveCost = getInterleaveGroupCost(&I, VF); 7164 } 7165 7166 InstructionCost GatherScatterCost = 7167 isLegalGatherOrScatter(&I, VF) 7168 ? getGatherScatterCost(&I, VF) * NumAccesses 7169 : InstructionCost::getInvalid(); 7170 7171 InstructionCost ScalarizationCost = 7172 getMemInstScalarizationCost(&I, VF) * NumAccesses; 7173 7174 // Choose better solution for the current VF, 7175 // write down this decision and use it during vectorization. 
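      // Pick the cheapest of the three strategies, preferring an interleave
      // group on a tie with gather/scatter and falling back to scalarization
      // when neither alternative is strictly cheaper.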
      InstructionCost Cost;
      InstWidening Decision;
      if (InterleaveCost <= GatherScatterCost &&
          InterleaveCost < ScalarizationCost) {
        Decision = CM_Interleave;
        Cost = InterleaveCost;
      } else if (GatherScatterCost < ScalarizationCost) {
        Decision = CM_GatherScatter;
        Cost = GatherScatterCost;
      } else {
        Decision = CM_Scalarize;
        Cost = ScalarizationCost;
      }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group receives the cost, but
      // the cost will actually be assigned to one instruction.
      if (auto Group = getInterleavedAccessGroup(&I))
        setWideningDecision(Group, VF, Decision, Cost);
      else
        setWideningDecision(&I, VF, Decision, Cost);
    }
  }

  // Make sure that any load of address and any other address computation
  // remains scalar unless there is gather/scatter support. This avoids
  // inevitable extracts into address registers, and also has the benefit of
  // activating LSR more, since that pass can't optimize vectorized
  // addresses.
  if (TTI.prefersVectorizedAddressing())
    return;

  // Start with all scalar pointer uses.
  SmallPtrSet<Instruction *, 8> AddrDefs;
  for (BasicBlock *BB : TheLoop->blocks())
    for (Instruction &I : *BB) {
      Instruction *PtrDef =
          dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
      if (PtrDef && TheLoop->contains(PtrDef) &&
          getWideningDecision(&I, VF) != CM_GatherScatter)
        AddrDefs.insert(PtrDef);
    }

  // Add all instructions used to generate the addresses.
  SmallVector<Instruction *, 4> Worklist;
  append_range(Worklist, AddrDefs);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (auto &Op : I->operands())
      if (auto *InstOp = dyn_cast<Instruction>(Op))
        if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
            AddrDefs.insert(InstOp).second)
          Worklist.push_back(InstOp);
  }

  for (auto *I : AddrDefs) {
    if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // cost functions, but since this involves the task of finding out
      // if the loaded register is involved in an address computation, it is
      // instead changed here when we know this is the case.
      InstWidening Decision = getWideningDecision(I, VF);
      if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
        // Scalarize a widened load of address.
        setWideningDecision(
            I, VF, CM_Scalarize,
            (VF.getKnownMinValue() *
             getMemoryInstructionCost(I, ElementCount::getFixed(1))));
      else if (auto Group = getInterleavedAccessGroup(I)) {
        // Scalarize an interleave group of address loads.
        for (unsigned I = 0; I < Group->getFactor(); ++I) {
          if (Instruction *Member = Group->getMember(I))
            setWideningDecision(
                Member, VF, CM_Scalarize,
                (VF.getKnownMinValue() *
                 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
        }
      }
    } else
      // Make sure I gets scalarized and a cost estimate without
      // scalarization overhead.
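      // (Instructions in ForcedScalars are later costed in getInstructionCost
      // as VF copies of the scalar instruction, with no insert/extract
      // overhead added on top.)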
      ForcedScalars[VF].insert(I);
  }
}

InstructionCost
LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
                                               Type *&VectorTy) {
  Type *RetTy = I->getType();
  if (canTruncateToMinimalBitwidth(I, VF))
    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  auto SE = PSE.getSE();
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  auto hasSingleCopyAfterVectorization = [this](Instruction *I,
                                                ElementCount VF) -> bool {
    if (VF.isScalar())
      return true;

    auto Scalarized = InstsToScalarize.find(VF);
    assert(Scalarized != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return !Scalarized->second.count(I) &&
           llvm::all_of(I->users(), [&](User *U) {
             auto *UI = cast<Instruction>(U);
             return !Scalarized->second.count(UI);
           });
  };
  (void) hasSingleCopyAfterVectorization;

  if (isScalarAfterVectorization(I, VF)) {
    // With the exception of GEPs and PHIs, after scalarization there should
    // only be one copy of the instruction generated in the loop. This is
    // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result,
    // it means we don't have to multiply the instruction cost by VF.
    assert(I->getOpcode() == Instruction::GetElementPtr ||
           I->getOpcode() == Instruction::PHI ||
           (I->getOpcode() == Instruction::BitCast &&
            I->getType()->isPointerTy()) ||
           hasSingleCopyAfterVectorization(I, VF));
    VectorTy = RetTy;
  } else
    VectorTy = ToVectorTy(RetTy, VF);

  // TODO: We need to estimate the cost of intrinsic calls.
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    // We mark this instruction as zero-cost because the cost of GEPs in
    // vectorized code depends on whether the corresponding memory instruction
    // is scalarized or not. Therefore, we handle GEPs with the memory
    // instruction cost.
    return 0;
  case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
    bool ScalarPredicatedBB = false;
    BranchInst *BI = cast<BranchInst>(I);
    if (VF.isVector() && BI->isConditional() &&
        (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
         PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
      ScalarPredicatedBB = true;

    if (ScalarPredicatedBB) {
      // Not possible to scalarize scalable vector with predicated instructions.
      if (VF.isScalable())
        return InstructionCost::getInvalid();
      // Return cost for branches around scalarized and predicated blocks.
      auto *Vec_i1Ty =
          VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
      return (
          TTI.getScalarizationOverhead(
              Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
          (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
    } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
      // The back-edge branch will remain, as will all scalar branches.
      return TTI.getCFInstrCost(Instruction::Br, CostKind);
    else
      // This branch will be eliminated by if-conversion.
7335 return 0; 7336 // Note: We currently assume zero cost for an unconditional branch inside 7337 // a predicated block since it will become a fall-through, although we 7338 // may decide in the future to call TTI for all branches. 7339 } 7340 case Instruction::PHI: { 7341 auto *Phi = cast<PHINode>(I); 7342 7343 // First-order recurrences are replaced by vector shuffles inside the loop. 7344 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7345 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7346 return TTI.getShuffleCost( 7347 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7348 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7349 7350 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7351 // converted into select instructions. We require N - 1 selects per phi 7352 // node, where N is the number of incoming values. 7353 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7354 return (Phi->getNumIncomingValues() - 1) * 7355 TTI.getCmpSelInstrCost( 7356 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7357 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7358 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7359 7360 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7361 } 7362 case Instruction::UDiv: 7363 case Instruction::SDiv: 7364 case Instruction::URem: 7365 case Instruction::SRem: 7366 // If we have a predicated instruction, it may not be executed for each 7367 // vector lane. Get the scalarization cost and scale this amount by the 7368 // probability of executing the predicated block. If the instruction is not 7369 // predicated, we fall through to the next case. 7370 if (VF.isVector() && isScalarWithPredication(I, VF)) { 7371 InstructionCost Cost = 0; 7372 7373 // These instructions have a non-void type, so account for the phi nodes 7374 // that we will create. This cost is likely to be zero. The phi node 7375 // cost, if any, should be scaled by the block probability because it 7376 // models a copy at the end of each predicated block. 7377 Cost += VF.getKnownMinValue() * 7378 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7379 7380 // The cost of the non-predicated instruction. 7381 Cost += VF.getKnownMinValue() * 7382 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7383 7384 // The cost of insertelement and extractelement instructions needed for 7385 // scalarization. 7386 Cost += getScalarizationOverhead(I, VF); 7387 7388 // Scale the cost by the probability of executing the predicated blocks. 7389 // This assumes the predicated block for each vector lane is equally 7390 // likely. 7391 return Cost / getReciprocalPredBlockProb(); 7392 } 7393 LLVM_FALLTHROUGH; 7394 case Instruction::Add: 7395 case Instruction::FAdd: 7396 case Instruction::Sub: 7397 case Instruction::FSub: 7398 case Instruction::Mul: 7399 case Instruction::FMul: 7400 case Instruction::FDiv: 7401 case Instruction::FRem: 7402 case Instruction::Shl: 7403 case Instruction::LShr: 7404 case Instruction::AShr: 7405 case Instruction::And: 7406 case Instruction::Or: 7407 case Instruction::Xor: { 7408 // Since we will replace the stride by 1 the multiplication should go away. 
7409 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7410 return 0; 7411 7412 // Detect reduction patterns 7413 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7414 return *RedCost; 7415 7416 // Certain instructions can be cheaper to vectorize if they have a constant 7417 // second vector operand. One example of this are shifts on x86. 7418 Value *Op2 = I->getOperand(1); 7419 TargetTransformInfo::OperandValueProperties Op2VP; 7420 TargetTransformInfo::OperandValueKind Op2VK = 7421 TTI.getOperandInfo(Op2, Op2VP); 7422 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7423 Op2VK = TargetTransformInfo::OK_UniformValue; 7424 7425 SmallVector<const Value *, 4> Operands(I->operand_values()); 7426 return TTI.getArithmeticInstrCost( 7427 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7428 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7429 } 7430 case Instruction::FNeg: { 7431 return TTI.getArithmeticInstrCost( 7432 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7433 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, 7434 TargetTransformInfo::OP_None, I->getOperand(0), I); 7435 } 7436 case Instruction::Select: { 7437 SelectInst *SI = cast<SelectInst>(I); 7438 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7439 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7440 7441 const Value *Op0, *Op1; 7442 using namespace llvm::PatternMatch; 7443 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7444 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7445 // select x, y, false --> x & y 7446 // select x, true, y --> x | y 7447 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7448 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7449 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7450 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7451 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7452 Op1->getType()->getScalarSizeInBits() == 1); 7453 7454 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7455 return TTI.getArithmeticInstrCost( 7456 match(I, m_LogicalOr()) ? 
Instruction::Or : Instruction::And, VectorTy, 7457 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7458 } 7459 7460 Type *CondTy = SI->getCondition()->getType(); 7461 if (!ScalarCond) 7462 CondTy = VectorType::get(CondTy, VF); 7463 7464 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; 7465 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition())) 7466 Pred = Cmp->getPredicate(); 7467 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred, 7468 CostKind, I); 7469 } 7470 case Instruction::ICmp: 7471 case Instruction::FCmp: { 7472 Type *ValTy = I->getOperand(0)->getType(); 7473 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7474 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7475 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7476 VectorTy = ToVectorTy(ValTy, VF); 7477 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7478 cast<CmpInst>(I)->getPredicate(), CostKind, 7479 I); 7480 } 7481 case Instruction::Store: 7482 case Instruction::Load: { 7483 ElementCount Width = VF; 7484 if (Width.isVector()) { 7485 InstWidening Decision = getWideningDecision(I, Width); 7486 assert(Decision != CM_Unknown && 7487 "CM decision should be taken at this point"); 7488 if (Decision == CM_Scalarize) 7489 Width = ElementCount::getFixed(1); 7490 } 7491 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7492 return getMemoryInstructionCost(I, VF); 7493 } 7494 case Instruction::BitCast: 7495 if (I->getType()->isPointerTy()) 7496 return 0; 7497 LLVM_FALLTHROUGH; 7498 case Instruction::ZExt: 7499 case Instruction::SExt: 7500 case Instruction::FPToUI: 7501 case Instruction::FPToSI: 7502 case Instruction::FPExt: 7503 case Instruction::PtrToInt: 7504 case Instruction::IntToPtr: 7505 case Instruction::SIToFP: 7506 case Instruction::UIToFP: 7507 case Instruction::Trunc: 7508 case Instruction::FPTrunc: { 7509 // Computes the CastContextHint from a Load/Store instruction. 7510 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7511 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7512 "Expected a load or a store!"); 7513 7514 if (VF.isScalar() || !TheLoop->contains(I)) 7515 return TTI::CastContextHint::Normal; 7516 7517 switch (getWideningDecision(I, VF)) { 7518 case LoopVectorizationCostModel::CM_GatherScatter: 7519 return TTI::CastContextHint::GatherScatter; 7520 case LoopVectorizationCostModel::CM_Interleave: 7521 return TTI::CastContextHint::Interleave; 7522 case LoopVectorizationCostModel::CM_Scalarize: 7523 case LoopVectorizationCostModel::CM_Widen: 7524 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7525 : TTI::CastContextHint::Normal; 7526 case LoopVectorizationCostModel::CM_Widen_Reverse: 7527 return TTI::CastContextHint::Reversed; 7528 case LoopVectorizationCostModel::CM_Unknown: 7529 llvm_unreachable("Instr did not go through cost modelling?"); 7530 } 7531 7532 llvm_unreachable("Unhandled case!"); 7533 }; 7534 7535 unsigned Opcode = I->getOpcode(); 7536 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7537 // For Trunc, the context is the only user, which must be a StoreInst. 7538 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7539 if (I->hasOneUse()) 7540 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7541 CCH = ComputeCCH(Store); 7542 } 7543 // For Z/Sext, the context is the operand, which must be a LoadInst. 
7544 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7545 Opcode == Instruction::FPExt) { 7546 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7547 CCH = ComputeCCH(Load); 7548 } 7549 7550 // We optimize the truncation of induction variables having constant 7551 // integer steps. The cost of these truncations is the same as the scalar 7552 // operation. 7553 if (isOptimizableIVTruncate(I, VF)) { 7554 auto *Trunc = cast<TruncInst>(I); 7555 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7556 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7557 } 7558 7559 // Detect reduction patterns 7560 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7561 return *RedCost; 7562 7563 Type *SrcScalarTy = I->getOperand(0)->getType(); 7564 Type *SrcVecTy = 7565 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7566 if (canTruncateToMinimalBitwidth(I, VF)) { 7567 // This cast is going to be shrunk. This may remove the cast or it might 7568 // turn it into slightly different cast. For example, if MinBW == 16, 7569 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7570 // 7571 // Calculate the modified src and dest types. 7572 Type *MinVecTy = VectorTy; 7573 if (Opcode == Instruction::Trunc) { 7574 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7575 VectorTy = 7576 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7577 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7578 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7579 VectorTy = 7580 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7581 } 7582 } 7583 7584 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7585 } 7586 case Instruction::Call: { 7587 if (RecurrenceDescriptor::isFMulAddIntrinsic(I)) 7588 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7589 return *RedCost; 7590 bool NeedToScalarize; 7591 CallInst *CI = cast<CallInst>(I); 7592 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7593 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7594 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7595 return std::min(CallCost, IntrinsicCost); 7596 } 7597 return CallCost; 7598 } 7599 case Instruction::ExtractValue: 7600 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7601 case Instruction::Alloca: 7602 // We cannot easily widen alloca to a scalable alloca, as 7603 // the result would need to be a vector of pointers. 7604 if (VF.isScalable()) 7605 return InstructionCost::getInvalid(); 7606 LLVM_FALLTHROUGH; 7607 default: 7608 // This opcode is unknown. Assume that it is the same as 'mul'. 7609 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7610 } // end of switch. 
7611 } 7612 7613 char LoopVectorize::ID = 0; 7614 7615 static const char lv_name[] = "Loop Vectorization"; 7616 7617 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7618 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7619 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7620 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7621 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7622 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7623 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7624 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7625 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7626 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7627 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7628 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7629 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7630 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7631 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7632 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7633 7634 namespace llvm { 7635 7636 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7637 7638 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7639 bool VectorizeOnlyWhenForced) { 7640 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7641 } 7642 7643 } // end namespace llvm 7644 7645 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7646 // Check if the pointer operand of a load or store instruction is 7647 // consecutive. 7648 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7649 return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr); 7650 return false; 7651 } 7652 7653 void LoopVectorizationCostModel::collectValuesToIgnore() { 7654 // Ignore ephemeral values. 7655 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7656 7657 // Ignore type-promoting instructions we identified during reduction 7658 // detection. 7659 for (auto &Reduction : Legal->getReductionVars()) { 7660 const RecurrenceDescriptor &RedDes = Reduction.second; 7661 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7662 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7663 } 7664 // Ignore type-casting instructions we identified during induction 7665 // detection. 7666 for (auto &Induction : Legal->getInductionVars()) { 7667 const InductionDescriptor &IndDes = Induction.second; 7668 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7669 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7670 } 7671 } 7672 7673 void LoopVectorizationCostModel::collectInLoopReductions() { 7674 for (auto &Reduction : Legal->getReductionVars()) { 7675 PHINode *Phi = Reduction.first; 7676 const RecurrenceDescriptor &RdxDesc = Reduction.second; 7677 7678 // We don't collect reductions that are type promoted (yet). 7679 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7680 continue; 7681 7682 // If the target would prefer this reduction to happen "in-loop", then we 7683 // want to record it as such. 7684 unsigned Opcode = RdxDesc.getOpcode(); 7685 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 7686 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7687 TargetTransformInfo::ReductionFlags())) 7688 continue; 7689 7690 // Check that we can correctly put the reductions into the loop, by 7691 // finding the chain of operations that leads from the phi to the loop 7692 // exit value. 
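    // An empty chain below means the reduction stays out-of-loop. Otherwise,
    // InLoopReductionImmediateChains maps each operation in the chain to its
    // predecessor (the phi for the first link), which getReductionPatternCost
    // walks back to locate the reduction phi.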
    SmallVector<Instruction *, 4> ReductionOperations =
        RdxDesc.getReductionOpChain(Phi, TheLoop);
    bool InLoop = !ReductionOperations.empty();
    if (InLoop) {
      InLoopReductionChains[Phi] = ReductionOperations;
      // Add the elements to InLoopReductionImmediateChains for cost modelling.
      Instruction *LastChain = Phi;
      for (auto *I : ReductionOperations) {
        InLoopReductionImmediateChains[I] = LastChain;
        LastChain = I;
      }
    }
    LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
                      << " reduction for phi: " << *Phi << "\n");
  }
}

// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
// doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
                                 LoopVectorizationCostModel &CM) {
  unsigned WidestType;
  std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
  return WidestVectorRegBits / WidestType;
}

VectorizationFactor
LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
  assert(!UserVF.isScalable() && "scalable vectors not yet supported");
  ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
  if (!OrigLoop->isInnermost()) {
    // If the user doesn't provide a vectorization factor, determine a
    // reasonable one.
    if (UserVF.isZero()) {
      VF = ElementCount::getFixed(determineVPlanVF(
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize(),
          CM));
      LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");

      // Make sure we have a VF > 1 for stress testing.
      if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
        LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
                          << "overriding computed VF.\n");
        VF = ElementCount::getFixed(4);
      }
    }
    assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
    assert(isPowerOf2_32(VF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
                      << "VF " << VF << " to build VPlans.\n");
    buildVPlans(VF, VF);

    // For VPlan build stress testing, we bail out after VPlan construction.
    if (VPlanBuildStressTest)
      return VectorizationFactor::Disabled();

    return {VF, 0 /*Cost*/};
  }

  LLVM_DEBUG(
      dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
                "VPlan-native path.\n");
  return VectorizationFactor::Disabled();
}

Optional<VectorizationFactor>
LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");
  FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
  if (!MaxFactors) // Cases that should not be vectorized nor interleaved.
    return None;

  // Invalidate interleave groups if all blocks of the loop will be predicated.
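  // Folding the tail by masking predicates every block of the loop, and masked
  // interleaved accesses are only usable when the target reports support for
  // them; otherwise the groups, and every decision derived from them, must be
  // dropped.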
7775 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) && 7776 !useMaskedInterleavedAccesses(*TTI)) { 7777 LLVM_DEBUG( 7778 dbgs() 7779 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 7780 "which requires masked-interleaved support.\n"); 7781 if (CM.InterleaveInfo.invalidateGroups()) 7782 // Invalidating interleave groups also requires invalidating all decisions 7783 // based on them, which includes widening decisions and uniform and scalar 7784 // values. 7785 CM.invalidateCostModelingDecisions(); 7786 } 7787 7788 ElementCount MaxUserVF = 7789 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; 7790 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); 7791 if (!UserVF.isZero() && UserVFIsLegal) { 7792 assert(isPowerOf2_32(UserVF.getKnownMinValue()) && 7793 "VF needs to be a power of two"); 7794 // Collect the instructions (and their associated costs) that will be more 7795 // profitable to scalarize. 7796 if (CM.selectUserVectorizationFactor(UserVF)) { 7797 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 7798 CM.collectInLoopReductions(); 7799 buildVPlansWithVPRecipes(UserVF, UserVF); 7800 LLVM_DEBUG(printPlans(dbgs())); 7801 return {{UserVF, 0}}; 7802 } else 7803 reportVectorizationInfo("UserVF ignored because of invalid costs.", 7804 "InvalidCost", ORE, OrigLoop); 7805 } 7806 7807 // Populate the set of Vectorization Factor Candidates. 7808 ElementCountSet VFCandidates; 7809 for (auto VF = ElementCount::getFixed(1); 7810 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) 7811 VFCandidates.insert(VF); 7812 for (auto VF = ElementCount::getScalable(1); 7813 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) 7814 VFCandidates.insert(VF); 7815 7816 for (const auto &VF : VFCandidates) { 7817 // Collect Uniform and Scalar instructions after vectorization with VF. 7818 CM.collectUniformsAndScalars(VF); 7819 7820 // Collect the instructions (and their associated costs) that will be more 7821 // profitable to scalarize. 7822 if (VF.isVector()) 7823 CM.collectInstsToScalarize(VF); 7824 } 7825 7826 CM.collectInLoopReductions(); 7827 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); 7828 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); 7829 7830 LLVM_DEBUG(printPlans(dbgs())); 7831 if (!MaxFactors.hasVector()) 7832 return VectorizationFactor::Disabled(); 7833 7834 // Select the optimal vectorization factor. 7835 auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); 7836 7837 // Check if it is profitable to vectorize with runtime checks. 
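  // Two limits apply to the number of runtime pointer checks: the generic
  // threshold, which can be exceeded only when the user explicitly allows
  // memory-operation reordering, and the pragma-specific threshold, beyond
  // which we give up (with an analysis remark) regardless.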
  unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
  if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
    bool PragmaThresholdReached =
        NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
    bool ThresholdReached =
        NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
    if ((ThresholdReached && !Hints.allowReordering()) ||
        PragmaThresholdReached) {
      ORE->emit([&]() {
        return OptimizationRemarkAnalysisAliasing(
                   DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
                   OrigLoop->getHeader())
               << "loop not vectorized: cannot prove it is safe to reorder "
                  "memory operations";
      });
      LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
      Hints.emitRemarkWithHints();
      return VectorizationFactor::Disabled();
    }
  }
  return SelectedVF;
}

VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
  assert(count_if(VPlans,
                  [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
             1 &&
         "Best VF has not a single VPlan.");

  for (const VPlanPtr &Plan : VPlans) {
    if (Plan->hasVF(VF))
      return *Plan.get();
  }
  llvm_unreachable("No plan found!");
}

static void AddRuntimeUnrollDisableMetaData(Loop *L) {
  SmallVector<Metadata *, 4> MDs;
  // Reserve first location for self reference to the LoopID metadata node.
  MDs.push_back(nullptr);
  bool IsUnrollMetadata = false;
  MDNode *LoopID = L->getLoopID();
  if (LoopID) {
    // First find existing loop unrolling disable metadata.
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
      if (MD) {
        const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata =
            S && S->getString().startswith("llvm.loop.unroll.disable");
      }
      MDs.push_back(LoopID->getOperand(i));
    }
  }

  if (!IsUnrollMetadata) {
    // Add runtime unroll disable metadata.
    LLVMContext &Context = L->getHeader()->getContext();
    SmallVector<Metadata *, 1> DisableOperands;
    DisableOperands.push_back(
        MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
    MDNode *DisableNode = MDNode::get(Context, DisableOperands);
    MDs.push_back(DisableNode);
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);
    L->setLoopID(NewLoopID);
  }
}

void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
                                           VPlan &BestVPlan,
                                           InnerLoopVectorizer &ILV,
                                           DominatorTree *DT) {
  LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF
                    << ", UF=" << BestUF << '\n');

  // Perform the actual loop transformation.

  // 1. Create a new empty loop. Unlink the old loop and connect the new one.
  VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
  Value *CanonicalIVStartValue;
  std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
      ILV.createVectorizedLoopSkeleton();
  ILV.collectPoisonGeneratingRecipes(State);

  ILV.printDebugTracesAtStart();

  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
  //
  //===------------------------------------------------===//

  // 2. Copy and widen instructions from the old loop into the new loop.
  BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
                             ILV.getOrCreateVectorTripCount(nullptr),
                             CanonicalIVStartValue, State);
  BestVPlan.execute(&State);

  // Keep all loop hints from the original loop on the vector loop (we'll
  // replace the vectorizer-specific hints below).
  MDNode *OrigLoopID = OrigLoop->getLoopID();

  Optional<MDNode *> VectorizedLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupVectorized});

  Loop *L = LI->getLoopFor(State.CFG.PrevBB);
  if (VectorizedLoopID.hasValue())
    L->setLoopID(VectorizedLoopID.getValue());
  else {
    // Keep all loop hints from the original loop on the vector loop (we'll
    // replace the vectorizer-specific hints below).
    if (MDNode *LID = OrigLoop->getLoopID())
      L->setLoopID(LID);

    LoopVectorizeHints Hints(L, true, *ORE);
    Hints.setAlreadyVectorized();
  }
  // Disable runtime unrolling when vectorizing the epilogue loop.
  if (CanonicalIVStartValue)
    AddRuntimeUnrollDisableMetaData(L);

  // 3. Fix the vectorized code: take care of header phi's, live-outs,
  // predication, updating analyses.
  ILV.fixVectorizedLoop(State);

  ILV.printDebugTracesAtEnd();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
  for (const auto &Plan : VPlans)
    if (PrintVPlansInDotFormat)
      Plan->printDOT(O);
    else
      Plan->print(O);
}
#endif

void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
    SmallPtrSetImpl<Instruction *> &DeadInstructions) {

  // We create new control-flow for the vectorized loop, so an original exit
  // condition will be dead after vectorization if it is only used by the
  // terminator.
  SmallVector<BasicBlock *> ExitingBlocks;
  OrigLoop->getExitingBlocks(ExitingBlocks);
  for (auto *BB : ExitingBlocks) {
    auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
    if (!Cmp || !Cmp->hasOneUse())
      continue;

    // TODO: we should introduce a getUniqueExitingBlocks on Loop
    if (!DeadInstructions.insert(Cmp).second)
      continue;

    // An operand of the icmp is often a dead trunc, used by IndUpdate.
    // TODO: can recurse through operands in general
    for (Value *Op : Cmp->operands()) {
      if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
    }
  }

  // We create new "steps" for induction variable updates to which the original
  // induction variables map. An original update instruction will be dead if
  // all its users except the induction variable are dead.
  auto *Latch = OrigLoop->getLoopLatch();
  for (auto &Induction : Legal->getInductionVars()) {
    PHINode *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
8016 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 8017 continue; 8018 8019 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 8020 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 8021 })) 8022 DeadInstructions.insert(IndUpdate); 8023 } 8024 } 8025 8026 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 8027 8028 //===--------------------------------------------------------------------===// 8029 // EpilogueVectorizerMainLoop 8030 //===--------------------------------------------------------------------===// 8031 8032 /// This function is partially responsible for generating the control flow 8033 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8034 std::pair<BasicBlock *, Value *> 8035 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 8036 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8037 Loop *Lp = createVectorLoopSkeleton(""); 8038 8039 // Generate the code to check the minimum iteration count of the vector 8040 // epilogue (see below). 8041 EPI.EpilogueIterationCountCheck = 8042 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true); 8043 EPI.EpilogueIterationCountCheck->setName("iter.check"); 8044 8045 // Generate the code to check any assumptions that we've made for SCEV 8046 // expressions. 8047 EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader); 8048 8049 // Generate the code that checks at runtime if arrays overlap. We put the 8050 // checks into a separate block to make the more common case of few elements 8051 // faster. 8052 EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 8053 8054 // Generate the iteration count check for the main loop, *after* the check 8055 // for the epilogue loop, so that the path-length is shorter for the case 8056 // that goes directly through the vector epilogue. The longer-path length for 8057 // the main loop is compensated for, by the gain from vectorizing the larger 8058 // trip count. Note: the branch will get updated later on when we vectorize 8059 // the epilogue. 8060 EPI.MainLoopIterationCountCheck = 8061 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false); 8062 8063 // Generate the induction variable. 8064 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8065 EPI.VectorTripCount = CountRoundDown; 8066 createHeaderBranch(Lp); 8067 8068 // Skip induction resume value creation here because they will be created in 8069 // the second pass. If we created them here, they wouldn't be used anyway, 8070 // because the vplan in the second pass still contains the inductions from the 8071 // original loop. 
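  // No resume value is returned from this first pass; the epilogue skeleton
  // pass below creates and returns the vec.epilog.resume.val phi instead.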
8072 8073 return {completeLoopSkeleton(Lp, OrigLoopID), nullptr}; 8074 } 8075 8076 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 8077 LLVM_DEBUG({ 8078 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 8079 << "Main Loop VF:" << EPI.MainLoopVF 8080 << ", Main Loop UF:" << EPI.MainLoopUF 8081 << ", Epilogue Loop VF:" << EPI.EpilogueVF 8082 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8083 }); 8084 } 8085 8086 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 8087 DEBUG_WITH_TYPE(VerboseDebug, { 8088 dbgs() << "intermediate fn:\n" 8089 << *OrigLoop->getHeader()->getParent() << "\n"; 8090 }); 8091 } 8092 8093 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck( 8094 Loop *L, BasicBlock *Bypass, bool ForEpilogue) { 8095 assert(L && "Expected valid Loop."); 8096 assert(Bypass && "Expected valid bypass basic block."); 8097 ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF; 8098 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 8099 Value *Count = getOrCreateTripCount(L); 8100 // Reuse existing vector loop preheader for TC checks. 8101 // Note that new preheader block is generated for vector loop. 8102 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 8103 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 8104 8105 // Generate code to check if the loop's trip count is less than VF * UF of the 8106 // main vector loop. 8107 auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? 8108 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8109 8110 Value *CheckMinIters = Builder.CreateICmp( 8111 P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor), 8112 "min.iters.check"); 8113 8114 if (!ForEpilogue) 8115 TCCheckBlock->setName("vector.main.loop.iter.check"); 8116 8117 // Create new preheader for vector loop. 8118 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 8119 DT, LI, nullptr, "vector.ph"); 8120 8121 if (ForEpilogue) { 8122 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 8123 DT->getNode(Bypass)->getIDom()) && 8124 "TC check is expected to dominate Bypass"); 8125 8126 // Update dominator for Bypass & LoopExit. 8127 DT->changeImmediateDominator(Bypass, TCCheckBlock); 8128 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8129 // For loops with multiple exits, there's no edge from the middle block 8130 // to exit blocks (as the epilogue must run) and thus no need to update 8131 // the immediate dominator of the exit blocks. 8132 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 8133 8134 LoopBypassBlocks.push_back(TCCheckBlock); 8135 8136 // Save the trip count so we don't have to regenerate it in the 8137 // vec.epilog.iter.check. This is safe to do because the trip count 8138 // generated here dominates the vector epilog iter check. 8139 EPI.TripCount = Count; 8140 } 8141 8142 ReplaceInstWithInst( 8143 TCCheckBlock->getTerminator(), 8144 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8145 8146 return TCCheckBlock; 8147 } 8148 8149 //===--------------------------------------------------------------------===// 8150 // EpilogueVectorizerEpilogueLoop 8151 //===--------------------------------------------------------------------===// 8152 8153 /// This function is partially responsible for generating the control flow 8154 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 
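/// The first pass (EpilogueVectorizerMainLoop above) emits the trip-count and
/// runtime checks together with the main vector loop; this second pass checks
/// the remaining iteration count against the epilogue VF * UF and emits the
/// epilogue vector loop, falling back to the scalar loop when too few
/// iterations remain.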
8155 std::pair<BasicBlock *, Value *> 8156 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 8157 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8158 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 8159 8160 // Now, compare the remaining count and if there aren't enough iterations to 8161 // execute the vectorized epilogue skip to the scalar part. 8162 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 8163 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 8164 LoopVectorPreHeader = 8165 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 8166 LI, nullptr, "vec.epilog.ph"); 8167 emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, 8168 VecEpilogueIterationCountCheck); 8169 8170 // Adjust the control flow taking the state info from the main loop 8171 // vectorization into account. 8172 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 8173 "expected this to be saved from the previous pass."); 8174 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 8175 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 8176 8177 DT->changeImmediateDominator(LoopVectorPreHeader, 8178 EPI.MainLoopIterationCountCheck); 8179 8180 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 8181 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8182 8183 if (EPI.SCEVSafetyCheck) 8184 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 8185 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8186 if (EPI.MemSafetyCheck) 8187 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 8188 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8189 8190 DT->changeImmediateDominator( 8191 VecEpilogueIterationCountCheck, 8192 VecEpilogueIterationCountCheck->getSinglePredecessor()); 8193 8194 DT->changeImmediateDominator(LoopScalarPreHeader, 8195 EPI.EpilogueIterationCountCheck); 8196 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8197 // If there is an epilogue which must run, there's no edge from the 8198 // middle block to exit blocks and thus no need to update the immediate 8199 // dominator of the exit blocks. 8200 DT->changeImmediateDominator(LoopExitBlock, 8201 EPI.EpilogueIterationCountCheck); 8202 8203 // Keep track of bypass blocks, as they feed start values to the induction 8204 // phis in the scalar loop preheader. 8205 if (EPI.SCEVSafetyCheck) 8206 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 8207 if (EPI.MemSafetyCheck) 8208 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 8209 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 8210 8211 // The vec.epilog.iter.check block may contain Phi nodes from reductions which 8212 // merge control-flow from the latch block and the middle block. Update the 8213 // incoming values here and move the Phi into the preheader. 
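  // Collect the phis into a separate vector first: moving them out of
  // VecEpilogueIterationCountCheck below would otherwise invalidate the
  // phis() iteration.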
  SmallVector<PHINode *, 4> PhisInBlock;
  for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
    PhisInBlock.push_back(&Phi);

  for (PHINode *Phi : PhisInBlock) {
    Phi->replaceIncomingBlockWith(
        VecEpilogueIterationCountCheck->getSinglePredecessor(),
        VecEpilogueIterationCountCheck);
    Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
    if (EPI.SCEVSafetyCheck)
      Phi->removeIncomingValue(EPI.SCEVSafetyCheck);
    if (EPI.MemSafetyCheck)
      Phi->removeIncomingValue(EPI.MemSafetyCheck);
    Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI());
  }

  // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
  Type *IdxTy = Legal->getWidestInductionType();
  PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
                                         LoopVectorPreHeader->getFirstNonPHI());
  EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
  EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
                           EPI.MainLoopIterationCountCheck);

  // Generate the induction variable.
  createHeaderBranch(Lp);

  // Generate induction resume values. These variables save the new starting
  // indexes for the scalar loop. They are used to test if there are any tail
  // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to iteration count
  // check, then the resume value for the induction variable comes from
  // the trip count of the main vector loop, hence passing the AdditionalBypass
  // argument.
  createInductionResumeValues(Lp, {VecEpilogueIterationCountCheck,
                                   EPI.VectorTripCount} /* AdditionalBypass */);

  return {completeLoopSkeleton(Lp, OrigLoopID), EPResumeVal};
}

BasicBlock *
EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
    Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {

  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  assert(
      (!isa<Instruction>(EPI.TripCount) ||
       DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
      "saved trip count does not dominate insertion point.");
  Value *TC = EPI.TripCount;
  IRBuilder<> Builder(Insert->getTerminator());
  Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the vector epilogue loop.
  auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8272 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8273 8274 Value *CheckMinIters = 8275 Builder.CreateICmp(P, Count, 8276 createStepForVF(Builder, Count->getType(), 8277 EPI.EpilogueVF, EPI.EpilogueUF), 8278 "min.epilog.iters.check"); 8279 8280 ReplaceInstWithInst( 8281 Insert->getTerminator(), 8282 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8283 8284 LoopBypassBlocks.push_back(Insert); 8285 return Insert; 8286 } 8287 8288 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { 8289 LLVM_DEBUG({ 8290 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" 8291 << "Epilogue Loop VF:" << EPI.EpilogueVF 8292 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8293 }); 8294 } 8295 8296 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { 8297 DEBUG_WITH_TYPE(VerboseDebug, { 8298 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n"; 8299 }); 8300 } 8301 8302 bool LoopVectorizationPlanner::getDecisionAndClampRange( 8303 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { 8304 assert(!Range.isEmpty() && "Trying to test an empty VF range."); 8305 bool PredicateAtRangeStart = Predicate(Range.Start); 8306 8307 for (ElementCount TmpVF = Range.Start * 2; 8308 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) 8309 if (Predicate(TmpVF) != PredicateAtRangeStart) { 8310 Range.End = TmpVF; 8311 break; 8312 } 8313 8314 return PredicateAtRangeStart; 8315 } 8316 8317 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8318 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8319 /// of VF's starting at a given VF and extending it as much as possible. Each 8320 /// vectorization decision can potentially shorten this sub-range during 8321 /// buildVPlan(). 8322 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8323 ElementCount MaxVF) { 8324 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8325 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8326 VFRange SubRange = {VF, MaxVFPlusOne}; 8327 VPlans.push_back(buildVPlan(SubRange)); 8328 VF = SubRange.End; 8329 } 8330 } 8331 8332 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8333 VPlanPtr &Plan) { 8334 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8335 8336 // Look for cached value. 8337 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8338 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8339 if (ECEntryIt != EdgeMaskCache.end()) 8340 return ECEntryIt->second; 8341 8342 VPValue *SrcMask = createBlockInMask(Src, Plan); 8343 8344 // The terminator has to be a branch inst! 8345 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8346 assert(BI && "Unexpected terminator found"); 8347 8348 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8349 return EdgeMaskCache[Edge] = SrcMask; 8350 8351 // If source is an exiting block, we know the exit edge is dynamically dead 8352 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8353 // adding uses of an otherwise potentially dead instruction. 
8354 if (OrigLoop->isLoopExiting(Src)) 8355 return EdgeMaskCache[Edge] = SrcMask; 8356 8357 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8358 assert(EdgeMask && "No Edge Mask found for condition"); 8359 8360 if (BI->getSuccessor(0) != Dst) 8361 EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc()); 8362 8363 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 8364 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8365 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8366 // The select version does not introduce new UB if SrcMask is false and 8367 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8368 VPValue *False = Plan->getOrAddVPValue( 8369 ConstantInt::getFalse(BI->getCondition()->getType())); 8370 EdgeMask = 8371 Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc()); 8372 } 8373 8374 return EdgeMaskCache[Edge] = EdgeMask; 8375 } 8376 8377 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8378 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8379 8380 // Look for cached value. 8381 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8382 if (BCEntryIt != BlockMaskCache.end()) 8383 return BCEntryIt->second; 8384 8385 // All-one mask is modelled as no-mask following the convention for masked 8386 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8387 VPValue *BlockMask = nullptr; 8388 8389 if (OrigLoop->getHeader() == BB) { 8390 if (!CM.blockNeedsPredicationForAnyReason(BB)) 8391 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8392 8393 // Introduce the early-exit compare IV <= BTC to form header block mask. 8394 // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by 8395 // constructing the desired canonical IV in the header block as its first 8396 // non-phi instructions. 8397 assert(CM.foldTailByMasking() && "must fold the tail"); 8398 VPBasicBlock *HeaderVPBB = Plan->getEntry()->getEntryBasicBlock(); 8399 auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi(); 8400 auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV()); 8401 HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi()); 8402 8403 VPBuilder::InsertPointGuard Guard(Builder); 8404 Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint); 8405 if (CM.TTI.emitGetActiveLaneMask()) { 8406 VPValue *TC = Plan->getOrCreateTripCount(); 8407 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC}); 8408 } else { 8409 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8410 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8411 } 8412 return BlockMaskCache[BB] = BlockMask; 8413 } 8414 8415 // This is the block mask. We OR all incoming edges. 8416 for (auto *Predecessor : predecessors(BB)) { 8417 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8418 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8419 return BlockMaskCache[BB] = EdgeMask; 8420 8421 if (!BlockMask) { // BlockMask has its initialized nullptr value. 
8422 BlockMask = EdgeMask; 8423 continue; 8424 } 8425 8426 BlockMask = Builder.createOr(BlockMask, EdgeMask, {}); 8427 } 8428 8429 return BlockMaskCache[BB] = BlockMask; 8430 } 8431 8432 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8433 ArrayRef<VPValue *> Operands, 8434 VFRange &Range, 8435 VPlanPtr &Plan) { 8436 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8437 "Must be called with either a load or store"); 8438 8439 auto willWiden = [&](ElementCount VF) -> bool { 8440 if (VF.isScalar()) 8441 return false; 8442 LoopVectorizationCostModel::InstWidening Decision = 8443 CM.getWideningDecision(I, VF); 8444 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8445 "CM decision should be taken at this point."); 8446 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8447 return true; 8448 if (CM.isScalarAfterVectorization(I, VF) || 8449 CM.isProfitableToScalarize(I, VF)) 8450 return false; 8451 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8452 }; 8453 8454 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8455 return nullptr; 8456 8457 VPValue *Mask = nullptr; 8458 if (Legal->isMaskRequired(I)) 8459 Mask = createBlockInMask(I->getParent(), Plan); 8460 8461 // Determine if the pointer operand of the access is either consecutive or 8462 // reverse consecutive. 8463 LoopVectorizationCostModel::InstWidening Decision = 8464 CM.getWideningDecision(I, Range.Start); 8465 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse; 8466 bool Consecutive = 8467 Reverse || Decision == LoopVectorizationCostModel::CM_Widen; 8468 8469 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8470 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask, 8471 Consecutive, Reverse); 8472 8473 StoreInst *Store = cast<StoreInst>(I); 8474 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8475 Mask, Consecutive, Reverse); 8476 } 8477 8478 static VPWidenIntOrFpInductionRecipe * 8479 createWidenInductionRecipe(PHINode *Phi, Instruction *PhiOrTrunc, 8480 VPValue *Start, const InductionDescriptor &IndDesc, 8481 LoopVectorizationCostModel &CM, Loop &OrigLoop, 8482 VFRange &Range) { 8483 // Returns true if an instruction \p I should be scalarized instead of 8484 // vectorized for the chosen vectorization factor. 8485 auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) { 8486 return CM.isScalarAfterVectorization(I, VF) || 8487 CM.isProfitableToScalarize(I, VF); 8488 }; 8489 8490 bool NeedsScalarIV = LoopVectorizationPlanner::getDecisionAndClampRange( 8491 [&](ElementCount VF) { 8492 // Returns true if we should generate a scalar version of \p IV. 
8493 if (ShouldScalarizeInstruction(PhiOrTrunc, VF)) 8494 return true; 8495 auto isScalarInst = [&](User *U) -> bool { 8496 auto *I = cast<Instruction>(U); 8497 return OrigLoop.contains(I) && ShouldScalarizeInstruction(I, VF); 8498 }; 8499 return any_of(PhiOrTrunc->users(), isScalarInst); 8500 }, 8501 Range); 8502 bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange( 8503 [&](ElementCount VF) { 8504 return ShouldScalarizeInstruction(PhiOrTrunc, VF); 8505 }, 8506 Range); 8507 assert(IndDesc.getStartValue() == 8508 Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader())); 8509 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) { 8510 return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, TruncI, 8511 NeedsScalarIV, !NeedsScalarIVOnly); 8512 } 8513 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here"); 8514 return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, NeedsScalarIV, 8515 !NeedsScalarIVOnly); 8516 } 8517 8518 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionPHI( 8519 PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) const { 8520 8521 // Check if this is an integer or fp induction. If so, build the recipe that 8522 // produces its scalar and vector values. 8523 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi)) 8524 return createWidenInductionRecipe(Phi, Phi, Operands[0], *II, CM, *OrigLoop, 8525 Range); 8526 8527 return nullptr; 8528 } 8529 8530 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8531 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, 8532 VPlan &Plan) const { 8533 // Optimize the special case where the source is a constant integer 8534 // induction variable. Notice that we can only optimize the 'trunc' case 8535 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8536 // (c) other casts depend on pointer size. 8537 8538 // Determine whether \p K is a truncation based on an induction variable that 8539 // can be optimized. 8540 auto isOptimizableIVTruncate = 8541 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8542 return [=](ElementCount VF) -> bool { 8543 return CM.isOptimizableIVTruncate(K, VF); 8544 }; 8545 }; 8546 8547 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8548 isOptimizableIVTruncate(I), Range)) { 8549 8550 auto *Phi = cast<PHINode>(I->getOperand(0)); 8551 const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi); 8552 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8553 return createWidenInductionRecipe(Phi, I, Start, II, CM, *OrigLoop, Range); 8554 } 8555 return nullptr; 8556 } 8557 8558 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8559 ArrayRef<VPValue *> Operands, 8560 VPlanPtr &Plan) { 8561 // If all incoming values are equal, the incoming VPValue can be used directly 8562 // instead of creating a new VPBlendRecipe. 8563 VPValue *FirstIncoming = Operands[0]; 8564 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8565 return FirstIncoming == Inc; 8566 })) { 8567 return Operands[0]; 8568 } 8569 8570 // We know that all PHIs in non-header blocks are converted into selects, so 8571 // we don't have to worry about the insertion order and we can just use the 8572 // builder. At this point we generate the predication tree. There may be 8573 // duplications since this is a simple recursive scan, but future 8574 // optimizations will clean it up. 
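  // For illustration, a phi such as
  //   %p = phi i32 [ %a, %if.then ], [ %b, %if.else ]
  // becomes a VPBlendRecipe whose operands are roughly
  //   (%a, mask(%if.then -> %merge), %b, mask(%if.else -> %merge)),
  // which VPBlendRecipe::execute later lowers to a chain of selects. The
  // block and value names here are made up for the example.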
8575 SmallVector<VPValue *, 2> OperandsWithMask; 8576 unsigned NumIncoming = Phi->getNumIncomingValues(); 8577 8578 for (unsigned In = 0; In < NumIncoming; In++) { 8579 VPValue *EdgeMask = 8580 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8581 assert((EdgeMask || NumIncoming == 1) && 8582 "Multiple predecessors with one having a full mask"); 8583 OperandsWithMask.push_back(Operands[In]); 8584 if (EdgeMask) 8585 OperandsWithMask.push_back(EdgeMask); 8586 } 8587 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8588 } 8589 8590 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8591 ArrayRef<VPValue *> Operands, 8592 VFRange &Range) const { 8593 8594 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8595 [this, CI](ElementCount VF) { 8596 return CM.isScalarWithPredication(CI, VF); 8597 }, 8598 Range); 8599 8600 if (IsPredicated) 8601 return nullptr; 8602 8603 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8604 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8605 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8606 ID == Intrinsic::pseudoprobe || 8607 ID == Intrinsic::experimental_noalias_scope_decl)) 8608 return nullptr; 8609 8610 auto willWiden = [&](ElementCount VF) -> bool { 8611 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8612 // The following case may be scalarized depending on the VF. 8613 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8614 // version of the instruction. 8615 // Is it beneficial to perform intrinsic call compared to lib call? 8616 bool NeedToScalarize = false; 8617 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8618 InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0; 8619 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8620 return UseVectorIntrinsic || !NeedToScalarize; 8621 }; 8622 8623 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8624 return nullptr; 8625 8626 ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size()); 8627 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 8628 } 8629 8630 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8631 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8632 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8633 // Instruction should be widened, unless it is scalar after vectorization, 8634 // scalarization is profitable or it is predicated. 
8635 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8636 return CM.isScalarAfterVectorization(I, VF) || 8637 CM.isProfitableToScalarize(I, VF) || 8638 CM.isScalarWithPredication(I, VF); 8639 }; 8640 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8641 Range); 8642 } 8643 8644 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8645 ArrayRef<VPValue *> Operands) const { 8646 auto IsVectorizableOpcode = [](unsigned Opcode) { 8647 switch (Opcode) { 8648 case Instruction::Add: 8649 case Instruction::And: 8650 case Instruction::AShr: 8651 case Instruction::BitCast: 8652 case Instruction::FAdd: 8653 case Instruction::FCmp: 8654 case Instruction::FDiv: 8655 case Instruction::FMul: 8656 case Instruction::FNeg: 8657 case Instruction::FPExt: 8658 case Instruction::FPToSI: 8659 case Instruction::FPToUI: 8660 case Instruction::FPTrunc: 8661 case Instruction::FRem: 8662 case Instruction::FSub: 8663 case Instruction::ICmp: 8664 case Instruction::IntToPtr: 8665 case Instruction::LShr: 8666 case Instruction::Mul: 8667 case Instruction::Or: 8668 case Instruction::PtrToInt: 8669 case Instruction::SDiv: 8670 case Instruction::Select: 8671 case Instruction::SExt: 8672 case Instruction::Shl: 8673 case Instruction::SIToFP: 8674 case Instruction::SRem: 8675 case Instruction::Sub: 8676 case Instruction::Trunc: 8677 case Instruction::UDiv: 8678 case Instruction::UIToFP: 8679 case Instruction::URem: 8680 case Instruction::Xor: 8681 case Instruction::ZExt: 8682 return true; 8683 } 8684 return false; 8685 }; 8686 8687 if (!IsVectorizableOpcode(I->getOpcode())) 8688 return nullptr; 8689 8690 // Success: widen this instruction. 8691 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); 8692 } 8693 8694 void VPRecipeBuilder::fixHeaderPhis() { 8695 BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); 8696 for (VPHeaderPHIRecipe *R : PhisToFix) { 8697 auto *PN = cast<PHINode>(R->getUnderlyingValue()); 8698 VPRecipeBase *IncR = 8699 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); 8700 R->addOperand(IncR->getVPSingleValue()); 8701 } 8702 } 8703 8704 VPBasicBlock *VPRecipeBuilder::handleReplication( 8705 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8706 VPlanPtr &Plan) { 8707 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8708 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8709 Range); 8710 8711 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8712 [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); }, 8713 Range); 8714 8715 // Even if the instruction is not marked as uniform, there are certain 8716 // intrinsic calls that can be effectively treated as such, so we check for 8717 // them here. Conservatively, we only do this for scalable vectors, since 8718 // for fixed-width VFs we can always fall back on full scalarization. 8719 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) { 8720 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) { 8721 case Intrinsic::assume: 8722 case Intrinsic::lifetime_start: 8723 case Intrinsic::lifetime_end: 8724 // For scalable vectors if one of the operands is variant then we still 8725 // want to mark as uniform, which will generate one instruction for just 8726 // the first lane of the vector. We can't scalarize the call in the same 8727 // way as for fixed-width vectors because we don't know how many lanes 8728 // there are. 
      //
      // The reasons for doing it this way for scalable vectors are:
      //  1. For the assume intrinsic generating the instruction for the first
      //     lane is still better than not generating any at all. For example,
      //     the input may be a splat across all lanes.
      //  2. For the lifetime start/end intrinsics the pointer operand only
      //     does anything useful when the input comes from a stack object,
      //     which suggests it should always be uniform. For non-stack objects
      //     the effect is to poison the object, which still allows us to
      //     remove the call.
      IsUniform = true;
      break;
    default:
      break;
    }
  }

  auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
                                       IsUniform, IsPredicated);
  setRecipe(I, Recipe);
  Plan->addVPValue(I, Recipe);

  // Find if I uses a predicated instruction. If so, it will use its scalar
  // value. Avoid hoisting the insert-element which packs the scalar value into
  // a vector value, as that happens iff all users use the vector value.
  for (VPValue *Op : Recipe->operands()) {
    auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
    if (!PredR)
      continue;
    auto *RepR =
        cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
    assert(RepR->isPredicated() &&
           "expected Replicate recipe to be predicated");
    RepR->setAlsoPack(false);
  }

  // Finalize the recipe for Instr, first if it is not predicated.
  if (!IsPredicated) {
    LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
    VPBB->appendRecipe(Recipe);
    return VPBB;
  }
  LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");

  VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
  assert(SingleSucc && "VPBB must have a single successor when handling "
                       "predicated replication.");
  VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
  // Record predicated instructions for above packing optimizations.
  VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
  VPBlockUtils::insertBlockAfter(Region, VPBB);
  auto *RegSucc = new VPBasicBlock();
  VPBlockUtils::insertBlockAfter(RegSucc, Region);
  VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
  return RegSucc;
}

VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
                                                      VPRecipeBase *PredRecipe,
                                                      VPlanPtr &Plan) {
  // Instructions marked for predication are replicated and placed under an
  // if-then construct to prevent side-effects.

  // Generate recipes to compute the block mask for this region.
  VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);

  // Build the triangular if-then region.
  std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
  assert(Instr->getParent() && "Predicated instruction not in any basic block");
  auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
  auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
  auto *PHIRecipe = Instr->getType()->isVoidTy()
                        ? nullptr
                        : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
  if (PHIRecipe) {
    Plan->removeVPValueFor(Instr);
    Plan->addVPValue(Instr, PHIRecipe);
  }
  auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
  auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
  VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);

  // Note: first set Entry as region entry and then connect successors starting
  // from it in order, to propagate the "parent" of each VPBasicBlock.
  VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
  VPBlockUtils::connectBlocks(Pred, Exit);

  return Region;
}

VPRecipeOrVPValueTy
VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
                                        ArrayRef<VPValue *> Operands,
                                        VFRange &Range, VPlanPtr &Plan) {
  // First, check for specific widening recipes that deal with calls, memory
  // operations, inductions and Phi nodes.
  if (auto *CI = dyn_cast<CallInst>(Instr))
    return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));

  if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
    return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));

  VPRecipeBase *Recipe;
  if (auto Phi = dyn_cast<PHINode>(Instr)) {
    if (Phi->getParent() != OrigLoop->getHeader())
      return tryToBlend(Phi, Operands, Plan);
    if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range)))
      return toVPRecipeResult(Recipe);

    VPHeaderPHIRecipe *PhiRecipe = nullptr;
    if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
      VPValue *StartV = Operands[0];
      if (Legal->isReductionVariable(Phi)) {
        const RecurrenceDescriptor &RdxDesc =
            Legal->getReductionVars().find(Phi)->second;
        assert(RdxDesc.getRecurrenceStartValue() ==
               Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
        PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
                                             CM.isInLoopReduction(Phi),
                                             CM.useOrderedReductions(RdxDesc));
      } else {
        PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
      }

      // Record the incoming value from the backedge, so we can add the
      // incoming value from the backedge after all recipes have been created.
      recordRecipeOf(cast<Instruction>(
          Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
      PhisToFix.push_back(PhiRecipe);
    } else {
      // TODO: record backedge value for remaining pointer induction phis.
      assert(Phi->getType()->isPointerTy() &&
             "only pointer phis should be handled here");
      assert(Legal->getInductionVars().count(Phi) &&
             "Not an induction variable");
      InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
      VPValue *Start = Plan->getOrAddVPValue(II.getStartValue());
      PhiRecipe = new VPWidenPHIRecipe(Phi, Start);
    }

    return toVPRecipeResult(PhiRecipe);
  }

  if (isa<TruncInst>(Instr) &&
      (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
                                               Range, *Plan)))
    return toVPRecipeResult(Recipe);

  if (!shouldWiden(Instr, Range))
    return nullptr;

  if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
    return toVPRecipeResult(new VPWidenGEPRecipe(
        GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));

  if (auto *SI = dyn_cast<SelectInst>(Instr)) {
    bool InvariantCond =
        PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
    return toVPRecipeResult(new VPWidenSelectRecipe(
        *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
  }

  return toVPRecipeResult(tryToWiden(Instr, Operands));
}

void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
                                                        ElementCount MaxVF) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");

  // Collect instructions from the original loop that will become trivially
  // dead in the vectorized loop. We don't need to vectorize these
  // instructions. For example, original induction update instructions can
  // become dead because we separately emit induction "steps" when generating
  // code for the new loop. Similarly, we create a new latch condition when
  // setting up the structure of the new loop, so the old one can become dead.
  SmallPtrSet<Instruction *, 4> DeadInstructions;
  collectTriviallyDeadInstructions(DeadInstructions);

  // Add assume instructions we need to drop to DeadInstructions, to prevent
  // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  // control flow is preserved, we should keep them.
  auto &ConditionalAssumes = Legal->getConditionalAssumes();
  DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());

  MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
  // Dead instructions do not need sinking. Remove them from SinkAfter.
  for (Instruction *I : DeadInstructions)
    SinkAfter.erase(I);

  // Cannot sink instructions after dead instructions (there won't be any
  // recipes for them). Instead, find the first non-dead previous instruction.
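  // For example, if the recorded sink target is an induction update that was
  // just added to DeadInstructions, the loop below walks backwards from it to
  // the closest preceding live instruction and uses that as the new target.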
8921 for (auto &P : Legal->getSinkAfter()) { 8922 Instruction *SinkTarget = P.second; 8923 Instruction *FirstInst = &*SinkTarget->getParent()->begin(); 8924 (void)FirstInst; 8925 while (DeadInstructions.contains(SinkTarget)) { 8926 assert( 8927 SinkTarget != FirstInst && 8928 "Must find a live instruction (at least the one feeding the " 8929 "first-order recurrence PHI) before reaching beginning of the block"); 8930 SinkTarget = SinkTarget->getPrevNode(); 8931 assert(SinkTarget != P.first && 8932 "sink source equals target, no sinking required"); 8933 } 8934 P.second = SinkTarget; 8935 } 8936 8937 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8938 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8939 VFRange SubRange = {VF, MaxVFPlusOne}; 8940 VPlans.push_back( 8941 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter)); 8942 VF = SubRange.End; 8943 } 8944 } 8945 8946 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a 8947 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a 8948 // BranchOnCount VPInstruction to the latch. 8949 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL, 8950 bool HasNUW, bool IsVPlanNative) { 8951 Value *StartIdx = ConstantInt::get(IdxTy, 0); 8952 auto *StartV = Plan.getOrAddVPValue(StartIdx); 8953 8954 auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL); 8955 VPRegionBlock *TopRegion = Plan.getVectorLoopRegion(); 8956 VPBasicBlock *Header = TopRegion->getEntryBasicBlock(); 8957 if (IsVPlanNative) 8958 Header = cast<VPBasicBlock>(Header->getSingleSuccessor()); 8959 Header->insert(CanonicalIVPHI, Header->begin()); 8960 8961 auto *CanonicalIVIncrement = 8962 new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW 8963 : VPInstruction::CanonicalIVIncrement, 8964 {CanonicalIVPHI}, DL); 8965 CanonicalIVPHI->addOperand(CanonicalIVIncrement); 8966 8967 VPBasicBlock *EB = TopRegion->getExitBasicBlock(); 8968 if (IsVPlanNative) { 8969 EB = cast<VPBasicBlock>(EB->getSinglePredecessor()); 8970 EB->setCondBit(nullptr); 8971 } 8972 EB->appendRecipe(CanonicalIVIncrement); 8973 8974 auto *BranchOnCount = 8975 new VPInstruction(VPInstruction::BranchOnCount, 8976 {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL); 8977 EB->appendRecipe(BranchOnCount); 8978 } 8979 8980 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( 8981 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, 8982 const MapVector<Instruction *, Instruction *> &SinkAfter) { 8983 8984 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups; 8985 8986 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder); 8987 8988 // --------------------------------------------------------------------------- 8989 // Pre-construction: record ingredients whose recipes we'll need to further 8990 // process after constructing the initial VPlan. 8991 // --------------------------------------------------------------------------- 8992 8993 // Mark instructions we'll need to sink later and their targets as 8994 // ingredients whose recipe we'll need to record. 
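  // The recorded ingredients are looked up again via getRecipe() once the
  // initial VPlan has been constructed, e.g. when applying the sink-after
  // transformation and when adjusting the in-loop reduction chains.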
  for (auto &Entry : SinkAfter) {
    RecipeBuilder.recordRecipeOf(Entry.first);
    RecipeBuilder.recordRecipeOf(Entry.second);
  }
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    RecurKind Kind =
        Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    RecipeBuilder.recordRecipeOf(Phi);
    for (auto &R : ReductionOperations) {
      RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
      // need to record the ICmp recipe, so it can be removed later.
      assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
             "Only min/max recurrences allowed for inloop reductions");
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
        RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
    }
  }

  // For each interleave group which is relevant for this (possibly trimmed)
  // Range, add it to the set of groups to be later applied to the VPlan and
  // add placeholders for its members' Recipes which we'll be replacing with a
  // single VPInterleaveRecipe.
  for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
    auto applyIG = [IG, this](ElementCount VF) -> bool {
      return (VF.isVector() && // Query is illegal for VF == 1
              CM.getWideningDecision(IG->getInsertPos(), VF) ==
                  LoopVectorizationCostModel::CM_Interleave);
    };
    if (!getDecisionAndClampRange(applyIG, Range))
      continue;
    InterleaveGroups.insert(IG);
    for (unsigned i = 0; i < IG->getFactor(); i++)
      if (Instruction *Member = IG->getMember(i))
        RecipeBuilder.recordRecipeOf(Member);
  }

  // ---------------------------------------------------------------------------
  // Build initial VPlan: Scan the body of the loop in a topological order to
  // visit each basic block after having visited its predecessor basic blocks.
  // ---------------------------------------------------------------------------

  // Create initial VPlan skeleton, with separate header and latch blocks.
  VPBasicBlock *HeaderVPBB = new VPBasicBlock();
  VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
  VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
  auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
  auto Plan = std::make_unique<VPlan>(TopRegion);

  Instruction *DLInst =
      getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
  addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
                        DLInst ? DLInst->getDebugLoc() : DebugLoc(),
                        !CM.foldTailByMasking(), false);

  // Scan the body of the loop in a topological order to visit each basic block
  // after having visited its predecessor basic blocks.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  VPBasicBlock *VPBB = HeaderVPBB;
  SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients and fill a new VPBasicBlock.
    unsigned VPBBsForBB = 0;
    VPBB->setName(BB->getName());
    Builder.setInsertPoint(VPBB);

    // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
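    // Each ingredient is first offered to tryToCreateWidenRecipe(); if no
    // widening recipe applies, it is handed to handleReplication(), which may
    // wrap it in a predicated replicate region and thereby introduce new
    // VPBasicBlocks.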
9069 for (Instruction &I : BB->instructionsWithoutDebug()) { 9070 Instruction *Instr = &I; 9071 9072 // First filter out irrelevant instructions, to ensure no recipes are 9073 // built for them. 9074 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) 9075 continue; 9076 9077 SmallVector<VPValue *, 4> Operands; 9078 auto *Phi = dyn_cast<PHINode>(Instr); 9079 if (Phi && Phi->getParent() == OrigLoop->getHeader()) { 9080 Operands.push_back(Plan->getOrAddVPValue( 9081 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()))); 9082 } else { 9083 auto OpRange = Plan->mapToVPValues(Instr->operands()); 9084 Operands = {OpRange.begin(), OpRange.end()}; 9085 } 9086 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe( 9087 Instr, Operands, Range, Plan)) { 9088 // If Instr can be simplified to an existing VPValue, use it. 9089 if (RecipeOrValue.is<VPValue *>()) { 9090 auto *VPV = RecipeOrValue.get<VPValue *>(); 9091 Plan->addVPValue(Instr, VPV); 9092 // If the re-used value is a recipe, register the recipe for the 9093 // instruction, in case the recipe for Instr needs to be recorded. 9094 if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef())) 9095 RecipeBuilder.setRecipe(Instr, R); 9096 continue; 9097 } 9098 // Otherwise, add the new recipe. 9099 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); 9100 for (auto *Def : Recipe->definedValues()) { 9101 auto *UV = Def->getUnderlyingValue(); 9102 Plan->addVPValue(UV, Def); 9103 } 9104 9105 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && 9106 HeaderVPBB->getFirstNonPhi() != VPBB->end()) { 9107 // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section 9108 // of the header block. That can happen for truncates of induction 9109 // variables. Those recipes are moved to the phi section of the header 9110 // block after applying SinkAfter, which relies on the original 9111 // position of the trunc. 9112 assert(isa<TruncInst>(Instr)); 9113 InductionsToMove.push_back( 9114 cast<VPWidenIntOrFpInductionRecipe>(Recipe)); 9115 } 9116 RecipeBuilder.setRecipe(Instr, Recipe); 9117 VPBB->appendRecipe(Recipe); 9118 continue; 9119 } 9120 9121 // Otherwise, if all widening options failed, Instruction is to be 9122 // replicated. This may create a successor for VPBB. 9123 VPBasicBlock *NextVPBB = 9124 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 9125 if (NextVPBB != VPBB) { 9126 VPBB = NextVPBB; 9127 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 9128 : ""); 9129 } 9130 } 9131 9132 VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB); 9133 VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor()); 9134 } 9135 9136 // Fold the last, empty block into its predecessor. 9137 VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB); 9138 assert(VPBB && "expected to fold last (empty) block"); 9139 // After here, VPBB should not be used. 9140 VPBB = nullptr; 9141 9142 assert(isa<VPRegionBlock>(Plan->getEntry()) && 9143 !Plan->getEntry()->getEntryBasicBlock()->empty() && 9144 "entry block must be set to a VPRegionBlock having a non-empty entry " 9145 "VPBasicBlock"); 9146 RecipeBuilder.fixHeaderPhis(); 9147 9148 // --------------------------------------------------------------------------- 9149 // Transform initial VPlan: Apply previously taken decisions, in order, to 9150 // bring the VPlan to its final state. 9151 // --------------------------------------------------------------------------- 9152 9153 // Apply Sink-After legal constraints. 
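  // In outline, three cases are handled below: a recipe is moved directly
  // after its sink-after target; if the target sits inside a replicate
  // region, the recipe is instead moved to the block following that region;
  // and if the sinking recipe is itself the single recipe of a replicate
  // region, the whole region is unhooked and re-inserted after the target
  // (splitting the target's block when needed).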
9154 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 9155 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 9156 if (Region && Region->isReplicator()) { 9157 assert(Region->getNumSuccessors() == 1 && 9158 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 9159 assert(R->getParent()->size() == 1 && 9160 "A recipe in an original replicator region must be the only " 9161 "recipe in its block"); 9162 return Region; 9163 } 9164 return nullptr; 9165 }; 9166 for (auto &Entry : SinkAfter) { 9167 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 9168 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 9169 9170 auto *TargetRegion = GetReplicateRegion(Target); 9171 auto *SinkRegion = GetReplicateRegion(Sink); 9172 if (!SinkRegion) { 9173 // If the sink source is not a replicate region, sink the recipe directly. 9174 if (TargetRegion) { 9175 // The target is in a replication region, make sure to move Sink to 9176 // the block after it, not into the replication region itself. 9177 VPBasicBlock *NextBlock = 9178 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 9179 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 9180 } else 9181 Sink->moveAfter(Target); 9182 continue; 9183 } 9184 9185 // The sink source is in a replicate region. Unhook the region from the CFG. 9186 auto *SinkPred = SinkRegion->getSinglePredecessor(); 9187 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 9188 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 9189 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 9190 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 9191 9192 if (TargetRegion) { 9193 // The target recipe is also in a replicate region, move the sink region 9194 // after the target region. 9195 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 9196 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 9197 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 9198 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 9199 } else { 9200 // The sink source is in a replicate region, we need to move the whole 9201 // replicate region, which should only contain a single recipe in the 9202 // main block. 9203 auto *SplitBlock = 9204 Target->getParent()->splitAt(std::next(Target->getIterator())); 9205 9206 auto *SplitPred = SplitBlock->getSinglePredecessor(); 9207 9208 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 9209 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 9210 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 9211 } 9212 } 9213 9214 VPlanTransforms::removeRedundantCanonicalIVs(*Plan); 9215 VPlanTransforms::removeRedundantInductionCasts(*Plan); 9216 9217 // Now that sink-after is done, move induction recipes for optimized truncates 9218 // to the phi section of the header block. 9219 for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove) 9220 Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi()); 9221 9222 // Adjust the recipes for any inloop reductions. 9223 adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan, 9224 RecipeBuilder, Range.Start); 9225 9226 // Introduce a recipe to combine the incoming and previous values of a 9227 // first-order recurrence. 
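  // Roughly, FirstOrderRecurrenceSplice(%phi, %backedge.value) produces a
  // vector whose lane 0 is the last lane of the recurrence value from the
  // previous vector iteration and whose remaining lanes are lanes 0..VF-2 of
  // the current value, mimicking the scalar loop's use of the prior
  // iteration's value. (%phi and %backedge.value name the recipe's two
  // operands for this sketch only.)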
9228 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9229 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 9230 if (!RecurPhi) 9231 continue; 9232 9233 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 9234 VPBasicBlock *InsertBlock = PrevRecipe->getParent(); 9235 auto *Region = GetReplicateRegion(PrevRecipe); 9236 if (Region) 9237 InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor()); 9238 if (Region || PrevRecipe->isPhi()) 9239 Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi()); 9240 else 9241 Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator())); 9242 9243 auto *RecurSplice = cast<VPInstruction>( 9244 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 9245 {RecurPhi, RecurPhi->getBackedgeValue()})); 9246 9247 RecurPhi->replaceAllUsesWith(RecurSplice); 9248 // Set the first operand of RecurSplice to RecurPhi again, after replacing 9249 // all users. 9250 RecurSplice->setOperand(0, RecurPhi); 9251 } 9252 9253 // Interleave memory: for each Interleave Group we marked earlier as relevant 9254 // for this VPlan, replace the Recipes widening its memory instructions with a 9255 // single VPInterleaveRecipe at its insertion point. 9256 for (auto IG : InterleaveGroups) { 9257 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 9258 RecipeBuilder.getRecipe(IG->getInsertPos())); 9259 SmallVector<VPValue *, 4> StoredValues; 9260 for (unsigned i = 0; i < IG->getFactor(); ++i) 9261 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 9262 auto *StoreR = 9263 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 9264 StoredValues.push_back(StoreR->getStoredValue()); 9265 } 9266 9267 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 9268 Recipe->getMask()); 9269 VPIG->insertBefore(Recipe); 9270 unsigned J = 0; 9271 for (unsigned i = 0; i < IG->getFactor(); ++i) 9272 if (Instruction *Member = IG->getMember(i)) { 9273 if (!Member->getType()->isVoidTy()) { 9274 VPValue *OriginalV = Plan->getVPValue(Member); 9275 Plan->removeVPValueFor(Member); 9276 Plan->addVPValue(Member, VPIG->getVPValue(J)); 9277 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 9278 J++; 9279 } 9280 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9281 } 9282 } 9283 9284 // From this point onwards, VPlan-to-VPlan transformations may change the plan 9285 // in ways that accessing values using original IR values is incorrect. 9286 Plan->disableValue2VPValue(); 9287 9288 VPlanTransforms::sinkScalarOperands(*Plan); 9289 VPlanTransforms::mergeReplicateRegions(*Plan); 9290 9291 std::string PlanName; 9292 raw_string_ostream RSO(PlanName); 9293 ElementCount VF = Range.Start; 9294 Plan->addVF(VF); 9295 RSO << "Initial VPlan for VF={" << VF; 9296 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 9297 Plan->addVF(VF); 9298 RSO << "," << VF; 9299 } 9300 RSO << "},UF>=1"; 9301 RSO.flush(); 9302 Plan->setName(PlanName); 9303 9304 // Fold Exit block into its predecessor if possible. 9305 // TODO: Fold block earlier once all VPlan transforms properly maintain a 9306 // VPBasicBlock as exit. 
  VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit());

  assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
  return Plan;
}

VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: They may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build VPlan
  // upfront in the vectorization pipeline.
  assert(!OrigLoop->isInnermost());
  assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");

  // Create new empty VPlan
  auto Plan = std::make_unique<VPlan>();

  // Build hierarchical CFG
  VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
  HCFGBuilder.buildHierarchicalCFG();

  for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
       VF *= 2)
    Plan->addVF(VF);

  if (EnableVPlanPredication) {
    VPlanPredicator VPP(*Plan);
    VPP.predicate();

    // Avoid running transformation to recipes until masked code generation in
    // VPlan-native path is in place.
    return Plan;
  }

  SmallPtrSet<Instruction *, 1> DeadInstructions;
  VPlanTransforms::VPInstructionsToVPRecipes(
      OrigLoop, Plan,
      [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
      DeadInstructions, *PSE.getSE());

  addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
                        true, true);
  return Plan;
}

// Adjust the recipes for reductions. For in-loop reductions the chain of
// instructions leading from the loop exit instr to the phi needs to be
// converted to reductions, with one operand being vector and the other being
// the scalar reduction chain. For other reductions, a select is introduced
// between the phi and live-out recipes when folding the tail.
void LoopVectorizationPlanner::adjustRecipesForReductions(
    VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
    ElementCount MinVF) {
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    const RecurrenceDescriptor &RdxDesc =
        Legal->getReductionVars().find(Phi)->second;
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
      continue;

    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For minmax the chain will be the select instructions.
    Instruction *Chain = Phi;
    for (Instruction *R : ReductionOperations) {
      VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
      RecurKind Kind = RdxDesc.getRecurrenceKind();

      VPValue *ChainOp = Plan->getVPValue(Chain);
      unsigned FirstOpId;
      assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
             "Only min/max recurrences allowed for inloop reductions");
      // Recognize a call to the llvm.fmuladd intrinsic.
9383 bool IsFMulAdd = (Kind == RecurKind::FMulAdd); 9384 assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) && 9385 "Expected instruction to be a call to the llvm.fmuladd intrinsic"); 9386 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9387 assert(isa<VPWidenSelectRecipe>(WidenRecipe) && 9388 "Expected to replace a VPWidenSelectSC"); 9389 FirstOpId = 1; 9390 } else { 9391 assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) || 9392 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) && 9393 "Expected to replace a VPWidenSC"); 9394 FirstOpId = 0; 9395 } 9396 unsigned VecOpId = 9397 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId; 9398 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); 9399 9400 auto *CondOp = CM.foldTailByMasking() 9401 ? RecipeBuilder.createBlockInMask(R->getParent(), Plan) 9402 : nullptr; 9403 9404 if (IsFMulAdd) { 9405 // If the instruction is a call to the llvm.fmuladd intrinsic then we 9406 // need to create an fmul recipe to use as the vector operand for the 9407 // fadd reduction. 9408 VPInstruction *FMulRecipe = new VPInstruction( 9409 Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))}); 9410 FMulRecipe->setFastMathFlags(R->getFastMathFlags()); 9411 WidenRecipe->getParent()->insert(FMulRecipe, 9412 WidenRecipe->getIterator()); 9413 VecOp = FMulRecipe; 9414 } 9415 VPReductionRecipe *RedRecipe = 9416 new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9417 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9418 Plan->removeVPValueFor(R); 9419 Plan->addVPValue(R, RedRecipe); 9420 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 9421 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9422 WidenRecipe->eraseFromParent(); 9423 9424 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9425 VPRecipeBase *CompareRecipe = 9426 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9427 assert(isa<VPWidenRecipe>(CompareRecipe) && 9428 "Expected to replace a VPWidenSC"); 9429 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9430 "Expected no remaining users"); 9431 CompareRecipe->eraseFromParent(); 9432 } 9433 Chain = R; 9434 } 9435 } 9436 9437 // If tail is folded by masking, introduce selects between the phi 9438 // and the live-out instruction of each reduction, at the beginning of the 9439 // dedicated latch block. 
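  // For example, for a reduction phi %rdx with backedge (live-out) value
  // %rdx.next, the select created below is roughly
  //   select(header-mask, %rdx.next, %rdx)
  // so that lanes masked off by tail folding keep the previous value.
  // (The names are illustrative only.)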
9440 if (CM.foldTailByMasking()) { 9441 Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin()); 9442 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9443 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R); 9444 if (!PhiR || PhiR->isInLoop()) 9445 continue; 9446 VPValue *Cond = 9447 RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9448 VPValue *Red = PhiR->getBackedgeValue(); 9449 assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB && 9450 "reduction recipe must be defined before latch"); 9451 Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR}); 9452 } 9453 } 9454 } 9455 9456 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9457 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9458 VPSlotTracker &SlotTracker) const { 9459 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9460 IG->getInsertPos()->printAsOperand(O, false); 9461 O << ", "; 9462 getAddr()->printAsOperand(O, SlotTracker); 9463 VPValue *Mask = getMask(); 9464 if (Mask) { 9465 O << ", "; 9466 Mask->printAsOperand(O, SlotTracker); 9467 } 9468 9469 unsigned OpIdx = 0; 9470 for (unsigned i = 0; i < IG->getFactor(); ++i) { 9471 if (!IG->getMember(i)) 9472 continue; 9473 if (getNumStoreOperands() > 0) { 9474 O << "\n" << Indent << " store "; 9475 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker); 9476 O << " to index " << i; 9477 } else { 9478 O << "\n" << Indent << " "; 9479 getVPValue(OpIdx)->printAsOperand(O, SlotTracker); 9480 O << " = load from index " << i; 9481 } 9482 ++OpIdx; 9483 } 9484 } 9485 #endif 9486 9487 void VPWidenCallRecipe::execute(VPTransformState &State) { 9488 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9489 *this, State); 9490 } 9491 9492 void VPWidenSelectRecipe::execute(VPTransformState &State) { 9493 auto &I = *cast<SelectInst>(getUnderlyingInstr()); 9494 State.ILV->setDebugLocFromInst(&I); 9495 9496 // The condition can be loop invariant but still defined inside the 9497 // loop. This means that we can't just use the original 'cond' value. 9498 // We have to take the 'vectorized' value and pick the first lane. 9499 // Instcombine will make this a no-op. 9500 auto *InvarCond = 9501 InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr; 9502 9503 for (unsigned Part = 0; Part < State.UF; ++Part) { 9504 Value *Cond = InvarCond ? 
InvarCond : State.get(getOperand(0), Part); 9505 Value *Op0 = State.get(getOperand(1), Part); 9506 Value *Op1 = State.get(getOperand(2), Part); 9507 Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1); 9508 State.set(this, Sel, Part); 9509 State.ILV->addMetadata(Sel, &I); 9510 } 9511 } 9512 9513 void VPWidenRecipe::execute(VPTransformState &State) { 9514 auto &I = *cast<Instruction>(getUnderlyingValue()); 9515 auto &Builder = State.Builder; 9516 switch (I.getOpcode()) { 9517 case Instruction::Call: 9518 case Instruction::Br: 9519 case Instruction::PHI: 9520 case Instruction::GetElementPtr: 9521 case Instruction::Select: 9522 llvm_unreachable("This instruction is handled by a different recipe."); 9523 case Instruction::UDiv: 9524 case Instruction::SDiv: 9525 case Instruction::SRem: 9526 case Instruction::URem: 9527 case Instruction::Add: 9528 case Instruction::FAdd: 9529 case Instruction::Sub: 9530 case Instruction::FSub: 9531 case Instruction::FNeg: 9532 case Instruction::Mul: 9533 case Instruction::FMul: 9534 case Instruction::FDiv: 9535 case Instruction::FRem: 9536 case Instruction::Shl: 9537 case Instruction::LShr: 9538 case Instruction::AShr: 9539 case Instruction::And: 9540 case Instruction::Or: 9541 case Instruction::Xor: { 9542 // Just widen unops and binops. 9543 State.ILV->setDebugLocFromInst(&I); 9544 9545 for (unsigned Part = 0; Part < State.UF; ++Part) { 9546 SmallVector<Value *, 2> Ops; 9547 for (VPValue *VPOp : operands()) 9548 Ops.push_back(State.get(VPOp, Part)); 9549 9550 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 9551 9552 if (auto *VecOp = dyn_cast<Instruction>(V)) { 9553 VecOp->copyIRFlags(&I); 9554 9555 // If the instruction is vectorized and was in a basic block that needed 9556 // predication, we can't propagate poison-generating flags (nuw/nsw, 9557 // exact, etc.). The control flow has been linearized and the 9558 // instruction is no longer guarded by the predicate, which could make 9559 // the flag properties to no longer hold. 9560 if (State.MayGeneratePoisonRecipes.contains(this)) 9561 VecOp->dropPoisonGeneratingFlags(); 9562 } 9563 9564 // Use this vector value for all users of the original instruction. 9565 State.set(this, V, Part); 9566 State.ILV->addMetadata(V, &I); 9567 } 9568 9569 break; 9570 } 9571 case Instruction::ICmp: 9572 case Instruction::FCmp: { 9573 // Widen compares. Generate vector compares. 9574 bool FCmp = (I.getOpcode() == Instruction::FCmp); 9575 auto *Cmp = cast<CmpInst>(&I); 9576 State.ILV->setDebugLocFromInst(Cmp); 9577 for (unsigned Part = 0; Part < State.UF; ++Part) { 9578 Value *A = State.get(getOperand(0), Part); 9579 Value *B = State.get(getOperand(1), Part); 9580 Value *C = nullptr; 9581 if (FCmp) { 9582 // Propagate fast math flags. 
9583 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9584 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 9585 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 9586 } else { 9587 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 9588 } 9589 State.set(this, C, Part); 9590 State.ILV->addMetadata(C, &I); 9591 } 9592 9593 break; 9594 } 9595 9596 case Instruction::ZExt: 9597 case Instruction::SExt: 9598 case Instruction::FPToUI: 9599 case Instruction::FPToSI: 9600 case Instruction::FPExt: 9601 case Instruction::PtrToInt: 9602 case Instruction::IntToPtr: 9603 case Instruction::SIToFP: 9604 case Instruction::UIToFP: 9605 case Instruction::Trunc: 9606 case Instruction::FPTrunc: 9607 case Instruction::BitCast: { 9608 auto *CI = cast<CastInst>(&I); 9609 State.ILV->setDebugLocFromInst(CI); 9610 9611 /// Vectorize casts. 9612 Type *DestTy = (State.VF.isScalar()) 9613 ? CI->getType() 9614 : VectorType::get(CI->getType(), State.VF); 9615 9616 for (unsigned Part = 0; Part < State.UF; ++Part) { 9617 Value *A = State.get(getOperand(0), Part); 9618 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 9619 State.set(this, Cast, Part); 9620 State.ILV->addMetadata(Cast, &I); 9621 } 9622 break; 9623 } 9624 default: 9625 // This instruction is not vectorized by simple widening. 9626 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 9627 llvm_unreachable("Unhandled instruction!"); 9628 } // end of switch. 9629 } 9630 9631 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9632 auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr()); 9633 // Construct a vector GEP by widening the operands of the scalar GEP as 9634 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 9635 // results in a vector of pointers when at least one operand of the GEP 9636 // is vector-typed. Thus, to keep the representation compact, we only use 9637 // vector-typed operands for loop-varying values. 9638 9639 if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 9640 // If we are vectorizing, but the GEP has only loop-invariant operands, 9641 // the GEP we build (by only using vector-typed operands for 9642 // loop-varying values) would be a scalar pointer. Thus, to ensure we 9643 // produce a vector of pointers, we need to either arbitrarily pick an 9644 // operand to broadcast, or broadcast a clone of the original GEP. 9645 // Here, we broadcast a clone of the original. 9646 // 9647 // TODO: If at some point we decide to scalarize instructions having 9648 // loop-invariant operands, this special case will no longer be 9649 // required. We would add the scalarization decision to 9650 // collectLoopScalars() and teach getVectorValue() to broadcast 9651 // the lane-zero scalar value. 9652 auto *Clone = State.Builder.Insert(GEP->clone()); 9653 for (unsigned Part = 0; Part < State.UF; ++Part) { 9654 Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone); 9655 State.set(this, EntryPart, Part); 9656 State.ILV->addMetadata(EntryPart, GEP); 9657 } 9658 } else { 9659 // If the GEP has at least one loop-varying operand, we are sure to 9660 // produce a vector of pointers. But if we are only unrolling, we want 9661 // to produce a scalar GEP for each unroll part. Thus, the GEP we 9662 // produce with the code below will be scalar (if VF == 1) or vector 9663 // (otherwise). Note that for the unroll-only case, we still maintain 9664 // values in the vector mapping with initVector, as we do for other 9665 // instructions. 
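// Rough example (illustrative only, names invented): with VF = 4 and a
// loop-varying index, 'getelementptr float, float* %base, i64 %i' is widened
// below into 'getelementptr float, float* %base, <4 x i64> %vec.i', which
// yields a <4 x float*> vector of pointers; with VF = 1 (unroll-only) each
// part keeps a scalar GEP.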
9666 for (unsigned Part = 0; Part < State.UF; ++Part) { 9667 // The pointer operand of the new GEP. If it's loop-invariant, we 9668 // won't broadcast it. 9669 auto *Ptr = IsPtrLoopInvariant 9670 ? State.get(getOperand(0), VPIteration(0, 0)) 9671 : State.get(getOperand(0), Part); 9672 9673 // Collect all the indices for the new GEP. If any index is 9674 // loop-invariant, we won't broadcast it. 9675 SmallVector<Value *, 4> Indices; 9676 for (unsigned I = 1, E = getNumOperands(); I < E; I++) { 9677 VPValue *Operand = getOperand(I); 9678 if (IsIndexLoopInvariant[I - 1]) 9679 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 9680 else 9681 Indices.push_back(State.get(Operand, Part)); 9682 } 9683 9684 // If the GEP instruction is vectorized and was in a basic block that 9685 // needed predication, we can't propagate the poison-generating 'inbounds' 9686 // flag. The control flow has been linearized and the GEP is no longer 9687 // guarded by the predicate, which could make the 'inbounds' properties to 9688 // no longer hold. 9689 bool IsInBounds = 9690 GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0; 9691 9692 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 9693 // but it should be a vector, otherwise. 9694 auto *NewGEP = IsInBounds 9695 ? State.Builder.CreateInBoundsGEP( 9696 GEP->getSourceElementType(), Ptr, Indices) 9697 : State.Builder.CreateGEP(GEP->getSourceElementType(), 9698 Ptr, Indices); 9699 assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) && 9700 "NewGEP is not a pointer vector"); 9701 State.set(this, NewGEP, Part); 9702 State.ILV->addMetadata(NewGEP, GEP); 9703 } 9704 } 9705 } 9706 9707 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 9708 assert(!State.Instance && "Int or FP induction being replicated."); 9709 auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0); 9710 State.ILV->widenIntOrFpInduction(IV, this, State, CanonicalIV); 9711 } 9712 9713 void VPWidenPHIRecipe::execute(VPTransformState &State) { 9714 State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this, 9715 State); 9716 } 9717 9718 void VPBlendRecipe::execute(VPTransformState &State) { 9719 State.ILV->setDebugLocFromInst(Phi, &State.Builder); 9720 // We know that all PHIs in non-header blocks are converted into 9721 // selects, so we don't have to worry about the insertion order and we 9722 // can just use the builder. 9723 // At this point we generate the predication tree. There may be 9724 // duplications since this is a simple recursive scan, but future 9725 // optimizations will clean it up. 9726 9727 unsigned NumIncoming = getNumIncomingValues(); 9728 9729 // Generate a sequence of selects of the form: 9730 // SELECT(Mask3, In3, 9731 // SELECT(Mask2, In2, 9732 // SELECT(Mask1, In1, 9733 // In0))) 9734 // Note that Mask0 is never used: lanes for which no path reaches this phi and 9735 // are essentially undef are taken from In0. 9736 InnerLoopVectorizer::VectorParts Entry(State.UF); 9737 for (unsigned In = 0; In < NumIncoming; ++In) { 9738 for (unsigned Part = 0; Part < State.UF; ++Part) { 9739 // We might have single edge PHIs (blocks) - use an identity 9740 // 'select' for the first PHI operand. 9741 Value *In0 = State.get(getIncomingValue(In), Part); 9742 if (In == 0) 9743 Entry[Part] = In0; // Initialize with the first incoming value. 9744 else { 9745 // Select between the current value and the previous incoming edge 9746 // based on the incoming mask. 
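// Per incoming value this amounts to (sketch, names invented):
//   %predphi = select <VF x i1> %mask.in, <VF x Ty> %val.in, <VF x Ty> %accum
// where %accum is the result accumulated from the previous incoming values.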
9747 Value *Cond = State.get(getMask(In), Part); 9748 Entry[Part] = 9749 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 9750 } 9751 } 9752 } 9753 for (unsigned Part = 0; Part < State.UF; ++Part) 9754 State.set(this, Entry[Part], Part); 9755 } 9756 9757 void VPInterleaveRecipe::execute(VPTransformState &State) { 9758 assert(!State.Instance && "Interleave group being replicated."); 9759 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9760 getStoredValues(), getMask()); 9761 } 9762 9763 void VPReductionRecipe::execute(VPTransformState &State) { 9764 assert(!State.Instance && "Reduction being replicated."); 9765 Value *PrevInChain = State.get(getChainOp(), 0); 9766 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9767 bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc); 9768 // Propagate the fast-math flags carried by the underlying instruction. 9769 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder); 9770 State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags()); 9771 for (unsigned Part = 0; Part < State.UF; ++Part) { 9772 Value *NewVecOp = State.get(getVecOp(), Part); 9773 if (VPValue *Cond = getCondOp()) { 9774 Value *NewCond = State.get(Cond, Part); 9775 VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); 9776 Value *Iden = RdxDesc->getRecurrenceIdentity( 9777 Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags()); 9778 Value *IdenVec = 9779 State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden); 9780 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); 9781 NewVecOp = Select; 9782 } 9783 Value *NewRed; 9784 Value *NextInChain; 9785 if (IsOrdered) { 9786 if (State.VF.isVector()) 9787 NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp, 9788 PrevInChain); 9789 else 9790 NewRed = State.Builder.CreateBinOp( 9791 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain, 9792 NewVecOp); 9793 PrevInChain = NewRed; 9794 } else { 9795 PrevInChain = State.get(getChainOp(), Part); 9796 NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); 9797 } 9798 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9799 NextInChain = 9800 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), 9801 NewRed, PrevInChain); 9802 } else if (IsOrdered) 9803 NextInChain = NewRed; 9804 else 9805 NextInChain = State.Builder.CreateBinOp( 9806 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed, 9807 PrevInChain); 9808 State.set(this, NextInChain, Part); 9809 } 9810 } 9811 9812 void VPReplicateRecipe::execute(VPTransformState &State) { 9813 if (State.Instance) { // Generate a single instance. 9814 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 9815 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance, 9816 IsPredicated, State); 9817 // Insert scalar instance packing it into a vector. 9818 if (AlsoPack && State.VF.isVector()) { 9819 // If we're constructing lane 0, initialize to start from poison. 
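// Packing sketch (illustrative): lane 0 starts from
//   %v0 = insertelement <VF x Ty> poison, Ty %scalar.lane0, i32 0
// and each subsequent lane L inserts its scalar into the previous vector at
// index L via packScalarIntoVectorValue.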
9820 if (State.Instance->Lane.isFirstLane()) { 9821 assert(!State.VF.isScalable() && "VF is assumed to be non scalable."); 9822 Value *Poison = PoisonValue::get( 9823 VectorType::get(getUnderlyingValue()->getType(), State.VF)); 9824 State.set(this, Poison, State.Instance->Part); 9825 } 9826 State.ILV->packScalarIntoVectorValue(this, *State.Instance, State); 9827 } 9828 return; 9829 } 9830 9831 // Generate scalar instances for all VF lanes of all UF parts, unless the 9832 // instruction is uniform, in which case generate only the first lane for each 9833 // of the UF parts. 9834 unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue(); 9835 assert((!State.VF.isScalable() || IsUniform) && 9836 "Can't scalarize a scalable vector"); 9837 for (unsigned Part = 0; Part < State.UF; ++Part) 9838 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 9839 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, 9840 VPIteration(Part, Lane), IsPredicated, 9841 State); 9842 } 9843 9844 void VPBranchOnMaskRecipe::execute(VPTransformState &State) { 9845 assert(State.Instance && "Branch on Mask works only on single instance."); 9846 9847 unsigned Part = State.Instance->Part; 9848 unsigned Lane = State.Instance->Lane.getKnownLane(); 9849 9850 Value *ConditionBit = nullptr; 9851 VPValue *BlockInMask = getMask(); 9852 if (BlockInMask) { 9853 ConditionBit = State.get(BlockInMask, Part); 9854 if (ConditionBit->getType()->isVectorTy()) 9855 ConditionBit = State.Builder.CreateExtractElement( 9856 ConditionBit, State.Builder.getInt32(Lane)); 9857 } else // Block in mask is all-one. 9858 ConditionBit = State.Builder.getTrue(); 9859 9860 // Replace the temporary unreachable terminator with a new conditional branch, 9861 // whose two destinations will be set later when they are created. 9862 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 9863 assert(isa<UnreachableInst>(CurrentTerminator) && 9864 "Expected to replace unreachable terminator with conditional branch."); 9865 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 9866 CondBr->setSuccessor(0, nullptr); 9867 ReplaceInstWithInst(CurrentTerminator, CondBr); 9868 } 9869 9870 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 9871 assert(State.Instance && "Predicated instruction PHI works per instance."); 9872 Instruction *ScalarPredInst = 9873 cast<Instruction>(State.get(getOperand(0), *State.Instance)); 9874 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 9875 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 9876 assert(PredicatingBB && "Predicated block has no single predecessor."); 9877 assert(isa<VPReplicateRecipe>(getOperand(0)) && 9878 "operand must be VPReplicateRecipe"); 9879 9880 // By current pack/unpack logic we need to generate only a single phi node: if 9881 // a vector value for the predicated instruction exists at this point it means 9882 // the instruction has vector users only, and a phi for the vector value is 9883 // needed. In this case the recipe of the predicated instruction is marked to 9884 // also do that packing, thereby "hoisting" the insert-element sequence. 9885 // Otherwise, a phi node for the scalar value is needed.
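// Shape of the phi being generated (rough sketch, names invented):
//   %merged = phi <VF x Ty> [ %vec.before.insert, %predicating.bb ],
//                           [ %vec.with.lane,     %predicated.bb ]
// in the packed case, or a scalar phi of [ poison, %predicating.bb ] and the
// predicated scalar result otherwise.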
9886 unsigned Part = State.Instance->Part; 9887 if (State.hasVectorValue(getOperand(0), Part)) { 9888 Value *VectorValue = State.get(getOperand(0), Part); 9889 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9890 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9891 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9892 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9893 if (State.hasVectorValue(this, Part)) 9894 State.reset(this, VPhi, Part); 9895 else 9896 State.set(this, VPhi, Part); 9897 // NOTE: Currently we need to update the value of the operand, so the next 9898 // predicated iteration inserts its generated value in the correct vector. 9899 State.reset(getOperand(0), VPhi, Part); 9900 } else { 9901 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); 9902 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9903 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), 9904 PredicatingBB); 9905 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9906 if (State.hasScalarValue(this, *State.Instance)) 9907 State.reset(this, Phi, *State.Instance); 9908 else 9909 State.set(this, Phi, *State.Instance); 9910 // NOTE: Currently we need to update the value of the operand, so the next 9911 // predicated iteration inserts its generated value in the correct vector. 9912 State.reset(getOperand(0), Phi, *State.Instance); 9913 } 9914 } 9915 9916 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9917 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9918 9919 // Attempt to issue a wide load. 9920 LoadInst *LI = dyn_cast<LoadInst>(&Ingredient); 9921 StoreInst *SI = dyn_cast<StoreInst>(&Ingredient); 9922 9923 assert((LI || SI) && "Invalid Load/Store instruction"); 9924 assert((!SI || StoredValue) && "No stored value provided for widened store"); 9925 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 9926 9927 Type *ScalarDataTy = getLoadStoreType(&Ingredient); 9928 9929 auto *DataTy = VectorType::get(ScalarDataTy, State.VF); 9930 const Align Alignment = getLoadStoreAlignment(&Ingredient); 9931 bool CreateGatherScatter = !Consecutive; 9932 9933 auto &Builder = State.Builder; 9934 InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF); 9935 bool isMaskRequired = getMask(); 9936 if (isMaskRequired) 9937 for (unsigned Part = 0; Part < State.UF; ++Part) 9938 BlockInMaskParts[Part] = State.get(getMask(), Part); 9939 9940 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 9941 // Calculate the pointer for the specific unroll-part. 9942 GetElementPtrInst *PartPtr = nullptr; 9943 9944 bool InBounds = false; 9945 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 9946 InBounds = gep->isInBounds(); 9947 if (Reverse) { 9948 // If the address is consecutive but reversed, then the 9949 // wide store needs to start at the last vector element. 
9950 // RunTimeVF = VScale * VF.getKnownMinValue() 9951 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 9952 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF); 9953 // NumElt = -Part * RunTimeVF 9954 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 9955 // LastLane = 1 - RunTimeVF 9956 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 9957 PartPtr = 9958 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 9959 PartPtr->setIsInBounds(InBounds); 9960 PartPtr = cast<GetElementPtrInst>( 9961 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 9962 PartPtr->setIsInBounds(InBounds); 9963 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 9964 BlockInMaskParts[Part] = 9965 Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse"); 9966 } else { 9967 Value *Increment = 9968 createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part); 9969 PartPtr = cast<GetElementPtrInst>( 9970 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 9971 PartPtr->setIsInBounds(InBounds); 9972 } 9973 9974 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 9975 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 9976 }; 9977 9978 // Handle Stores: 9979 if (SI) { 9980 State.ILV->setDebugLocFromInst(SI); 9981 9982 for (unsigned Part = 0; Part < State.UF; ++Part) { 9983 Instruction *NewSI = nullptr; 9984 Value *StoredVal = State.get(StoredValue, Part); 9985 if (CreateGatherScatter) { 9986 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 9987 Value *VectorGep = State.get(getAddr(), Part); 9988 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 9989 MaskPart); 9990 } else { 9991 if (Reverse) { 9992 // If we store to reverse consecutive memory locations, then we need 9993 // to reverse the order of elements in the stored value. 9994 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse"); 9995 // We don't want to update the value in the map as it might be used in 9996 // another expression. So don't call resetVectorValue(StoredVal). 9997 } 9998 auto *VecPtr = 9999 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 10000 if (isMaskRequired) 10001 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 10002 BlockInMaskParts[Part]); 10003 else 10004 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 10005 } 10006 State.ILV->addMetadata(NewSI, SI); 10007 } 10008 return; 10009 } 10010 10011 // Handle loads. 10012 assert(LI && "Must have a load instruction"); 10013 State.ILV->setDebugLocFromInst(LI); 10014 for (unsigned Part = 0; Part < State.UF; ++Part) { 10015 Value *NewLI; 10016 if (CreateGatherScatter) { 10017 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 10018 Value *VectorGep = State.get(getAddr(), Part); 10019 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, 10020 nullptr, "wide.masked.gather"); 10021 State.ILV->addMetadata(NewLI, LI); 10022 } else { 10023 auto *VecPtr = 10024 CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0))); 10025 if (isMaskRequired) 10026 NewLI = Builder.CreateMaskedLoad( 10027 DataTy, VecPtr, Alignment, BlockInMaskParts[Part], 10028 PoisonValue::get(DataTy), "wide.masked.load"); 10029 else 10030 NewLI = 10031 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 10032 10033 // Add metadata to the load, but setVectorValue to the reverse shuffle. 
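// Worked example for the reverse case (illustrative, fixed VF = 4, Part = 0):
// CreateVecPtr above produced a pointer to Ptr[-3], the wide load read
// elements [-3 .. 0], and the 'reverse' shuffle below restores the original
// (descending) iteration order before the value is recorded in State.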
10034 State.ILV->addMetadata(NewLI, LI); 10035 if (Reverse) 10036 NewLI = Builder.CreateVectorReverse(NewLI, "reverse"); 10037 } 10038 10039 State.set(this, NewLI, Part); 10040 } 10041 } 10042 10043 // Determine how to lower the scalar epilogue, which depends on 1) optimising 10044 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 10045 // predication, and 4) a TTI hook that analyses whether the loop is suitable 10046 // for predication. 10047 static ScalarEpilogueLowering getScalarEpilogueLowering( 10048 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 10049 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 10050 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 10051 LoopVectorizationLegality &LVL) { 10052 // 1) OptSize takes precedence over all other options, i.e. if this is set, 10053 // don't look at hints or options, and don't request a scalar epilogue. 10054 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 10055 // LoopAccessInfo (due to code dependency and not being able to reliably get 10056 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 10057 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 10058 // versioning when the vectorization is forced, unlike hasOptSize. So revert 10059 // back to the old way and vectorize with versioning when forced. See D81345.) 10060 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 10061 PGSOQueryType::IRPass) && 10062 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 10063 return CM_ScalarEpilogueNotAllowedOptSize; 10064 10065 // 2) If set, obey the directives 10066 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 10067 switch (PreferPredicateOverEpilogue) { 10068 case PreferPredicateTy::ScalarEpilogue: 10069 return CM_ScalarEpilogueAllowed; 10070 case PreferPredicateTy::PredicateElseScalarEpilogue: 10071 return CM_ScalarEpilogueNotNeededUsePredicate; 10072 case PreferPredicateTy::PredicateOrDontVectorize: 10073 return CM_ScalarEpilogueNotAllowedUsePredicate; 10074 }; 10075 } 10076 10077 // 3) If set, obey the hints 10078 switch (Hints.getPredicate()) { 10079 case LoopVectorizeHints::FK_Enabled: 10080 return CM_ScalarEpilogueNotNeededUsePredicate; 10081 case LoopVectorizeHints::FK_Disabled: 10082 return CM_ScalarEpilogueAllowed; 10083 }; 10084 10085 // 4) if the TTI hook indicates this is profitable, request predication. 10086 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 10087 LVL.getLAI())) 10088 return CM_ScalarEpilogueNotNeededUsePredicate; 10089 10090 return CM_ScalarEpilogueAllowed; 10091 } 10092 10093 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 10094 // If Values have been set for this Def return the one relevant for \p Part. 10095 if (hasVectorValue(Def, Part)) 10096 return Data.PerPartOutput[Def][Part]; 10097 10098 if (!hasScalarValue(Def, {Part, 0})) { 10099 Value *IRV = Def->getLiveInIRValue(); 10100 Value *B = ILV->getBroadcastInstrs(IRV); 10101 set(Def, B, Part); 10102 return B; 10103 } 10104 10105 Value *ScalarValue = get(Def, {Part, 0}); 10106 // If we aren't vectorizing, we can just copy the scalar map values over 10107 // to the vector map. 10108 if (VF.isScalar()) { 10109 set(Def, ScalarValue, Part); 10110 return ScalarValue; 10111 } 10112 10113 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 10114 bool IsUniform = RepR && RepR->isUniform(); 10115 10116 unsigned LastLane = IsUniform ? 
0 : VF.getKnownMinValue() - 1; 10117 // Check if there is a scalar value for the selected lane. 10118 if (!hasScalarValue(Def, {Part, LastLane})) { 10119 // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. 10120 assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) && 10121 "unexpected recipe found to be invariant"); 10122 IsUniform = true; 10123 LastLane = 0; 10124 } 10125 10126 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane})); 10127 // Set the insert point after the last scalarized instruction or after the 10128 // last PHI, if LastInst is a PHI. This ensures the insertelement sequence 10129 // will directly follow the scalar definitions. 10130 auto OldIP = Builder.saveIP(); 10131 auto NewIP = 10132 isa<PHINode>(LastInst) 10133 ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI()) 10134 : std::next(BasicBlock::iterator(LastInst)); 10135 Builder.SetInsertPoint(&*NewIP); 10136 10137 // However, if we are vectorizing, we need to construct the vector values. 10138 // If the value is known to be uniform after vectorization, we can just 10139 // broadcast the scalar value corresponding to lane zero for each unroll 10140 // iteration. Otherwise, we construct the vector values using 10141 // insertelement instructions. Since the resulting vectors are stored in 10142 // State, we will only generate the insertelements once. 10143 Value *VectorValue = nullptr; 10144 if (IsUniform) { 10145 VectorValue = ILV->getBroadcastInstrs(ScalarValue); 10146 set(Def, VectorValue, Part); 10147 } else { 10148 // Initialize packing with insertelements to start from poison. 10149 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 10150 Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF)); 10151 set(Def, Undef, Part); 10152 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) 10153 ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this); 10154 VectorValue = get(Def, Part); 10155 } 10156 Builder.restoreIP(OldIP); 10157 return VectorValue; 10158 } 10159 10160 // Process the loop in the VPlan-native vectorization path. This path builds 10161 // VPlan upfront in the vectorization pipeline, which makes it possible to apply 10162 // VPlan-to-VPlan transformations from the very beginning without modifying the 10163 // input LLVM IR. 10164 static bool processLoopInVPlanNativePath( 10165 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 10166 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 10167 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 10168 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 10169 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, 10170 LoopVectorizationRequirements &Requirements) { 10171 10172 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { 10173 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); 10174 return false; 10175 } 10176 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 10177 Function *F = L->getHeader()->getParent(); 10178 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 10179 10180 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10181 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 10182 10183 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 10184 &Hints, IAI); 10185 // Use the planner for outer loop vectorization. 10186 // TODO: CM is not used at this point inside the planner.
Turn CM into an 10187 // optional argument if we don't need it in the future. 10188 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints, 10189 Requirements, ORE); 10190 10191 // Get user vectorization factor. 10192 ElementCount UserVF = Hints.getWidth(); 10193 10194 CM.collectElementTypesForWidening(); 10195 10196 // Plan how to best vectorize, return the best VF and its cost. 10197 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 10198 10199 // If we are stress testing VPlan builds, do not attempt to generate vector 10200 // code. Masked vector code generation support will follow soon. 10201 // Also, do not attempt to vectorize if no vector code will be produced. 10202 if (VPlanBuildStressTest || EnableVPlanPredication || 10203 VectorizationFactor::Disabled() == VF) 10204 return false; 10205 10206 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10207 10208 { 10209 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10210 F->getParent()->getDataLayout()); 10211 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 10212 &CM, BFI, PSI, Checks); 10213 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 10214 << L->getHeader()->getParent()->getName() << "\"\n"); 10215 LVP.executePlan(VF.Width, 1, BestPlan, LB, DT); 10216 } 10217 10218 // Mark the loop as already vectorized to avoid vectorizing again. 10219 Hints.setAlreadyVectorized(); 10220 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10221 return true; 10222 } 10223 10224 // Emit a remark if there are stores to floats that required a floating point 10225 // extension. If the vectorized loop was generated with floating point there 10226 // will be a performance penalty from the conversion overhead and the change in 10227 // the vector width. 10228 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) { 10229 SmallVector<Instruction *, 4> Worklist; 10230 for (BasicBlock *BB : L->getBlocks()) { 10231 for (Instruction &Inst : *BB) { 10232 if (auto *S = dyn_cast<StoreInst>(&Inst)) { 10233 if (S->getValueOperand()->getType()->isFloatTy()) 10234 Worklist.push_back(S); 10235 } 10236 } 10237 } 10238 10239 // Traverse the floating point stores upwards, searching for floating point 10240 // conversions. 10241 SmallPtrSet<const Instruction *, 4> Visited; 10242 SmallPtrSet<const Instruction *, 4> EmittedRemark; 10243 while (!Worklist.empty()) { 10244 auto *I = Worklist.pop_back_val(); 10245 if (!L->contains(I)) 10246 continue; 10247 if (!Visited.insert(I).second) 10248 continue; 10249 10250 // Emit a remark if the floating point store required a floating 10251 // point conversion. 10252 // TODO: More work could be done to identify the root cause such as a 10253 // constant or a function return type and point the user to it. 10254 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second) 10255 ORE->emit([&]() { 10256 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision", 10257 I->getDebugLoc(), L->getHeader()) 10258 << "floating point conversion changes vector width. 
" 10259 << "Mixed floating point precision requires an up/down " 10260 << "cast that will negatively impact performance."; 10261 }); 10262 10263 for (Use &Op : I->operands()) 10264 if (auto *OpI = dyn_cast<Instruction>(Op)) 10265 Worklist.push_back(OpI); 10266 } 10267 } 10268 10269 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 10270 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 10271 !EnableLoopInterleaving), 10272 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 10273 !EnableLoopVectorization) {} 10274 10275 bool LoopVectorizePass::processLoop(Loop *L) { 10276 assert((EnableVPlanNativePath || L->isInnermost()) && 10277 "VPlan-native path is not enabled. Only process inner loops."); 10278 10279 #ifndef NDEBUG 10280 const std::string DebugLocStr = getDebugLocString(L); 10281 #endif /* NDEBUG */ 10282 10283 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 10284 << L->getHeader()->getParent()->getName() << "\" from " 10285 << DebugLocStr << "\n"); 10286 10287 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI); 10288 10289 LLVM_DEBUG( 10290 dbgs() << "LV: Loop hints:" 10291 << " force=" 10292 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 10293 ? "disabled" 10294 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 10295 ? "enabled" 10296 : "?")) 10297 << " width=" << Hints.getWidth() 10298 << " interleave=" << Hints.getInterleave() << "\n"); 10299 10300 // Function containing loop 10301 Function *F = L->getHeader()->getParent(); 10302 10303 // Looking at the diagnostic output is the only way to determine if a loop 10304 // was vectorized (other than looking at the IR or machine code), so it 10305 // is important to generate an optimization remark for each loop. Most of 10306 // these messages are generated as OptimizationRemarkAnalysis. Remarks 10307 // generated as OptimizationRemark and OptimizationRemarkMissed are 10308 // less verbose reporting vectorized loops and unvectorized loops that may 10309 // benefit from vectorization, respectively. 10310 10311 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 10312 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 10313 return false; 10314 } 10315 10316 PredicatedScalarEvolution PSE(*SE, *L); 10317 10318 // Check if it is legal to vectorize the loop. 10319 LoopVectorizationRequirements Requirements; 10320 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 10321 &Requirements, &Hints, DB, AC, BFI, PSI); 10322 if (!LVL.canVectorize(EnableVPlanNativePath)) { 10323 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 10324 Hints.emitRemarkWithHints(); 10325 return false; 10326 } 10327 10328 // Check the function attributes and profiles to find out if this function 10329 // should be optimized for size. 10330 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10331 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 10332 10333 // Entrance to the VPlan-native vectorization path. Outer loops are processed 10334 // here. They may require CFG and instruction level transformations before 10335 // even evaluating whether vectorization is profitable. Since we cannot modify 10336 // the incoming IR, we need to build VPlan upfront in the vectorization 10337 // pipeline. 
10338 if (!L->isInnermost()) 10339 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 10340 ORE, BFI, PSI, Hints, Requirements); 10341 10342 assert(L->isInnermost() && "Inner loop expected."); 10343 10344 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 10345 // count by optimizing for size, to minimize overheads. 10346 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 10347 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 10348 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 10349 << "This loop is worth vectorizing only if no scalar " 10350 << "iteration overheads are incurred."); 10351 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 10352 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 10353 else { 10354 LLVM_DEBUG(dbgs() << "\n"); 10355 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 10356 } 10357 } 10358 10359 // Check the function attributes to see if implicit floats are allowed. 10360 // FIXME: This check doesn't seem possibly correct -- what if the loop is 10361 // an integer loop and the vector instructions selected are purely integer 10362 // vector instructions? 10363 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 10364 reportVectorizationFailure( 10365 "Can't vectorize when the NoImplicitFloat attribute is used", 10366 "loop not vectorized due to NoImplicitFloat attribute", 10367 "NoImplicitFloat", ORE, L); 10368 Hints.emitRemarkWithHints(); 10369 return false; 10370 } 10371 10372 // Check if the target supports potentially unsafe FP vectorization. 10373 // FIXME: Add a check for the type of safety issue (denormal, signaling) 10374 // for the target we're vectorizing for, to make sure none of the 10375 // additional fp-math flags can help. 10376 if (Hints.isPotentiallyUnsafe() && 10377 TTI->isFPVectorizationPotentiallyUnsafe()) { 10378 reportVectorizationFailure( 10379 "Potentially unsafe FP op prevents vectorization", 10380 "loop not vectorized due to unsafe FP support.", 10381 "UnsafeFP", ORE, L); 10382 Hints.emitRemarkWithHints(); 10383 return false; 10384 } 10385 10386 bool AllowOrderedReductions; 10387 // If the flag is set, use that instead and override the TTI behaviour. 10388 if (ForceOrderedReductions.getNumOccurrences() > 0) 10389 AllowOrderedReductions = ForceOrderedReductions; 10390 else 10391 AllowOrderedReductions = TTI->enableOrderedReductions(); 10392 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) { 10393 ORE->emit([&]() { 10394 auto *ExactFPMathInst = Requirements.getExactFPInst(); 10395 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps", 10396 ExactFPMathInst->getDebugLoc(), 10397 ExactFPMathInst->getParent()) 10398 << "loop not vectorized: cannot prove it is safe to reorder " 10399 "floating-point operations"; 10400 }); 10401 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to " 10402 "reorder floating-point operations\n"); 10403 Hints.emitRemarkWithHints(); 10404 return false; 10405 } 10406 10407 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 10408 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 10409 10410 // If an override option has been passed in for interleaved accesses, use it. 10411 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 10412 UseInterleaved = EnableInterleavedMemAccesses; 10413 10414 // Analyze interleaved memory accesses. 
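// For instance (illustrative): a loop that reads both a[2*i] and a[2*i + 1]
// forms an interleave group with factor 2, which can later be lowered to a
// single wide load plus shufflevectors, subject to the target hook and the
// EnableInterleavedMemAccesses override handled above.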
10415 if (UseInterleaved) { 10416 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 10417 } 10418 10419 // Use the cost model. 10420 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 10421 F, &Hints, IAI); 10422 CM.collectValuesToIgnore(); 10423 CM.collectElementTypesForWidening(); 10424 10425 // Use the planner for vectorization. 10426 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints, 10427 Requirements, ORE); 10428 10429 // Get user vectorization factor and interleave count. 10430 ElementCount UserVF = Hints.getWidth(); 10431 unsigned UserIC = Hints.getInterleave(); 10432 10433 // Plan how to best vectorize, return the best VF and its cost. 10434 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC); 10435 10436 VectorizationFactor VF = VectorizationFactor::Disabled(); 10437 unsigned IC = 1; 10438 10439 if (MaybeVF) { 10440 VF = *MaybeVF; 10441 // Select the interleave count. 10442 IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue()); 10443 } 10444 10445 // Identify the diagnostic messages that should be produced. 10446 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg; 10447 bool VectorizeLoop = true, InterleaveLoop = true; 10448 if (VF.Width.isScalar()) { 10449 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); 10450 VecDiagMsg = std::make_pair( 10451 "VectorizationNotBeneficial", 10452 "the cost-model indicates that vectorization is not beneficial"); 10453 VectorizeLoop = false; 10454 } 10455 10456 if (!MaybeVF && UserIC > 1) { 10457 // Tell the user interleaving was avoided up-front, despite being explicitly 10458 // requested. 10459 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and " 10460 "interleaving should be avoided up front\n"); 10461 IntDiagMsg = std::make_pair( 10462 "InterleavingAvoided", 10463 "Ignoring UserIC, because interleaving was avoided up front"); 10464 InterleaveLoop = false; 10465 } else if (IC == 1 && UserIC <= 1) { 10466 // Tell the user interleaving is not beneficial. 10467 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n"); 10468 IntDiagMsg = std::make_pair( 10469 "InterleavingNotBeneficial", 10470 "the cost-model indicates that interleaving is not beneficial"); 10471 InterleaveLoop = false; 10472 if (UserIC == 1) { 10473 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled"; 10474 IntDiagMsg.second += 10475 " and is explicitly disabled or interleave count is set to 1"; 10476 } 10477 } else if (IC > 1 && UserIC == 1) { 10478 // Tell the user interleaving is beneficial, but it is explicitly disabled. 10479 LLVM_DEBUG( 10480 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled."); 10481 IntDiagMsg = std::make_pair( 10482 "InterleavingBeneficialButDisabled", 10483 "the cost-model indicates that interleaving is beneficial " 10484 "but is explicitly disabled or interleave count is set to 1"); 10485 InterleaveLoop = false; 10486 } 10487 10488 // Override IC if user provided an interleave count. 10489 IC = UserIC > 0 ? UserIC : IC; 10490 10491 // Emit diagnostic messages, if any. 10492 const char *VAPassName = Hints.vectorizeAnalysisPassName(); 10493 if (!VectorizeLoop && !InterleaveLoop) { 10494 // Do not vectorize or interleave the loop.
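// (Both remarks are emitted in this branch, one for the vectorization
// decision and one for the interleaving decision, so the user can see why
// nothing was done before we bail out.)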
10495 ORE->emit([&]() { 10496 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 10497 L->getStartLoc(), L->getHeader()) 10498 << VecDiagMsg.second; 10499 }); 10500 ORE->emit([&]() { 10501 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 10502 L->getStartLoc(), L->getHeader()) 10503 << IntDiagMsg.second; 10504 }); 10505 return false; 10506 } else if (!VectorizeLoop && InterleaveLoop) { 10507 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10508 ORE->emit([&]() { 10509 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 10510 L->getStartLoc(), L->getHeader()) 10511 << VecDiagMsg.second; 10512 }); 10513 } else if (VectorizeLoop && !InterleaveLoop) { 10514 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10515 << ") in " << DebugLocStr << '\n'); 10516 ORE->emit([&]() { 10517 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 10518 L->getStartLoc(), L->getHeader()) 10519 << IntDiagMsg.second; 10520 }); 10521 } else if (VectorizeLoop && InterleaveLoop) { 10522 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10523 << ") in " << DebugLocStr << '\n'); 10524 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10525 } 10526 10527 bool DisableRuntimeUnroll = false; 10528 MDNode *OrigLoopID = L->getLoopID(); 10529 { 10530 // Optimistically generate runtime checks. Drop them if they turn out to not 10531 // be profitable. Limit the scope of Checks, so the cleanup happens 10532 // immediately after vector codegeneration is done. 10533 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10534 F->getParent()->getDataLayout()); 10535 if (!VF.Width.isScalar() || IC > 1) 10536 Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate()); 10537 10538 using namespace ore; 10539 if (!VectorizeLoop) { 10540 assert(IC > 1 && "interleave count should not be 1 or 0"); 10541 // If we decided that it is not legal to vectorize the loop, then 10542 // interleave it. 10543 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, 10544 &CM, BFI, PSI, Checks); 10545 10546 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10547 LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT); 10548 10549 ORE->emit([&]() { 10550 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 10551 L->getHeader()) 10552 << "interleaved loop (interleaved count: " 10553 << NV("InterleaveCount", IC) << ")"; 10554 }); 10555 } else { 10556 // If we decided that it is *legal* to vectorize the loop, then do it. 10557 10558 // Consider vectorizing the epilogue too if it's profitable. 10559 VectorizationFactor EpilogueVF = 10560 CM.selectEpilogueVectorizationFactor(VF.Width, LVP); 10561 if (EpilogueVF.Width.isVector()) { 10562 10563 // The first pass vectorizes the main loop and creates a scalar epilogue 10564 // to be vectorized by executing the plan (potentially with a different 10565 // factor) again shortly afterwards. 10566 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1); 10567 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, 10568 EPI, &LVL, &CM, BFI, PSI, Checks); 10569 10570 VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF); 10571 LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV, 10572 DT); 10573 ++LoopsVectorized; 10574 10575 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10576 formLCSSARecursively(*L, *DT, LI, SE); 10577 10578 // Second pass vectorizes the epilogue and adjusts the control flow 10579 // edges from the first pass. 
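// Bookkeeping sketch for the second pass (descriptive): the EPI descriptor is
// reused so its 'main loop' fields now hold the epilogue VF/UF, the reduction
// phis of the epilogue plan are rewired below to resume from the values the
// main vector loop produced, and only then is the epilogue plan executed.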
10580 EPI.MainLoopVF = EPI.EpilogueVF; 10581 EPI.MainLoopUF = EPI.EpilogueUF; 10582 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC, 10583 ORE, EPI, &LVL, &CM, BFI, PSI, 10584 Checks); 10585 10586 VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF); 10587 10588 // Ensure that the start values for any VPReductionPHIRecipes are 10589 // updated before vectorising the epilogue loop. 10590 VPBasicBlock *Header = BestEpiPlan.getEntry()->getEntryBasicBlock(); 10591 for (VPRecipeBase &R : Header->phis()) { 10592 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) { 10593 if (auto *Resume = MainILV.getReductionResumeValue( 10594 ReductionPhi->getRecurrenceDescriptor())) { 10595 VPValue *StartVal = new VPValue(Resume); 10596 BestEpiPlan.addExternalDef(StartVal); 10597 ReductionPhi->setOperand(0, StartVal); 10598 } 10599 } 10600 } 10601 10602 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, 10603 DT); 10604 ++LoopsEpilogueVectorized; 10605 10606 if (!MainILV.areSafetyChecksAdded()) 10607 DisableRuntimeUnroll = true; 10608 } else { 10609 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 10610 &LVL, &CM, BFI, PSI, Checks); 10611 10612 VPlan &BestPlan = LVP.getBestPlanFor(VF.Width); 10613 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT); 10614 ++LoopsVectorized; 10615 10616 // Add metadata to disable runtime unrolling a scalar loop when there 10617 // are no runtime checks about strides and memory. A scalar loop that is 10618 // rarely used is not worth unrolling. 10619 if (!LB.areSafetyChecksAdded()) 10620 DisableRuntimeUnroll = true; 10621 } 10622 // Report the vectorization decision. 10623 ORE->emit([&]() { 10624 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 10625 L->getHeader()) 10626 << "vectorized loop (vectorization width: " 10627 << NV("VectorizationFactor", VF.Width) 10628 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 10629 }); 10630 } 10631 10632 if (ORE->allowExtraAnalysis(LV_NAME)) 10633 checkMixedPrecision(L, ORE); 10634 } 10635 10636 Optional<MDNode *> RemainderLoopID = 10637 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 10638 LLVMLoopVectorizeFollowupEpilogue}); 10639 if (RemainderLoopID.hasValue()) { 10640 L->setLoopID(RemainderLoopID.getValue()); 10641 } else { 10642 if (DisableRuntimeUnroll) 10643 AddRuntimeUnrollDisableMetaData(L); 10644 10645 // Mark the loop as already vectorized to avoid vectorizing again. 10646 Hints.setAlreadyVectorized(); 10647 } 10648 10649 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10650 return true; 10651 } 10652 10653 LoopVectorizeResult LoopVectorizePass::runImpl( 10654 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 10655 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 10656 DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_, 10657 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 10658 OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) { 10659 SE = &SE_; 10660 LI = &LI_; 10661 TTI = &TTI_; 10662 DT = &DT_; 10663 BFI = &BFI_; 10664 TLI = TLI_; 10665 AA = &AA_; 10666 AC = &AC_; 10667 GetLAA = &GetLAA_; 10668 DB = &DB_; 10669 ORE = &ORE_; 10670 PSI = PSI_; 10671 10672 // Don't attempt if 10673 // 1. the target claims to have no vector registers, and 10674 // 2. interleaving won't help ILP. 
// 10676 // The second condition is necessary because, even if the target has no 10677 // vector registers, loop vectorization may still enable scalar 10678 // interleaving. 10679 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) && 10680 TTI->getMaxInterleaveFactor(1) < 2) 10681 return LoopVectorizeResult(false, false); 10682 10683 bool Changed = false, CFGChanged = false; 10684 10685 // The vectorizer requires loops to be in simplified form. 10686 // Since simplification may add new inner loops, it has to run before the 10687 // legality and profitability checks. This means running the loop vectorizer 10688 // will simplify all loops, regardless of whether anything ends up being 10689 // vectorized. 10690 for (auto &L : *LI) 10691 Changed |= CFGChanged |= 10692 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10693 10694 // Build up a worklist of inner-loops to vectorize. This is necessary as 10695 // the act of vectorizing or partially unrolling a loop creates new loops 10696 // and can invalidate iterators across the loops. 10697 SmallVector<Loop *, 8> Worklist; 10698 10699 for (Loop *L : *LI) 10700 collectSupportedLoops(*L, LI, ORE, Worklist); 10701 10702 LoopsAnalyzed += Worklist.size(); 10703 10704 // Now walk the identified inner loops. 10705 while (!Worklist.empty()) { 10706 Loop *L = Worklist.pop_back_val(); 10707 10708 // For the inner loops we actually process, form LCSSA to simplify the 10709 // transform. 10710 Changed |= formLCSSARecursively(*L, *DT, LI, SE); 10711 10712 Changed |= CFGChanged |= processLoop(L); 10713 } 10714 10715 // Process each loop nest in the function. 10716 return LoopVectorizeResult(Changed, CFGChanged); 10717 } 10718 10719 PreservedAnalyses LoopVectorizePass::run(Function &F, 10720 FunctionAnalysisManager &AM) { 10721 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 10722 auto &LI = AM.getResult<LoopAnalysis>(F); 10723 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 10724 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 10725 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 10726 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 10727 auto &AA = AM.getResult<AAManager>(F); 10728 auto &AC = AM.getResult<AssumptionAnalysis>(F); 10729 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 10730 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 10731 10732 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 10733 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 10734 [&](Loop &L) -> const LoopAccessInfo & { 10735 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, 10736 TLI, TTI, nullptr, nullptr, nullptr}; 10737 return LAM.getResult<LoopAccessAnalysis>(L, AR); 10738 }; 10739 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F); 10740 ProfileSummaryInfo *PSI = 10741 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent()); 10742 LoopVectorizeResult Result = 10743 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI); 10744 if (!Result.MadeAnyChange) 10745 return PreservedAnalyses::all(); 10746 PreservedAnalyses PA; 10747 10748 // We currently do not preserve loopinfo/dominator analyses with outer loop 10749 // vectorization. Until this is addressed, mark these analyses as preserved 10750 // only for non-VPlan-native path. 10751 // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
10752 if (!EnableVPlanNativePath) { 10753 PA.preserve<LoopAnalysis>(); 10754 PA.preserve<DominatorTreeAnalysis>(); 10755 } 10756 10757 if (Result.MadeCFGChange) { 10758 // Making CFG changes likely means a loop got vectorized. Indicate that 10759 // extra simplification passes should be run. 10760 // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only 10761 // be run if runtime checks have been added. 10762 AM.getResult<ShouldRunExtraVectorPasses>(F); 10763 PA.preserve<ShouldRunExtraVectorPasses>(); 10764 } else { 10765 PA.preserveSet<CFGAnalyses>(); 10766 } 10767 return PA; 10768 } 10769 10770 void LoopVectorizePass::printPipeline( 10771 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) { 10772 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline( 10773 OS, MapClassName2PassName); 10774 10775 OS << "<"; 10776 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;"; 10777 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;"; 10778 OS << ">"; 10779 } 10780