//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
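//
// As a simple illustration: with a vectorization factor (VF) of 4, a scalar
// loop such as
//
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + 42;
//
// is conceptually rewritten so that each wide iteration handles four
// consecutive elements with one vector load, add and store,
//
//   for (int i = 0; i + 4 <= n; i += 4)
//     a[i..i+3] = b[i..i+3] + <42, 42, 42, 42>;
//
// with the remaining n % 4 iterations executed by a scalar epilogue loop, or
// folded into the vector body under predication (tail-folding).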
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD.
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
"llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 144 #include <algorithm> 145 #include <cassert> 146 #include <cstdint> 147 #include <cstdlib> 148 #include <functional> 149 #include <iterator> 150 #include <limits> 151 #include <memory> 152 #include <string> 153 #include <tuple> 154 #include <utility> 155 156 using namespace llvm; 157 158 #define LV_NAME "loop-vectorize" 159 #define DEBUG_TYPE LV_NAME 160 161 #ifndef NDEBUG 162 const char VerboseDebug[] = DEBUG_TYPE "-verbose"; 163 #endif 164 165 /// @{ 166 /// Metadata attribute names 167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all"; 168 const char LLVMLoopVectorizeFollowupVectorized[] = 169 "llvm.loop.vectorize.followup_vectorized"; 170 const char LLVMLoopVectorizeFollowupEpilogue[] = 171 "llvm.loop.vectorize.followup_epilogue"; 172 /// @} 173 174 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized"); 177 178 static cl::opt<bool> EnableEpilogueVectorization( 179 "enable-epilogue-vectorization", cl::init(true), cl::Hidden, 180 cl::desc("Enable vectorization of epilogue loops.")); 181 182 static cl::opt<unsigned> EpilogueVectorizationForceVF( 183 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, 184 cl::desc("When epilogue vectorization is enabled, and a value greater than " 185 "1 is specified, forces the given VF for all applicable epilogue " 186 "loops.")); 187 188 static cl::opt<unsigned> EpilogueVectorizationMinVF( 189 "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, 190 cl::desc("Only loops with vectorization factor equal to or larger than " 191 "the specified value are considered for epilogue vectorization.")); 192 193 /// Loops with a known constant trip count below this number are vectorized only 194 /// if no scalar iteration overheads are incurred. 195 static cl::opt<unsigned> TinyTripCountVectorThreshold( 196 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 197 cl::desc("Loops with a constant trip count that is smaller than this " 198 "value are vectorized only if no scalar iteration overheads " 199 "are incurred.")); 200 201 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold( 202 "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden, 203 cl::desc("The maximum allowed number of runtime memory checks with a " 204 "vectorize(enable) pragma.")); 205 206 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired, 207 // that predication is preferred, and this lists all options. I.e., the 208 // vectorizer will try to fold the tail-loop (epilogue) into the vector body 209 // and predicate the instructions accordingly. 
// If tail-folding fails, there are different fallback strategies depending on
// these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));
Mostly " 279 "useful for getting consistent testing.")); 280 281 static cl::opt<bool> ForceTargetSupportsScalableVectors( 282 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, 283 cl::desc( 284 "Pretend that scalable vectors are supported, even if the target does " 285 "not support them. This flag should only be used for testing.")); 286 287 static cl::opt<unsigned> SmallLoopCost( 288 "small-loop-cost", cl::init(20), cl::Hidden, 289 cl::desc( 290 "The cost of a loop that is considered 'small' by the interleaver.")); 291 292 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 293 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 294 cl::desc("Enable the use of the block frequency analysis to access PGO " 295 "heuristics minimizing code growth in cold regions and being more " 296 "aggressive in hot regions.")); 297 298 // Runtime interleave loops for load/store throughput. 299 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 300 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 301 cl::desc( 302 "Enable runtime interleaving until load/store ports are saturated")); 303 304 /// Interleave small loops with scalar reductions. 305 static cl::opt<bool> InterleaveSmallLoopScalarReduction( 306 "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, 307 cl::desc("Enable interleaving for loops with small iteration counts that " 308 "contain scalar reductions to expose ILP.")); 309 310 /// The number of stores in a loop that are allowed to need predication. 311 static cl::opt<unsigned> NumberOfStoresToPredicate( 312 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 313 cl::desc("Max number of stores to be predicated behind an if.")); 314 315 static cl::opt<bool> EnableIndVarRegisterHeur( 316 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 317 cl::desc("Count the induction variable only once when interleaving")); 318 319 static cl::opt<bool> EnableCondStoresVectorization( 320 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 321 cl::desc("Enable if predication of stores during vectorization.")); 322 323 static cl::opt<unsigned> MaxNestedScalarReductionIC( 324 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 325 cl::desc("The maximum interleave count to use when interleaving a scalar " 326 "reduction in a nested loop.")); 327 328 static cl::opt<bool> 329 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), 330 cl::Hidden, 331 cl::desc("Prefer in-loop vector reductions, " 332 "overriding the targets preference.")); 333 334 static cl::opt<bool> ForceOrderedReductions( 335 "force-ordered-reductions", cl::init(false), cl::Hidden, 336 cl::desc("Enable the vectorisation of loops with in-order (strict) " 337 "FP reductions")); 338 339 static cl::opt<bool> PreferPredicatedReductionSelect( 340 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, 341 cl::desc( 342 "Prefer predicating a reduction operation over an after loop select.")); 343 344 cl::opt<bool> EnableVPlanNativePath( 345 "enable-vplan-native-path", cl::init(false), cl::Hidden, 346 cl::desc("Enable VPlan-native vectorization path with " 347 "support for outer loop vectorization.")); 348 349 // FIXME: Remove this switch once we have divergence analysis. Currently we 350 // assume divergent non-backedge branches when this switch is true. 
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));

cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
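// For example, on x86-64 the type i1 is irregular (8-bit alloc size vs.
// 1-bit type size), as is x86_fp80 (128-bit alloc size vs. 80-bit type
// size), while i32 (32 vs. 32 bits) is regular.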
/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
/// 1) Returns exact trip count if it is known.
/// 2) Returns expected trip count according to profile data if any.
/// 3) Returns upper bound estimate if it is known.
/// 4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

AnalysisKey ShouldRunExtraVectorPasses::Key;

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found, for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single first-order recurrence or pointer induction PHINode in
  /// a block. This method handles the induction variable canonicalization. It
  /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive. Uses the VPValue operands from \p RepRecipe instead
  /// of \p Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, const InductionDescriptor &ID,
                             Value *Start, TruncInst *Trunc, VPValue *Def,
                             VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Set the debug location in the builder \p CustomBuilder using the debug
  /// location in \p V. If \p CustomBuilder is None then it uses the class
  /// member's Builder.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilder<> *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we
  /// are able to vectorize with strict in-order reductions for the given
  /// RdxDesc.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1,
  /// ...; this is needed because each iteration in the loop corresponds to a
  /// SIMD element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
                               VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPTransformState &State);
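  // For example, with VF = 4 and UF = 2, an integer induction with step 1
  // expands to the scalar steps {IV, IV+1, IV+2, IV+3} for part 0 and
  // {IV+4, IV+5, IV+6, IV+7} for part 1.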
  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID,
                              BasicBlock *VectorHeader) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off
  /// (given by \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and
  /// return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Collect poison-generating recipes that may generate a poison value that
  /// is used after vectorization, even when their operands are not poison.
  /// Those recipes meet the following conditions:
  /// * Contribute to the address computation of a recipe generating a widen
  ///   memory load/store (VPWidenMemoryInstructionRecipe or
  ///   VPInterleaveRecipe).
  /// * Such a widen memory load/store has at least one underlying Instruction
  ///   that is in a basic block that needs predication and after vectorization
  ///   the generated instruction won't be predicated.
  void collectPoisonGeneratingRecipes(VPTransformState &State);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};
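// With epilogue vectorization, the generated control flow roughly looks like:
//
//   iteration count check -> main vector loop (MainLoopVF, MainLoopUF)
//                         -> epilogue iteration count check
//                         -> epilogue vector loop (EpilogueVF, EpilogueUF)
//                         -> scalar remainder loop
//
// (a rough sketch; the actual skeleton also threads the SCEV and memory
// safety checks through these blocks).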
/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
  IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When an FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif
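// For a (hypothetical) DebugMsg of "loop contains a switch" with I == nullptr
// and the "Not vectorizing: " prefix used by reportVectorizationFailure below,
// the helper above produces the debug output:
//   LV: Not vectorizing: loop contains a switch.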
/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

/// Return a value for Step multiplied by VF.
static Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF,
                              int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}
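// For example, with Step = 2 and a scalable VF with a known minimum of 4,
// this emits "vscale * 8"; for the corresponding fixed VF it simply folds to
// the constant 8.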
namespace llvm {

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

static Value *getRuntimeVFAsFloat(IRBuilder<> &B, Type *FTy, ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {
  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. Collect the recipe if its underlying instruction has
      // poison-generating flags.
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && Instr->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);

      // Add new definitions to the worklist.
      for (VPValue *operand : CurRec->operands())
        if (VPDef *OpDef = operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
            Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize.
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}
  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way; the
  /// form of the instruction after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Collect all element types in the loop for which widening is needed.
  void collectElementTypesForWidening();
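  // For a reduction such as "s += a[i]", an in-loop reduction accumulates into
  // a scalar inside the loop (e.g. via a per-iteration vector reduction),
  // whereas the default strategy keeps a wide vector accumulator and reduces
  // it to a scalar once after the loop.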
  void collectInLoopReductions();

  /// Returns true if we should use strict in-order reductions for the given
  /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
  /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
  /// of FP operations.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
    return !Hints->allowReordering() && RdxDesc.isOrdered();
  }

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
    return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for a memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
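  /// An illustrative call (with hypothetical operands) would be:
  ///   setWideningDecision(Load, ElementCount::getFixed(4), CM_Widen,
  ///                       WidenedLoadCost);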
  void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
                           ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group.
    // But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() && "Expected VF to be a vector VF");
    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
    assert(VF.isVector() && "Expected VF >=2");
    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(ElementCount VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on the CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(ElementCount VF) {
    // Do the analysis once.
    if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports a masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
    return Legal->isConsecutivePtr(DataType, Ptr) &&
           TTI.isLegalMaskedStore(DataType, Alignment);
  }

  /// Returns true if the target machine supports a masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
    return Legal->isConsecutivePtr(DataType, Ptr) &&
           TTI.isLegalMaskedLoad(DataType, Alignment);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getLoadStoreType(V);
    Align Align = getLoadStoreAlignment(V);
    return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
           (SI && TTI.isLegalMaskedScatter(Ty, Align));
  }

  /// Returns true if the target machine supports all of the reduction
  /// variables found for the given VF.
  bool canVectorizeReductions(ElementCount VF) const {
    return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
      const RecurrenceDescriptor &RdxDesc = Reduction.second;
      return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
    }));
  }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if \p I will be
  /// scalarized with predication for that VF.
  bool isScalarWithPredication(Instruction *I) const;

  // Returns true if \p I is an instruction that will be predicated either
  // through scalar predication or masked load/store or masked gather/scatter.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I, bool IsKnownUniform = false) {
    // When we know the load is uniform and the original scalar loop was not
    // predicated we don't need to mark it as a predicated instruction. Any
    // vectorized blocks created when tail-folding are something artificial we
    // have introduced and we know there is always at least one active lane.
    // That's why we call Legal->blockNeedsPredication here, because it doesn't
    // query tail-folding.
    if (IsKnownUniform && isa<LoadInst>(I) &&
        !Legal->blockNeedsPredication(I->getParent()))
      return false;
    if (!blockNeedsPredicationForAnyReason(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool
  memoryInstructionCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool
  interleavedAccessCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if we're required to use a scalar epilogue for at least
  /// the final iteration of the original loop.
  bool requiresScalarEpilogue(ElementCount VF) const {
    if (!isScalarEpilogueAllowed())
      return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
    if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
      return true;
    return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is not allowed due to optsize or a
  /// loop hint annotation.
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns true if all loop blocks should be masked to fold the tail loop.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  /// Returns true if the instructions in this block require predication
  /// for any reason, e.g. because tail folding now requires a predicate
  /// or because the block in the original loop was predicated.
  bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
  /// nodes to the chain of instructions representing the reductions. Uses a
  /// MapVector to ensure deterministic iteration order.
  using ReductionChainMap =
      SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;

  /// Return the chain of instructions representing an inloop reduction.
  const ReductionChainMap &getInLoopReductionChains() const {
    return InLoopReductionChains;
  }

  /// Returns true if the Phi is part of an inloop reduction.
  bool isInLoopReduction(PHINode *Phi) const {
    return InLoopReductionChains.count(Phi);
  }

  /// Estimate the cost of an intrinsic call instruction CI if it were
  /// vectorized with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
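  /// For example, at VF = 4 a call to llvm.fabs.f32 would be costed as a
  /// single llvm.fabs.v4f32 call (an illustrative case; the exact cost comes
  /// from TTI).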
  InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;

  /// Estimate the cost of a call instruction CI if it were vectorized with
  /// factor VF. Return the cost of the instruction, including scalarization
  /// overhead if it's needed. The flag NeedToScalarize shows if the call needs
  /// to be scalarized, i.e. either a vector version isn't available or it is
  /// too expensive.
  InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
                                    bool &NeedToScalarize) const;

  /// Returns true if the per-lane cost of VectorizationFactor A is lower than
  /// that of B.
  bool isMoreProfitable(const VectorizationFactor &A,
                        const VectorizationFactor &B) const;

  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factors for both
  /// fixed and scalable vectorization, where the minimum-known number of
  /// elements is a power-of-2 larger than zero. If scalable vectorization is
  /// disabled or unsupported, then the scalable part will be equal to
  /// ElementCount::getScalable(0).
  FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
                                           ElementCount UserVF,
                                           bool FoldTailByMasking);

  /// \return the maximized element count based on the target's vector
  /// registers and the loop trip-count, but limited to a maximum safe VF.
  /// This is a helper function of computeFeasibleMaxVF.
  /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
  /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
  /// D98509). The issue is currently under investigation and this workaround
  /// will be removed as soon as possible.
  ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
                                       unsigned SmallestType,
                                       unsigned WidestType,
                                       const ElementCount &MaxSafeVF,
                                       bool FoldTailByMasking);

  /// \return the maximum legal scalable VF, based on the safe max number
  /// of elements.
  ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<InstructionCost, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width. If \p Invalid is not nullptr, this function
  /// will add a pair(Instruction*, ElementCount) to \p Invalid for
  /// each instruction that has an Invalid cost for the given VF.
  using InstructionVFPair = std::pair<Instruction *, ElementCount>;
  VectorizationCostTy
  expectedCost(ElementCount VF,
               SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
                                     Type *&VectorTy);

  /// Return the cost of instructions in an inloop reduction pattern, if I is
  /// part of that pattern.
  Optional<InstructionCost>
  getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
                          TTI::TargetCostKind CostKind);

  /// Calculate vectorization cost of memory instruction \p I.
  InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);

  /// The cost computation for a scalarized memory instruction.
  InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);

  /// The cost computation for an interleaving group of memory instructions.
  InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);

  /// The cost computation for a Gather/Scatter instruction.
  InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);

  /// The cost calculation for Load/Store instruction \p I with uniform
  /// pointer:
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored ? 0 : extract of last
  /// element).
  InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);

  /// Estimate the overhead of scalarizing an instruction. This is a
  /// convenience wrapper for the type-based getScalarizationOverhead API.
  InstructionCost getScalarizationOverhead(Instruction *I,
                                           ElementCount VF) const;

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I);

  /// Map of scalar integer values to the smallest bitwidth they can be
  /// legally represented as. The vector equivalents of these values should be
  /// truncated to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once.
  /// This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or doesn't divide by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;

  /// All blocks of the loop are to be masked to fold the tail of the scalar
  /// iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// PHINodes of the reductions that should be expanded in-loop along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
  ReductionChainMap InLoopReductionChains;

  /// A Map of inloop reduction operations and their immediate chain operand.
  /// FIXME: This can be removed once reductions can be costed correctly in
  /// vplan. This was added to allow quick lookup of the inloop operations,
  /// without having to loop through InLoopReductionChains.
  DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                              ElementCount VF);

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopUniforms(ElementCount VF);

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization.
  /// collectLoopScalars should only add non-uniform nodes to the list if they
  /// are used by a load/store instruction that is marked as CM_Scalarize.
  /// Non-uniform scalarized instructions will be represented by VF values in
  /// the vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopScalars(ElementCount VF);

  /// Keeps cost model vectorization decision and cost for instructions.
  /// Right now it is used for memory instructions only.
  using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
                                std::pair<InstWidening, InstructionCost>>;

  DecisionList WideningDecisions;

  /// Returns true if \p V is expected to be vectorized and it needs to be
  /// extracted.
  bool needsExtract(Value *V, ElementCount VF) const {
    Instruction *I = dyn_cast<Instruction>(V);
    if (VF.isScalar() || !I || !TheLoop->contains(I) ||
        TheLoop->isLoopInvariant(I))
      return false;

    // Assume we can vectorize V (and hence we need extraction) if the
    // scalars are not computed yet. This can happen, because it is called
    // via getScalarizationOverhead from setCostBasedWideningDecision, before
    // the scalars are collected. That should be a safe assumption in most
    // cases, because we check if the operands have vectorizable types
    // beforehand in LoopVectorizationLegality.
    return Scalars.find(VF) == Scalars.end() ||
           !isScalarAfterVectorization(I, VF);
  }

  /// Returns a range containing only operands needing to be extracted.
  SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
                                                   ElementCount VF) const {
    return SmallVector<Value *, 4>(make_filter_range(
        Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
  }

  /// Determines if we have the infrastructure to vectorize loop \p L and its
  /// epilogue, assuming the main loop is vectorized by \p VF.
  bool isCandidateForEpilogueVectorization(const Loop &L,
                                           const ElementCount VF) const;

  /// Returns true if epilogue vectorization is considered profitable, and
  /// false otherwise.
  /// \p VF is the vectorization factor chosen for the original loop.
  bool isEpilogueVectorizationProfitable(const ElementCount VF) const;

public:
  /// The loop that we evaluate.
  Loop *TheLoop;

  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// Vectorization legality.
  LoopVectorizationLegality *Legal;

  /// Vector target information.
  const TargetTransformInfo &TTI;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Demanded bits analysis.
  DemandedBits *DB;

  /// Assumption cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  const Function *TheFunction;

  /// Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;

  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride and close to each other.
  InterleavedAccessInfo &InterleaveInfo;

  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;

  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;

  /// All element types found in the loop.
  SmallPtrSet<Type *, 16> ElementTypesInLoop;

  /// Profitable vector factors.
  SmallVector<VectorizationFactor, 8> ProfitableVFs;
};
} // end namespace llvm

/// Helper struct to manage generating runtime checks for vectorization.
///
/// The runtime checks are created up-front in temporary blocks to allow better
/// estimation of their cost, and are un-linked from the existing IR. After
/// deciding to vectorize, the checks are moved back. If deciding not to
/// vectorize, the temporary blocks are completely removed.
class GeneratedRTChecks {
  /// Basic block which contains the generated SCEV checks, if any.
  BasicBlock *SCEVCheckBlock = nullptr;

  /// The value representing the result of the generated SCEV checks. If it is
  /// nullptr, either no SCEV checks have been generated or they have been
  /// used.
  Value *SCEVCheckCond = nullptr;

  /// Basic block which contains the generated memory runtime checks, if any.
  BasicBlock *MemCheckBlock = nullptr;

  /// The value representing the result of the generated memory runtime checks.
  /// If it is nullptr, either no memory runtime checks have been generated or
  /// they have been used.
  Value *MemRuntimeCheckCond = nullptr;

  DominatorTree *DT;
  LoopInfo *LI;

  SCEVExpander SCEVExp;
  SCEVExpander MemCheckExp;

public:
  GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
                    const DataLayout &DL)
      : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
        MemCheckExp(SE, DL, "scev.check") {}

  /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
  /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation.
  /// If there is no vector code generation, the check blocks are removed
  /// completely.
  void Create(Loop *L, const LoopAccessInfo &LAI,
              const SCEVUnionPredicate &UnionPred) {
    BasicBlock *LoopHeader = L->getHeader();
    BasicBlock *Preheader = L->getLoopPreheader();

    // Use SplitBlock to create blocks for SCEV & memory runtime checks to
    // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
    // may be used by SCEVExpander. The blocks will be un-linked from their
    // predecessors and removed from LI & DT at the end of the function.
    if (!UnionPred.isAlwaysTrue()) {
      SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
                                  nullptr, "vector.scevcheck");

      SCEVCheckCond = SCEVExp.expandCodeForPredicate(
          &UnionPred, SCEVCheckBlock->getTerminator());
    }

    const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
    if (RtPtrChecking.Need) {
      auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
      MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
                                 "vector.memcheck");

      MemRuntimeCheckCond =
          addRuntimeChecks(MemCheckBlock->getTerminator(), L,
                           RtPtrChecking.getChecks(), MemCheckExp);
      assert(MemRuntimeCheckCond &&
             "no RT checks generated although RtPtrChecking "
             "claimed checks are required");
    }

    if (!MemCheckBlock && !SCEVCheckBlock)
      return;

    // Unhook the temporary blocks with the checks, update various places
    // accordingly.
    if (SCEVCheckBlock)
      SCEVCheckBlock->replaceAllUsesWith(Preheader);
    if (MemCheckBlock)
      MemCheckBlock->replaceAllUsesWith(Preheader);

    if (SCEVCheckBlock) {
      SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
      new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
      Preheader->getTerminator()->eraseFromParent();
    }
    if (MemCheckBlock) {
      MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
      new UnreachableInst(Preheader->getContext(), MemCheckBlock);
      Preheader->getTerminator()->eraseFromParent();
    }

    DT->changeImmediateDominator(LoopHeader, Preheader);
    if (MemCheckBlock) {
      DT->eraseNode(MemCheckBlock);
      LI->removeBlock(MemCheckBlock);
    }
    if (SCEVCheckBlock) {
      DT->eraseNode(SCEVCheckBlock);
      LI->removeBlock(SCEVCheckBlock);
    }
  }

  /// Remove the created SCEV & memory runtime check blocks & instructions, if
  /// unused.
  ~GeneratedRTChecks() {
    SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT);
    SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT);
    if (!SCEVCheckCond)
      SCEVCleaner.markResultUsed();

    if (!MemRuntimeCheckCond)
      MemCheckCleaner.markResultUsed();

    if (MemRuntimeCheckCond) {
      auto &SE = *MemCheckExp.getSE();
      // Memory runtime check generation creates compares that use expanded
      // values. Remove them before running the SCEVExpanderCleaners.
      for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
        if (MemCheckExp.isInsertedInstruction(&I))
          continue;
        SE.forgetValue(&I);
        I.eraseFromParent();
      }
    }
    MemCheckCleaner.cleanup();
    SCEVCleaner.cleanup();

    if (SCEVCheckCond)
      SCEVCheckBlock->eraseFromParent();
    if (MemRuntimeCheckCond)
      MemCheckBlock->eraseFromParent();
  }

  /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
  /// adjusts the branches to branch to the vector preheader or \p Bypass,
  /// depending on the generated condition.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
                             BasicBlock *LoopVectorPreHeader,
                             BasicBlock *LoopExitBlock) {
    if (!SCEVCheckCond)
      return nullptr;
    if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
      if (C->isZero())
        return nullptr;

    auto *Pred = LoopVectorPreHeader->getSinglePredecessor();

    BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
    // Create new preheader for vector loop.
    if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
      PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);

    SCEVCheckBlock->getTerminator()->eraseFromParent();
    SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
    Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
                                                SCEVCheckBlock);

    DT->addNewBlock(SCEVCheckBlock, Pred);
    DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);

    ReplaceInstWithInst(
        SCEVCheckBlock->getTerminator(),
        BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
    // Mark the check as used, to prevent it from being removed during cleanup.
    SCEVCheckCond = nullptr;
    return SCEVCheckBlock;
  }

  /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and
  /// adjusts the branches to branch to the vector preheader or \p Bypass,
  /// depending on the generated condition.
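  /// The resulting (illustrative) CFG shape is:
  ///   Pred -> MemCheckBlock -> { Bypass, LoopVectorPreHeader }
  /// where Pred is the old single predecessor of the vector preheader.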
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
                                   BasicBlock *LoopVectorPreHeader) {
    // Check if we generated code that checks at runtime if arrays overlap.
    if (!MemRuntimeCheckCond)
      return nullptr;

    auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
    Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
                                                MemCheckBlock);

    DT->addNewBlock(MemCheckBlock, Pred);
    DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
    MemCheckBlock->moveBefore(LoopVectorPreHeader);

    if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
      PL->addBasicBlockToLoop(MemCheckBlock, *LI);

    ReplaceInstWithInst(
        MemCheckBlock->getTerminator(),
        BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
    MemCheckBlock->getTerminator()->setDebugLoc(
        Pred->getTerminator()->getDebugLoc());

    // Mark the check as used, to prevent it from being removed during cleanup.
    MemRuntimeCheckCond = nullptr;
    return MemCheckBlock;
  }
};

// Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
// vector length information is not provided, vectorization is not considered
// explicit. Interleave hints are not allowed either. These limitations will be
// relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
// vectorize' semantics. This pragma provides *auto-vectorization hints*
// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
// provides *explicit vectorization hints* (LV can bypass legality checks and
// assume that vectorization is legal). However, both hints are implemented
// using the same metadata (llvm.loop.vectorize, processed by
// LoopVectorizeHints). This will be fixed in the future when the native IR
// representation for pragma 'omp simd' is introduced.
static bool isExplicitVecOuterLoop(Loop *OuterLp,
                                   OptimizationRemarkEmitter *ORE) {
  assert(!OuterLp->isInnermost() && "This is not an outer loop");
  LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);

  // Only outer loops with an explicit vectorization hint are supported.
  // Unannotated outer loops are ignored.
  if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
    return false;

  Function *Fn = OuterLp->getHeader()->getParent();
  if (!Hints.allowVectorization(Fn, OuterLp,
                                true /*VectorizeOnlyWhenForced*/)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
    return false;
  }

  if (Hints.getInterleave() > 1) {
    // TODO: Interleave support is future work.
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
                         "outer loops.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  return true;
}

static void collectSupportedLoops(Loop &L, LoopInfo *LI,
                                  OptimizationRemarkEmitter *ORE,
                                  SmallVectorImpl<Loop *> &V) {
  // Collect inner loops and outer loops without irreducible control flow. For
  // now, only collect outer loops that have explicit vectorization hints. If
  // we are stress testing the VPlan H-CFG construction, we collect the
  // outermost loop of every loop nest.
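  // For example, an outer loop annotated with
  //   #pragma clang vectorize(enable) vectorize_width(4)
  // (an illustrative width) is collected here when the VPlan-native path is
  // enabled.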
  if (L.isInnermost() || VPlanBuildStressTest ||
      (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
    LoopBlocksRPO RPOT(&L);
    RPOT.perform(LI);
    if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
      V.push_back(&L);
      // TODO: Collect inner loops inside marked outer loops in case
      // vectorization fails for the outer loop. Do not invoke
      // 'containsIrreducibleCFG' again for inner loops when the outer loop is
      // already known to be reducible. We can use an inherited attribute for
      // that.
      return;
    }
  }
  for (Loop *InnerL : L)
    collectSupportedLoops(*InnerL, LI, ORE, V);
}

namespace {

/// The LoopVectorize Pass.
struct LoopVectorize : public FunctionPass {
  /// Pass identification, replacement for typeid.
  static char ID;

  LoopVectorizePass Impl;

  explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
                         bool VectorizeOnlyWhenForced = false)
      : FunctionPass(ID),
        Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
    initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
    auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();

    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
                        GetLAA, *ORE, PSI).MadeAnyChange;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addRequired<InjectTLIMappingsLegacy>();

    // We currently do not preserve loopinfo/dominator analyses with outer loop
    // vectorization. Until this is addressed, mark these analyses as preserved
    // only for non-VPlan-native path.
    // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
    if (!EnableVPlanNativePath) {
      AU.addPreserved<LoopInfoWrapperPass>();
      AU.addPreserved<DominatorTreeWrapperPass>();
    }

    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
  }
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
// LoopVectorizationCostModel and LoopVectorizationPlanner.
//===----------------------------------------------------------------------===//

Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
  Instruction *Instr = dyn_cast<Instruction>(V);
  bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
                     (!Instr ||
                      DT->dominates(Instr->getParent(), LoopVectorPreHeader));
  // Place the code for broadcasting invariant variables in the new preheader.
  IRBuilder<>::InsertPointGuard Guard(Builder);
  if (SafeToHoist)
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // Broadcast the scalar into all locations in the vector.
  Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

  return Shuf;
}

/// This function adds
/// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
/// to each vector element of Val. The sequence starts at StartIdx.
/// \p BinOp is only relevant for FP induction variables.
static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
                            Instruction::BinaryOps BinOp, ElementCount VF,
                            IRBuilder<> &Builder) {
  if (VF.isScalar()) {
    // When unrolling and the VF is 1, we only need to add a simple scalar.
    Type *Ty = Val->getType();
    assert(!Ty->isVectorTy() && "Val must be a scalar");

    if (Ty->isFloatingPointTy()) {
      // Floating-point operations inherit FMF via the builder's flags.
      Value *MulOp = Builder.CreateFMul(StartIdx, Step);
      return Builder.CreateBinOp(BinOp, Val, MulOp);
    }
    return Builder.CreateAdd(Val, Builder.CreateMul(StartIdx, Step),
                             "induction");
  }

  // Create and check the types.
  auto *ValVTy = cast<VectorType>(Val->getType());
  ElementCount VLen = ValVTy->getElementCount();

  Type *STy = Val->getType()->getScalarType();
  assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
         "Induction Step must be an integer or FP");
  assert(Step->getType() == STy && "Step has wrong type");

  SmallVector<Constant *, 8> Indices;

  // Create a vector of consecutive numbers from zero to VF.
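  // E.g. for VF = 4, CreateStepVector below produces <0, 1, 2, 3>; it is then
  // offset by StartIdx and scaled by Step.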
  VectorType *InitVecValVTy = ValVTy;
  Type *InitVecValSTy = STy;
  if (STy->isFloatingPointTy()) {
    InitVecValSTy =
        IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
    InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
  }
  Value *InitVec = Builder.CreateStepVector(InitVecValVTy);

  // Splat the StartIdx.
  Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);

  if (STy->isIntegerTy()) {
    InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
    Step = Builder.CreateVectorSplat(VLen, Step);
    assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    // flags, which can be found from the original scalar operations.
    Step = Builder.CreateMul(InitVec, Step);
    return Builder.CreateAdd(Val, Step, "induction");
  }

  // Floating point induction.
  assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
         "Binary Opcode should be specified for FP induction");
  InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
  InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);

  Step = Builder.CreateVectorSplat(VLen, Step);
  Value *MulOp = Builder.CreateFMul(InitVec, Step);
  return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
}

void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
    const InductionDescriptor &II, Value *Step, Value *Start,
    Instruction *EntryVal, VPValue *Def, VPTransformState &State) {
  IRBuilder<> &Builder = State.Builder;
  assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
         "Expected either an induction phi-node or a truncate of it!");

  // Construct the initial value of the vector IV in the vector loop preheader.
  auto CurrIP = Builder.saveIP();
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
  if (isa<TruncInst>(EntryVal)) {
    assert(Start->getType()->isIntegerTy() &&
           "Truncation requires an integer type");
    auto *TruncType = cast<IntegerType>(EntryVal->getType());
    Step = Builder.CreateTrunc(Step, TruncType);
    Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
  }

  Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
  Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
  Value *SteppedStart = getStepVector(
      SplatStart, Zero, Step, II.getInductionOpcode(), State.VF, State.Builder);

  // We create vector phi nodes for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (Step->getType()->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = II.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Multiply the vectorization factor by the step using integer or
  // floating-point arithmetic as appropriate.
  Type *StepType = Step->getType();
  Value *RuntimeVF;
  if (Step->getType()->isFloatingPointTy())
    RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF);
  else
    RuntimeVF = getRuntimeVF(Builder, StepType, State.VF);
  Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);

  // Create a vector splat to use in the induction update.
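  // E.g. each unrolled part advances the vector IV by VF * Step, so the
  // per-part update below adds the splat of Mul (= Step * RuntimeVF).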
  //
  // FIXME: If the step is non-constant, we create the vector splat with
  //        IRBuilder. IRBuilder can constant-fold the multiply, but it
  //        doesn't handle a constant vector splat.
  Value *SplatVF = isa<Constant>(Mul)
                       ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
                       : Builder.CreateVectorSplat(State.VF, Mul);
  Builder.restoreIP(CurrIP);

  // We may need to add the step a number of times, depending on the unroll
  // factor. The last of those goes into the PHI.
  PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
                                    &*LoopVectorBody->getFirstInsertionPt());
  VecInd->setDebugLoc(EntryVal->getDebugLoc());
  Instruction *LastInduction = VecInd;
  for (unsigned Part = 0; Part < UF; ++Part) {
    State.set(Def, LastInduction, Part);

    if (isa<TruncInst>(EntryVal))
      addMetadata(LastInduction, EntryVal);

    LastInduction = cast<Instruction>(
        Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
    LastInduction->setDebugLoc(EntryVal->getDebugLoc());
  }

  // Move the last step to the end of the latch block. This ensures consistent
  // placement of all induction updates.
  auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
  auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
  auto *ICmp = cast<Instruction>(Br->getCondition());
  LastInduction->moveBefore(ICmp);
  LastInduction->setName("vec.ind.next");

  VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
  VecInd->addIncoming(LastInduction, LoopVectorLatch);
}

bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
  return Cost->isScalarAfterVectorization(I, VF) ||
         Cost->isProfitableToScalarize(I, VF);
}

bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
  if (shouldScalarizeInstruction(IV))
    return true;
  auto isScalarInst = [&](User *U) -> bool {
    auto *I = cast<Instruction>(U);
    return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
  };
  return llvm::any_of(IV->users(), isScalarInst);
}

void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV,
                                                const InductionDescriptor &ID,
                                                Value *Start, TruncInst *Trunc,
                                                VPValue *Def,
                                                VPTransformState &State) {
  IRBuilder<> &Builder = State.Builder;
  assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
         "Primary induction variable must have an integer type");
  assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
  assert(!State.VF.isZero() && "VF must be non-zero");

  // The value from the original loop to which we are mapping the new induction
  // variable.
  Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;

  auto &DL = EntryVal->getModule()->getDataLayout();

  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
  auto CreateStepValue = [&](const SCEV *Step) -> Value * {
    assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
           "Induction step should be loop invariant");
    if (PSE.getSE()->isSCEVable(IV->getType())) {
      SCEVExpander Exp(*PSE.getSE(), DL, "induction");
      return Exp.expandCodeFor(Step, Step->getType(),
                               State.CFG.VectorPreHeader->getTerminator());
    }
    return cast<SCEVUnknown>(Step)->getValue();
  };

  // The scalar value to broadcast.
  // This is derived from the canonical induction variable. If a truncation
  // type is given, truncate the canonical induction variable and step.
  // Otherwise, derive these values from the induction descriptor.
  auto CreateScalarIV = [&](Value *&Step) -> Value * {
    Value *ScalarIV = Induction;
    if (IV != OldInduction) {
      ScalarIV = IV->getType()->isIntegerTy()
                     ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
                     : Builder.CreateCast(Instruction::SIToFP, Induction,
                                          IV->getType());
      ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID,
                                      State.CFG.PrevBB);
      ScalarIV->setName("offset.idx");
    }
    if (Trunc) {
      auto *TruncType = cast<IntegerType>(Trunc->getType());
      assert(Step->getType()->isIntegerTy() &&
             "Truncation requires an integer step");
      ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
      Step = Builder.CreateTrunc(Step, TruncType);
    }
    return ScalarIV;
  };

  // Create the vector values from the scalar IV, in the absence of creating a
  // vector IV.
  auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
    Value *Broadcasted = getBroadcastInstrs(ScalarIV);
    for (unsigned Part = 0; Part < UF; ++Part) {
      assert(!State.VF.isScalable() && "scalable vectors not yet supported.");
      Value *StartIdx;
      if (Step->getType()->isFloatingPointTy())
        StartIdx =
            getRuntimeVFAsFloat(Builder, Step->getType(), State.VF * Part);
      else
        StartIdx = getRuntimeVF(Builder, Step->getType(), State.VF * Part);

      Value *EntryPart =
          getStepVector(Broadcasted, StartIdx, Step, ID.getInductionOpcode(),
                        State.VF, State.Builder);
      State.set(Def, EntryPart, Part);
      if (Trunc)
        addMetadata(EntryPart, Trunc);
    }
  };

  // Fast-math-flags propagate from the original induction instruction.
  IRBuilder<>::FastMathFlagGuard FMFG(Builder);
  if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
    Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());

  // Now do the actual transformations, and start with creating the step value.
  Value *Step = CreateStepValue(ID.getStep());
  if (State.VF.isScalar()) {
    Value *ScalarIV = CreateScalarIV(Step);
    CreateSplatIV(ScalarIV, Step);
    return;
  }

  // Determine if we want a scalar version of the induction variable. This is
  // true if the induction variable itself is not widened, or if it has at
  // least one user in the loop that is not widened.
  auto NeedsScalarIV = needsScalarInduction(EntryVal);
  if (!NeedsScalarIV) {
    createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State);
    return;
  }

  // Try to create a new independent vector induction variable. If we can't
  // create the phi node, we will splat the scalar induction variable in each
  // loop iteration.
  if (!shouldScalarizeInstruction(EntryVal)) {
    createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State);
    Value *ScalarIV = CreateScalarIV(Step);
    // Create scalar steps that can be used by instructions we will later
    // scalarize. Note that the addition of the scalar steps will not increase
    // the number of instructions in the loop in the common case prior to
    // InstCombine. We will be trading one vector extract for each scalar step.
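    // E.g. for VF = 4 and step s, the scalar steps of part 0 are
    // ScalarIV + 0*s, ScalarIV + 1*s, ScalarIV + 2*s and ScalarIV + 3*s
    // (see buildScalarSteps below).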
    buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State);
    return;
  }

  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorized IV, except when we tail-fold: then the splat IV feeds the
  // predicate used by the masked loads/stores.
  Value *ScalarIV = CreateScalarIV(Step);
  if (!Cost->isScalarEpilogueAllowed())
    CreateSplatIV(ScalarIV, Step);
  buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State);
}

void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
                                           Instruction *EntryVal,
                                           const InductionDescriptor &ID,
                                           VPValue *Def,
                                           VPTransformState &State) {
  IRBuilder<> &Builder = State.Builder;
  // We shouldn't have to build scalar steps if we aren't vectorizing.
  assert(State.VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same integer type.
  Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
  assert(ScalarIVTy == Step->getType() &&
         "Val and Step should have the same type");

  // We build scalar steps for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (ScalarIVTy->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = ID.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Determine the number of scalars we need to generate for each unroll
  // iteration. If EntryVal is uniform, we only need to generate the first
  // lane. Otherwise, we generate all VF values.
  bool IsUniform =
      Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), State.VF);
  unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue();
  // Compute the scalar steps and save the results in State.
  Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
                                     ScalarIVTy->getScalarSizeInBits());
  Type *VecIVTy = nullptr;
  Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
  if (!IsUniform && State.VF.isScalable()) {
    VecIVTy = VectorType::get(ScalarIVTy, State.VF);
    UnitStepVec =
        Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
    SplatStep = Builder.CreateVectorSplat(State.VF, Step);
    SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV);
  }

  for (unsigned Part = 0; Part < State.UF; ++Part) {
    Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);

    if (!IsUniform && State.VF.isScalable()) {
      auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
      auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
      if (ScalarIVTy->isFloatingPointTy())
        InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
      auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
      auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
      State.set(Def, Add, Part);
      // It's useful to record the lane values too for the known minimum number
      // of elements, so we do that below. This improves the code quality when
      // trying to extract the first element, for example.

void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
                                                    const VPIteration &Instance,
                                                    VPTransformState &State) {
  Value *ScalarInst = State.get(Def, Instance);
  Value *VectorValue = State.get(Def, Instance.Part);
  VectorValue = Builder.CreateInsertElement(
      VectorValue, ScalarInst,
      Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
  State.set(Def, VectorValue, Instance.Part);
}

Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
  assert(Vec->getType()->isVectorTy() && "Invalid type");
  return Builder.CreateVectorReverse(Vec, "reverse");
}

// Return whether we allow using masked interleave-groups (for dealing with
// strided loads/stores that reside in predicated blocks, or for dealing
// with gaps).
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
    return EnableMaskedInterleavedMemAccesses;

  return TTI.enableMaskedInterleavedAccessVectorization();
}

// Try to vectorize the interleave group that \p Instr belongs to.
//
// E.g. Translate the following interleaved load group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     R = Pic[i];             // Member of index 0
//     G = Pic[i+1];           // Member of index 1
//     B = Pic[i+2];           // Member of index 2
//     ...                     // do something to R, G, B
//   }
// To:
//   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
//   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>  ; R elements
//   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements
//   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements
//
// Or translate the following interleaved store group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     ... do something to R, G, B
//     Pic[i]   = R;           // Member of index 0
//     Pic[i+1] = G;           // Member of index 1
//     Pic[i+2] = B;           // Member of index 2
//   }
// To:
//   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
//   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
//   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
//        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>   ; Interleave R,G,B elements
//   store <12 x i32> %interleaved.vec             ; Write 4 tuples of R,G,B
void InnerLoopVectorizer::vectorizeInterleaveGroup(
    const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
    VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
    VPValue *BlockInMask) {
  Instruction *Instr = Group->getInsertPos();
  const DataLayout &DL = Instr->getModule()->getDataLayout();

  // Prepare for the vector type of the interleaved load/store.
  Type *ScalarTy = getLoadStoreType(Instr);
  unsigned InterleaveFactor = Group->getFactor();
  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);

  // Prepare for the new pointers.
  SmallVector<Value *, 2> AddrParts;
  unsigned Index = Group->getIndex(Instr);

  // TODO: extend the masked interleaved-group support to reversed access.
  assert((!BlockInMask || !Group->isReverse()) &&
         "Reversed masked interleave-group not supported.");

  // If the group is reverse, adjust the index to refer to the last vector lane
  // instead of the first. We adjust the index from the first vector lane,
  // rather than directly getting the pointer for lane VF - 1, because the
  // pointer operand of the interleaved access is supposed to be uniform. For
  // uniform instructions, we're only required to generate a value for the
  // first vector lane in each unroll iteration.
  if (Group->isReverse())
    Index += (VF.getKnownMinValue() - 1) * Group->getFactor();

  for (unsigned Part = 0; Part < UF; Part++) {
    Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
    setDebugLocFromInst(AddrPart);

    // Note that the current instruction could be at any index in the group;
    // we need to adjust the address down to the member of index 0.
    //
    // E.g. a = A[i+1];   // Member of index 1 (current instruction)
    //      b = A[i];     // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g. A[i+1] = a;   // Member of index 1
    //      A[i]   = b;   // Member of index 0
    //      A[i+2] = c;   // Member of index 2 (current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].

    bool InBounds = false;
    if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
      InBounds = gep->isInBounds();
    AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
    cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);

    // Cast to the vector pointer type.
    unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
    Type *PtrTy = VecTy->getPointerTo(AddressSpace);
    AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
  }

  setDebugLocFromInst(Instr);
  Value *PoisonVec = PoisonValue::get(VecTy);

  Value *MaskForGaps = nullptr;
  if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
    MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
    assert(MaskForGaps && "Mask for Gaps is required but it is null");
  }

  // Vectorize the interleaved load group.
  if (isa<LoadInst>(Instr)) {
    // For each unroll part, create a wide load for the group.
    SmallVector<Value *, 2> NewLoads;
    for (unsigned Part = 0; Part < UF; Part++) {
      Instruction *NewLoad;
      if (BlockInMask || MaskForGaps) {
        assert(useMaskedInterleavedAccesses(*TTI) &&
               "masked interleaved groups are not allowed.");
        Value *GroupMask = MaskForGaps;
        if (BlockInMask) {
          Value *BlockInMaskPart = State.get(BlockInMask, Part);
          Value *ShuffledMask = Builder.CreateShuffleVector(
              BlockInMaskPart,
              createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
              "interleaved.mask");
          GroupMask = MaskForGaps
                          ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
                                                MaskForGaps)
                          : ShuffledMask;
        }
        NewLoad =
            Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
                                     GroupMask, PoisonVec, "wide.masked.vec");
      } else
        NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
                                            Group->getAlign(), "wide.vec");
      Group->addMetadata(NewLoad);
      NewLoads.push_back(NewLoad);
    }

    // For each member in the group, shuffle out the appropriate data from the
    // wide loads.
    unsigned J = 0;
    for (unsigned I = 0; I < InterleaveFactor; ++I) {
      Instruction *Member = Group->getMember(I);

      // Skip the gaps in the group.
      if (!Member)
        continue;

      auto StrideMask =
          createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
      for (unsigned Part = 0; Part < UF; Part++) {
        Value *StridedVec = Builder.CreateShuffleVector(
            NewLoads[Part], StrideMask, "strided.vec");

        // If this member has a different type, cast the result.
        if (Member->getType() != ScalarTy) {
          assert(!VF.isScalable() && "VF is assumed to be non scalable.");
          VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
          StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
        }

        if (Group->isReverse())
          StridedVec = reverseVector(StridedVec);

        State.set(VPDefs[J], StridedVec, Part);
      }
      ++J;
    }
    return;
  }

  // The sub vector type for the current instruction.
  auto *SubVT = VectorType::get(ScalarTy, VF);

  // Vectorize the interleaved store group.
  MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
  assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
         "masked interleaved groups are not allowed.");
  assert((!MaskForGaps || !VF.isScalable()) &&
         "masking gaps for scalable vectors is not yet supported.");
  for (unsigned Part = 0; Part < UF; Part++) {
    // Collect the stored vector from each member.
    SmallVector<Value *, 4> StoredVecs;
    for (unsigned i = 0; i < InterleaveFactor; i++) {
      assert((Group->getMember(i) || MaskForGaps) &&
             "Fail to get a member from an interleaved store group");
      Instruction *Member = Group->getMember(i);

      // Skip the gaps in the group.
      if (!Member) {
        Value *Undef = PoisonValue::get(SubVT);
        StoredVecs.push_back(Undef);
        continue;
      }

      Value *StoredVec = State.get(StoredValues[i], Part);

      if (Group->isReverse())
        StoredVec = reverseVector(StoredVec);

      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
        StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);

      StoredVecs.push_back(StoredVec);
    }

    // Concatenate all vectors into a wide vector.
    Value *WideVec = concatenateVectors(Builder, StoredVecs);

    // Interleave the elements in the wide vector.
    Value *IVec = Builder.CreateShuffleVector(
        WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
        "interleaved.vec");

    Instruction *NewStoreInstr;
    if (BlockInMask || MaskForGaps) {
      Value *GroupMask = MaskForGaps;
      if (BlockInMask) {
        Value *BlockInMaskPart = State.get(BlockInMask, Part);
        Value *ShuffledMask = Builder.CreateShuffleVector(
            BlockInMaskPart,
            createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
            "interleaved.mask");
        GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
                                                      ShuffledMask, MaskForGaps)
                                : ShuffledMask;
      }
      NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
                                                Group->getAlign(), GroupMask);
    } else
      NewStoreInstr =
          Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());

    Group->addMetadata(NewStoreInstr);
  }
}
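
// Illustrative note (values assumed for exposition): with InterleaveFactor = 3
// and VF = 4, the shuffle masks used above look like:
//
//   createReplicatedMask(3, 4)  -> <0,0,0,1,1,1,2,2,2,3,3,3>   ; widen a
//                                                              ; per-lane mask
//   createStrideMask(1, 3, 4)   -> <1,4,7,10>                  ; extract the
//                                                              ; member of
//                                                              ; index 1
//   createInterleaveMask(4, 3)  -> <0,4,8,1,5,9,2,6,10,3,7,11> ; interleave 3
//                                                              ; vectors of 4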

void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
                                               VPReplicateRecipe *RepRecipe,
                                               const VPIteration &Instance,
                                               bool IfPredicateInstr,
                                               VPTransformState &State) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");

  // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated
  // for the first lane and part.
  if (isa<NoAliasScopeDeclInst>(Instr))
    if (!Instance.isFirstIteration())
      return;

  setDebugLocFromInst(Instr);

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Instruction *Cloned = Instr->clone();
  if (!IsVoidRetTy)
    Cloned->setName(Instr->getName() + ".cloned");

  // If the scalarized instruction contributes to the address computation of a
  // widened masked load/store which was in a basic block that needed
  // predication and is not predicated after vectorization, we can't propagate
  // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized
  // instruction could feed a poison value to the base address of the widened
  // load/store.
  if (State.MayGeneratePoisonRecipes.contains(RepRecipe))
    Cloned->dropPoisonGeneratingFlags();

  State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
                               Builder.GetInsertPoint());
  // Replace the operands of the cloned instructions with their scalar
  // equivalents in the new loop.
  for (auto &I : enumerate(RepRecipe->operands())) {
    auto InputInstance = Instance;
    VPValue *Operand = I.value();
    if (State.Plan->isUniformAfterVectorization(Operand))
      InputInstance.Lane = VPLane::getFirstLane();
    Cloned->setOperand(I.index(), State.get(Operand, InputInstance));
  }
  addNewMetadata(Cloned, Instr);

  // Place the cloned scalar in the new loop.
  Builder.Insert(Cloned);

  State.set(RepRecipe, Cloned, Instance);

  // If we just cloned a new assumption, add it to the assumption cache.
  if (auto *II = dyn_cast<AssumeInst>(Cloned))
    AC->registerAssumption(II);

  // End if-block.
  if (IfPredicateInstr)
    PredicatedInstructions.push_back(Cloned);
}
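
// Illustrative sketch (IR names assumed, for exposition only): replicating
// %r = sdiv i32 %a, %b for VF = 2, UF = 1 produces one clone per lane, with
// each operand resolved per lane unless it is uniform-after-vectorization:
//
//   %r.cloned   = sdiv i32 %a.lane0, %b.lane0   ; Instance (Part 0, Lane 0)
//   %r.cloned.1 = sdiv i32 %a.lane1, %b.lane1   ; Instance (Part 0, Lane 1)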

PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
                                                      Value *End, Value *Step,
                                                      Instruction *DL) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible no latch exists
  // yet. If so, use the header as this will be a single block loop.
  if (!Latch)
    Latch = Header;

  IRBuilder<> B(&*Header->getFirstInsertionPt());
  Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
  setDebugLocFromInst(OldInst, &B);
  auto *Induction = B.CreatePHI(Start->getType(), 2, "index");

  B.SetInsertPoint(Latch->getTerminator());
  setDebugLocFromInst(OldInst, &B);

  // Create i+1 and fill the PHINode.
  //
  // If the tail is not folded, we know that End - Start >= Step (either
  // statically or through the minimum iteration checks). We also know that
  // both Start % Step == 0 and End % Step == 0. We exit the vector loop if
  // %IV + %Step == %End. Hence we must exit the loop before %IV + %Step
  // unsigned overflows, and we can mark the induction increment as NUW.
  Value *Next = B.CreateAdd(Induction, Step, "index.next",
                            /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false);
  Induction->addIncoming(Start, L->getLoopPreheader());
  Induction->addIncoming(Next, Latch);
  // Create the compare.
  Value *ICmp = B.CreateICmpEQ(Next, End);
  B.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);

  // Now we have two terminators. Remove the old one from the block.
  Latch->getTerminator()->eraseFromParent();

  return Induction;
}

Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
  if (TripCount)
    return TripCount;

  assert(L && "Create Trip Count for null loop.");
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  // Find the loop boundaries.
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
         "Invalid loop count");

  Type *IdxTy = Legal->getWidestInductionType();
  assert(IdxTy && "No type for induction");

  // The exit count might have type i64 while the phi is i32. This can happen
  // if we have an induction variable that is sign extended before the
  // compare. The only way we get a backedge-taken count in that case is if
  // the induction variable was signed, and as such it will not overflow; so
  // truncation is legal.
  if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
      IdxTy->getPrimitiveSizeInBits())
    BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
  BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);

  // Get the total trip count from the count by adding 1.
  const SCEV *ExitCount = SE->getAddExpr(
      BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));

  const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();

  // Expand the trip count and place the new instructions in the preheader.
  // Notice that the pre-header does not change, only the loop body.
  SCEVExpander Exp(*SE, DL, "induction");

  // Count holds the overall loop count (N).
  TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
                                L->getLoopPreheader()->getTerminator());

  if (TripCount->getType()->isPointerTy())
    TripCount =
        CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
                                    L->getLoopPreheader()->getTerminator());

  return TripCount;
}
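
// Worked example (values assumed): for a loop `for (i = 0; i < n; ++i)` that
// is known to execute at least once, SCEV's backedge-taken count is n - 1,
// so the trip count expanded here is (n - 1) + 1 == n. If the widest
// induction type is i64 while n is i32, the backedge-taken count is
// zero-extended to i64 first.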

Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
  if (VectorTripCount)
    return VectorTripCount;

  Value *TC = getOrCreateTripCount(L);
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());

  Type *Ty = TC->getType();
  // This is where we can make the step a runtime constant.
  Value *Step = createStepForVF(Builder, Ty, VF, UF);

  // If the tail is to be folded by masking, round the number of iterations N
  // up to a multiple of Step instead of rounding down. This is done by first
  // adding Step-1 and then rounding down. Note that it's ok if this addition
  // overflows: the vector induction variable will eventually wrap to zero
  // given that it starts at zero and its Step is a power of two; the loop
  // will then exit, with the last early-exit vector comparison also producing
  // all-true.
  if (Cost->foldTailByMasking()) {
    assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
           "VF*UF must be a power of 2 when folding tail by masking");
    assert(!VF.isScalable() &&
           "Tail folding not yet supported for scalable vectors");
    TC = Builder.CreateAdd(
        TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
  }

  // Now we need to generate the expression for the part of the loop that the
  // vectorized body will execute. This is equal to N - (N % Step) if scalar
  // iterations are not required for correctness, or N - Step otherwise. Step
  // is equal to the vectorization factor (number of SIMD elements) times the
  // unroll factor (number of SIMD instructions).
  Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");

  // There are cases where we *must* run at least one iteration in the
  // remainder loop. See the cost model for when this can happen. If the step
  // evenly divides the trip count, we set the remainder to be equal to the
  // step. If the step does not evenly divide the trip count, no adjustment is
  // necessary since there will already be scalar iterations. Note that the
  // minimum iterations check ensures that N >= Step.
  if (Cost->requiresScalarEpilogue(VF)) {
    auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
    R = Builder.CreateSelect(IsZero, Step, R);
  }

  VectorTripCount = Builder.CreateSub(TC, R, "n.vec");

  return VectorTripCount;
}
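
// Worked example (numbers assumed): with N = 17, VF = 8, UF = 1 and no tail
// folding, n.mod.vf = 17 % 8 = 1 and n.vec = 16; one scalar iteration
// remains. If a scalar epilogue is required and N = 16, the remainder would
// be 0, so the select above forces it to Step, giving n.vec = 8 and leaving
// 8 iterations for the scalar loop. With tail folding, N is first rounded up
// to 24 and the vector loop executes all iterations under a mask.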

Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                                   const DataLayout &DL) {
  // Verify that V is a vector type with the same number of elements as DstVTy.
  auto *DstFVTy = cast<FixedVectorType>(DstVTy);
  unsigned VF = DstFVTy->getNumElements();
  auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
  Type *SrcElemTy = SrcVecTy->getElementType();
  Type *DstElemTy = DstFVTy->getElementType();
  assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
         "Vector elements must have same size");

  // Do a direct cast if element types are castable.
  if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
    return Builder.CreateBitOrPointerCast(V, DstFVTy);
  }
  // V cannot be directly cast to the desired vector type. This may happen
  // when V is a floating point vector but DstVTy is a vector of pointers, or
  // vice-versa. Handle this using a two-step bitcast with an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
  assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
         "Only one type should be a pointer type");
  assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
         "Only one type should be a floating point type");
  Type *IntTy =
      IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
  auto *VecIntTy = FixedVectorType::get(IntTy, VF);
  Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
  return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
}

void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
                                                         BasicBlock *Bypass) {
  Value *Count = getOrCreateTripCount(L);
  // Reuse existing vector loop preheader for TC checks.
  // Note that a new preheader block is generated for the vector loop.
  BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
  IRBuilder<> Builder(TCCheckBlock->getTerminator());

  // Generate code to check if the loop's trip count is less than VF * UF, or
  // equal to it in case a scalar epilogue is required; this implies that the
  // vector trip count is zero. This check also covers the case where adding
  // one to the backedge-taken count overflowed leading to an incorrect trip
  // count of zero. In this case we will also jump to the scalar loop.
  auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
                                            : ICmpInst::ICMP_ULT;

  // If the tail is to be folded, the vector loop takes care of all iterations.
  Value *CheckMinIters = Builder.getFalse();
  if (!Cost->foldTailByMasking()) {
    Value *Step = createStepForVF(Builder, Count->getType(), VF, UF);
    CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
  }
  // Create the new preheader for the vector loop.
  LoopVectorPreHeader =
      SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
                 "vector.ph");

  assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
                               DT->getNode(Bypass)->getIDom()) &&
         "TC check is expected to dominate Bypass");

  // Update dominator for Bypass & LoopExit (if needed).
  DT->changeImmediateDominator(Bypass, TCCheckBlock);
  if (!Cost->requiresScalarEpilogue(VF))
    // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
    // dominator of the exit blocks.
    DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);

  ReplaceInstWithInst(
      TCCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
  LoopBypassBlocks.push_back(TCCheckBlock);
}

BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *const SCEVCheckBlock =
      RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
  if (!SCEVCheckBlock)
    return nullptr;

  assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
           (OptForSizeBasedOnProfile &&
            Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
         "Cannot SCEV check stride or overflow when optimizing for size");

  // Update the dominator only if this is the first RT check.
  if (LoopBypassBlocks.empty()) {
    DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
    if (!Cost->requiresScalarEpilogue(VF))
      // If there is an epilogue which must run, there's no edge from the
      // middle block to exit blocks and thus no need to update the immediate
      // dominator of the exit blocks.
      DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
  }

  LoopBypassBlocks.push_back(SCEVCheckBlock);
  AddedSafetyChecks = true;
  return SCEVCheckBlock;
}

BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
                                                      BasicBlock *Bypass) {
  // The VPlan-native path does not currently do any analysis for runtime
  // checks.
  if (EnableVPlanNativePath)
    return nullptr;

  BasicBlock *const MemCheckBlock =
      RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);

  // Check if we generated code that checks at runtime whether arrays overlap.
  // We put the checks into a separate block to make the more common case of
  // few elements faster.
  if (!MemCheckBlock)
    return nullptr;

  if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
    assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
           "Cannot emit memory checks when optimizing for size, unless forced "
           "to vectorize.");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
                                        L->getStartLoc(), L->getHeader())
             << "Code-size may be reduced by not forcing "
                "vectorization, or by source-code modifications "
                "eliminating the need for runtime checks "
                "(e.g., adding 'restrict').";
    });
  }

  LoopBypassBlocks.push_back(MemCheckBlock);

  AddedSafetyChecks = true;

  // We currently don't use LoopVersioning for the actual loop cloning but we
  // still use it to add the noalias metadata.
  LVer = std::make_unique<LoopVersioning>(
      *Legal->getLAI(),
      Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
      DT, PSE.getSE());
  LVer->prepareNoAliasMetadata();
  return MemCheckBlock;
}
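
// Illustrative IR sketch (block and value names assumed, not emitted
// verbatim): for VF = 8, UF = 1 and no tail folding, the minimum-iteration
// check produced above looks roughly like:
//
//   %min.iters.check = icmp ult i64 %count, 8
//   br i1 %min.iters.check, label %scalar.ph, label %vector.ph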

Value *InnerLoopVectorizer::emitTransformedIndex(
    IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
    const InductionDescriptor &ID, BasicBlock *VectorHeader) const {
  SCEVExpander Exp(*SE, DL, "induction");
  auto Step = ID.getStep();
  auto StartValue = ID.getStartValue();
  assert(Index->getType()->getScalarType() == Step->getType() &&
         "Index scalar type does not match StepValue type");

  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and
  // rely on InstCombine for future simplifications. Here we handle some
  // trivial cases only.
  auto CreateAdd = [&B](Value *X, Value *Y) {
    assert(X->getType() == Y->getType() && "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isZero())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isZero())
        return X;
    return B.CreateAdd(X, Y);
  };

  // We allow X to be a vector type, in which case Y will potentially be
  // splatted into a vector with the same element count.
  auto CreateMul = [&B](Value *X, Value *Y) {
    assert(X->getType()->getScalarType() == Y->getType() &&
           "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isOne())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isOne())
        return X;
    VectorType *XVTy = dyn_cast<VectorType>(X->getType());
    if (XVTy && !isa<VectorType>(Y->getType()))
      Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
    return B.CreateMul(X, Y);
  };

  // Get a suitable insert point for SCEV expansion. For blocks in the vector
  // loop, choose the end of the vector loop header (=VectorHeader), because
  // the DomTree is not kept up-to-date for additional blocks generated in the
  // vector loop. By using the header as insertion point, we guarantee that the
  // expanded instructions dominate all their uses.
  auto GetInsertPoint = [this, &B, VectorHeader]() {
    BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
    if (InsertBB != LoopVectorBody &&
        LI->getLoopFor(VectorHeader) == LI->getLoopFor(InsertBB))
      return VectorHeader->getTerminator();
    return &*B.GetInsertPoint();
  };

  switch (ID.getKind()) {
  case InductionDescriptor::IK_IntInduction: {
    assert(!isa<VectorType>(Index->getType()) &&
           "Vector indices not supported for integer inductions yet");
    assert(Index->getType() == StartValue->getType() &&
           "Index type does not match StartValue type");
    if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
      return B.CreateSub(StartValue, Index);
    auto *Offset = CreateMul(
        Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
    return CreateAdd(StartValue, Offset);
  }
  case InductionDescriptor::IK_PtrInduction: {
    assert(isa<SCEVConstant>(Step) &&
           "Expected constant step for pointer induction");
    return B.CreateGEP(
        ID.getElementType(), StartValue,
        CreateMul(Index,
                  Exp.expandCodeFor(Step, Index->getType()->getScalarType(),
                                    GetInsertPoint())));
  }
  case InductionDescriptor::IK_FpInduction: {
    assert(!isa<VectorType>(Index->getType()) &&
           "Vector indices not supported for FP inductions yet");
    assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
    auto InductionBinOp = ID.getInductionBinOp();
    assert(InductionBinOp &&
           (InductionBinOp->getOpcode() == Instruction::FAdd ||
            InductionBinOp->getOpcode() == Instruction::FSub) &&
           "Original bin op should be defined for FP induction");

    Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
    Value *MulExp = B.CreateFMul(StepValue, Index);
    return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
                         "induction");
  }
  case InductionDescriptor::IK_NoInduction:
    return nullptr;
  }
  llvm_unreachable("invalid enum");
}
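
// Worked example (symbols assumed): for an integer induction with start S and
// step C, the transformed index for a given Index is S + Index * C; for a
// pointer induction it is gep(ElemTy, S, Index * C); and for an FP induction
// with binop fadd it is S fadd (Index fmul C).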

Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
  LoopScalarBody = OrigLoop->getHeader();
  LoopVectorPreHeader = OrigLoop->getLoopPreheader();
  assert(LoopVectorPreHeader && "Invalid loop structure");
  LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
  assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
         "multiple exit loop without required epilogue?");

  LoopMiddleBlock =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, Twine(Prefix) + "middle.block");
  LoopScalarPreHeader =
      SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
                 nullptr, Twine(Prefix) + "scalar.ph");

  auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();

  // Set up the middle block terminator. Two cases:
  // 1) If we know that we must execute the scalar epilogue, emit an
  //    unconditional branch.
  // 2) Otherwise, we must have a single unique exit block (due to how we
  //    implement the multiple exit case). In this case, set up a conditional
  //    branch from the middle block to the loop scalar preheader, and the
  //    exit block. completeLoopSkeleton will update the condition to use an
  //    iteration check, if required to decide whether to execute the
  //    remainder.
  BranchInst *BrInst = Cost->requiresScalarEpilogue(VF)
                           ? BranchInst::Create(LoopScalarPreHeader)
                           : BranchInst::Create(LoopExitBlock,
                                                LoopScalarPreHeader,
                                                Builder.getTrue());
  BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
  ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);

  // We intentionally don't let SplitBlock update LoopInfo since
  // LoopVectorBody should belong to a different loop than
  // LoopVectorPreHeader. LoopVectorBody is explicitly added to the correct
  // place a few lines later.
  LoopVectorBody =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 nullptr, nullptr, Twine(Prefix) + "vector.body");

  // Update dominator for loop exit.
  if (!Cost->requiresScalarEpilogue(VF))
    // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
    // dominator of the exit blocks.
    DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);

  // Create and register the new vector loop.
  Loop *Lp = LI->AllocateLoop();
  Loop *ParentLoop = OrigLoop->getParentLoop();

  // Insert the new loop into the loop nest and register the new basic blocks
  // before calling any utilities such as SCEV that require valid LoopInfo.
  if (ParentLoop) {
    ParentLoop->addChildLoop(Lp);
  } else {
    LI->addTopLevelLoop(Lp);
  }
  Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
  return Lp;
}

void InnerLoopVectorizer::createInductionResumeValues(
    Loop *L, Value *VectorTripCount,
    std::pair<BasicBlock *, Value *> AdditionalBypass) {
  assert(VectorTripCount && L && "Expected valid arguments");
  assert(((AdditionalBypass.first && AdditionalBypass.second) ||
          (!AdditionalBypass.first && !AdditionalBypass.second)) &&
         "Inconsistent information about additional bypass.");
  // We are going to resume the execution of the scalar loop.
  // Go over all of the induction variables that we found and fix the
  // PHIs that are left in the scalar version of the loop.
  // The starting values of PHI nodes depend on the counter of the last
  // iteration in the vectorized loop.
  // If we come from a bypass edge then we need to start from the original
  // start value.
  for (auto &InductionEntry : Legal->getInductionVars()) {
    PHINode *OrigPhi = InductionEntry.first;
    InductionDescriptor II = InductionEntry.second;

    // Create phi nodes to merge from the backedge-taken check block.
    PHINode *BCResumeVal =
        PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
                        LoopScalarPreHeader->getTerminator());
    // Copy original phi DL over to the new one.
    BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
    Value *&EndValue = IVEndValues[OrigPhi];
    Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
    if (OrigPhi == OldInduction) {
      // We know what the end value is.
      EndValue = VectorTripCount;
    } else {
      IRBuilder<> B(L->getLoopPreheader()->getTerminator());

      // Fast-math-flags propagate from the original induction instruction.
      if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
        B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());

      Type *StepType = II.getStep()->getType();
      Instruction::CastOps CastOp =
          CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
      Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
      const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
      EndValue =
          emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody);
      EndValue->setName("ind.end");

      // Compute the end value for the additional bypass (if applicable).
      if (AdditionalBypass.first) {
        B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
        CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
                                         StepType, true);
        CRD =
            B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
        EndValueFromAdditionalBypass =
            emitTransformedIndex(B, CRD, PSE.getSE(), DL, II, LoopVectorBody);
        EndValueFromAdditionalBypass->setName("ind.end");
      }
    }
    // The new PHI merges the original incoming value, in case of a bypass,
    // or the value at the end of the vectorized loop.
    BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);

    // Fix the scalar body counter (PHI node).
    // The old induction's phi node in the scalar body needs the truncated
    // value.
    for (BasicBlock *BB : LoopBypassBlocks)
      BCResumeVal->addIncoming(II.getStartValue(), BB);

    if (AdditionalBypass.first)
      BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
                                            EndValueFromAdditionalBypass);

    OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
  }
}
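
// Illustrative IR sketch (block names assumed): for the primary induction,
// the resume phi in the scalar preheader typically looks like:
//
//   %bc.resume.val = phi i64 [ %n.vec, %middle.block ],
//                            [ 0, %vector.scevcheck ],
//                            [ 0, %vector.memcheck ]
//
// i.e. the scalar loop resumes at the vector trip count when the vector loop
// ran, and at the original start value when a bypass edge was taken.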

BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
                                                      MDNode *OrigLoopID) {
  assert(L && "Expected valid loop.");

  // The trip counts should be cached by now.
  Value *Count = getOrCreateTripCount(L);
  Value *VectorTripCount = getOrCreateVectorTripCount(L);

  auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();

  // Add a check in the middle block to see if we have completed
  // all of the iterations in the first vector loop. Three cases:
  // 1) If we require a scalar epilogue, there is no conditional branch as
  //    we unconditionally branch to the scalar preheader. Do nothing.
  // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
  //    Thus if the tail is to be folded, we know we don't need to run the
  //    remainder and we can use the previous value for the condition (true).
  // 3) Otherwise, construct a runtime check.
  if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
    Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
                                        Count, VectorTripCount, "cmp.n",
                                        LoopMiddleBlock->getTerminator());

    // Here we use the same DebugLoc as the scalar loop latch terminator
    // instead of the corresponding compare because they may have ended up
    // with different line numbers and we want to avoid awkward line stepping
    // while debugging. E.g., if the compare has a line number inside the
    // loop.
    CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
    cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
  }

  // Get ready to start creating new instructions into the vectorized body.
  assert(LoopVectorPreHeader == L->getLoopPreheader() &&
         "Inconsistent vector loop preheader");
  Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());

  Optional<MDNode *> VectorizedLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupVectorized});
  if (VectorizedLoopID.hasValue()) {
    L->setLoopID(VectorizedLoopID.getValue());

    // Do not setAlreadyVectorized if loop attributes have been defined
    // explicitly.
    return LoopVectorPreHeader;
  }

  // Keep all loop hints from the original loop on the vector loop (we'll
  // replace the vectorizer-specific hints below).
  if (MDNode *LID = OrigLoop->getLoopID())
    L->setLoopID(LID);

  LoopVectorizeHints Hints(L, true, *ORE, TTI);
  Hints.setAlreadyVectorized();

#ifdef EXPENSIVE_CHECKS
  assert(DT->verify(DominatorTree::VerificationLevel::Fast));
  LI->verify(*DT);
#endif

  return LoopVectorPreHeader;
}

BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
  /*
   In this function we generate a new loop. The new loop will contain
   the vectorized instructions while the old loop will continue to run the
   scalar remainder.

       [ ] <-- loop iteration number check.
    /   |
   /    v
  |    [ ] <-- vector loop bypass (may consist of multiple blocks).
  |  /  |
  | /   v
  ||   [ ]     <-- vector pre header.
  |/    |
  |     v
  |    [  ] \
  |    [  ]_|   <-- vector loop.
  |     |
  |     v
  \   -[ ]   <--- middle-block.
   \/   |
   /\   v
  | ->[ ]     <--- new preheader.
  |    |
 (opt)  v      <-- edge from middle to exit iff epilogue is not required.
  |   [ ] \
  |   [ ]_|   <-- old scalar loop to handle remainder (scalar epilogue).
   \   |
    \  v
      >[ ]     <-- exit block(s).
   ...
   */

  // Get the metadata of the original loop before it gets modified.
  MDNode *OrigLoopID = OrigLoop->getLoopID();

  // Workaround! Compute the trip count of the original loop and cache it
  // before we start modifying the CFG. This code has a systemic problem
  // wherein it tries to run analysis over partially constructed IR; this is
  // wrong, and not simply for SCEV. The trip count of the original loop
  // simply happens to be prone to hitting this in practice. In theory, we
  // can hit the same issue for any SCEV, or ValueTracking query done during
  // mutation. See PR49900.
  getOrCreateTripCount(OrigLoop);

  // Create an empty vector loop, and prepare basic blocks for the runtime
  // checks.
  Loop *Lp = createVectorLoopSkeleton("");

  // Now, compare the new count to zero. If it is zero skip the vector loop
  // and jump to the scalar loop. This check also covers the case where the
  // backedge-taken count is uint##_max: adding one to it will overflow
  // leading to an incorrect trip count of zero. In this (rare) case we will
  // also jump to the scalar loop.
  emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);

  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
  emitSCEVChecks(Lp, LoopScalarPreHeader);

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few
  // elements faster.
  emitMemRuntimeChecks(Lp, LoopScalarPreHeader);

  // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators that often have multiple pointer
  // induction variables. In the code below we also support a case where we
  // don't have a single induction variable.
  //
  // We try hard to obtain an induction variable from the original loop.
  // However, if we don't find one that:
  //   - is an integer
  //   - counts from zero, stepping by one
  //   - is the size of the widest induction variable type
  // then we create a new one.
  OldInduction = Legal->getPrimaryInduction();
  Type *IdxTy = Legal->getWidestInductionType();
  Value *StartIdx = ConstantInt::get(IdxTy, 0);
  // The loop step is equal to the vectorization factor (num of SIMD elements)
  // times the unroll factor (num of SIMD instructions).
  Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
  Value *Step = createStepForVF(Builder, IdxTy, VF, UF);
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // Emit phis for the new starting index of the scalar loop.
  createInductionResumeValues(Lp, CountRoundDown);

  return completeLoopSkeleton(Lp, OrigLoopID);
}

// Fix up external users of the induction variable. At this point, we are
// in LCSSA form, with all external PHIs that use the IV having one input
// value, coming from the remainder loop. We need those PHIs to also have a
// correct value for the IV when arriving directly from the middle block.
void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
                                       const InductionDescriptor &II,
                                       Value *CountRoundDown, Value *EndValue,
                                       BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the
  // penultimate value (the value that feeds into the phi from the loop
  // latch). We allow both, but they, obviously, have different values.

  assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());

      // Fast-math-flags propagate from the original induction instruction.
      if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
        B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());

      Value *CountMinusOne = B.CreateSub(
          CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
      Value *CMO =
          !II.getStep()->getType()->isIntegerTy()
              ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
                             II.getStep()->getType())
              : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
      CMO->setName("cast.cmo");
      Value *Escape =
          emitTransformedIndex(B, CMO, PSE.getSE(), DL, II, LoopVectorBody);
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;
    }
  }

  for (auto &I : MissingVals) {
    PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is %IV2 = phi [...], [ %IV1, %latch ]
    // In this case, if IV1 has an external use, we need to avoid adding both
    // "last value of IV1" and "penultimate value of IV2". So, verify that we
    // don't already have an incoming value for the middle block.
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
      PHI->addIncoming(I.second, MiddleBlock);
  }
}
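
// Illustrative sketch (names assumed): if the scalar loop is
//   for (i = 0; i != n; ++i) { ... }  with a use of i after the loop,
// the external LCSSA phi gets an extra incoming value from the middle block:
// the end value (n.vec) for users of the post-increment, and
// %ind.escape = start + (n.vec - 1) * step for users of the phi itself.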

namespace {

struct CSEDenseMapInfo {
  static bool canHandle(const Instruction *I) {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  }

  static inline Instruction *getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline Instruction *getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(const Instruction *I) {
    assert(canHandle(I) && "Unknown instruction!");
    return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
                                                           I->value_op_end()));
  }

  static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return LHS == RHS;
    return LHS->isIdenticalTo(RHS);
  }
};

} // end anonymous namespace

/// Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
  SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
  for (Instruction &In : llvm::make_early_inc_range(*BB)) {
    if (!CSEDenseMapInfo::canHandle(&In))
      continue;

    // Check if we can replace this instruction with any of the
    // visited instructions.
    if (Instruction *V = CSEMap.lookup(&In)) {
      In.replaceAllUsesWith(V);
      In.eraseFromParent();
      continue;
    }

    CSEMap[&In] = &In;
  }
}

InstructionCost
LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
                                              bool &NeedToScalarize) const {
  Function *F = CI->getCalledFunction();
  Type *ScalarRetTy = CI->getType();
  SmallVector<Type *, 4> Tys, ScalarTys;
  for (auto &ArgOp : CI->args())
    ScalarTys.push_back(ArgOp->getType());

  // Estimate the cost of the scalarized vector call. The source operands are
  // assumed to be vectors, so we need to extract individual elements from
  // there, execute VF scalar calls, and then gather the result into the
  // vector return value.
  InstructionCost ScalarCallCost =
      TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
  if (VF.isScalar())
    return ScalarCallCost;

  // Compute corresponding vector type for return value and arguments.
  Type *RetTy = ToVectorTy(ScalarRetTy, VF);
  for (Type *ScalarTy : ScalarTys)
    Tys.push_back(ToVectorTy(ScalarTy, VF));

  // Compute costs of unpacking argument values for the scalar calls and
  // packing the return values into a vector.
  InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);

  InstructionCost Cost =
      ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;

  // If we can't emit a vector call for this function, then the currently
  // found cost is the cost we need to return.
  NeedToScalarize = true;
  VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
  Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);

  if (!TLI || CI->isNoBuiltin() || !VecFunc)
    return Cost;

  // If the corresponding vector cost is cheaper, return its cost.
  InstructionCost VectorCallCost =
      TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
  if (VectorCallCost < Cost) {
    NeedToScalarize = false;
    Cost = VectorCallCost;
  }
  return Cost;
}
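
// Worked example (costs assumed for exposition): with VF = 4, a scalar call
// cost of 10 and a scalarization overhead of 8, the scalarized estimate is
// 10 * 4 + 8 = 48. If the target provides a vector variant costing 20, the
// vector call wins and NeedToScalarize is set to false.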

static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
  if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
    return Elt;
  return VectorType::get(Elt, VF);
}

InstructionCost
LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
                                                   ElementCount VF) const {
  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  assert(ID && "Expected intrinsic call!");
  Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
  FastMathFlags FMF;
  if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
    FMF = FPMO->getFastMathFlags();

  SmallVector<const Value *> Arguments(CI->args());
  FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
  SmallVector<Type *> ParamTys;
  std::transform(FTy->param_begin(), FTy->param_end(),
                 std::back_inserter(ParamTys),
                 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });

  IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
                                    dyn_cast<IntrinsicInst>(CI));
  return TTI.getIntrinsicInstrCost(CostAttrs,
                                   TargetTransformInfo::TCK_RecipThroughput);
}

static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
}

static Type *largestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
}

void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
  // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I` and reextend its result. InstCombine runs
  // later and will remove any ext/trunc pairs.
  SmallPtrSet<Value *, 4> Erased;
  for (const auto &KV : Cost->getMinimalBitwidths()) {
    // If the value wasn't vectorized, we must maintain the original scalar
    // type. The absence of the value from State indicates that it
    // wasn't vectorized.
    // FIXME: Should not rely on getVPValue at this point.
    VPValue *Def = State.Plan->getVPValue(KV.first, true);
    if (!State.hasAnyVectorValue(Def))
      continue;
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *I = State.get(Def, Part);
      if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
        continue;
      Type *OriginalTy = I->getType();
      Type *ScalarTruncatedTy =
          IntegerType::get(OriginalTy->getContext(), KV.second);
      auto *TruncatedTy = VectorType::get(
          ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
      if (TruncatedTy == OriginalTy)
        continue;

      IRBuilder<> B(cast<Instruction>(I));
      auto ShrinkOperand = [&](Value *V) -> Value * {
        if (auto *ZI = dyn_cast<ZExtInst>(V))
          if (ZI->getSrcTy() == TruncatedTy)
            return ZI->getOperand(0);
        return B.CreateZExtOrTrunc(V, TruncatedTy);
      };

      // The actual instruction modification depends on the instruction type,
      // unfortunately.
      Value *NewI = nullptr;
      if (auto *BO = dyn_cast<BinaryOperator>(I)) {
        NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
                             ShrinkOperand(BO->getOperand(1)));

        // Any wrapping introduced by shrinking this operation shouldn't be
        // considered undefined behavior. So, we can't unconditionally copy
        // arithmetic wrapping flags to NewI.
        cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
      } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
        NewI =
            B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
                         ShrinkOperand(CI->getOperand(1)));
      } else if (auto *SI = dyn_cast<SelectInst>(I)) {
        NewI = B.CreateSelect(SI->getCondition(),
                              ShrinkOperand(SI->getTrueValue()),
                              ShrinkOperand(SI->getFalseValue()));
      } else if (auto *CI = dyn_cast<CastInst>(I)) {
        switch (CI->getOpcode()) {
        default:
          llvm_unreachable("Unhandled cast!");
        case Instruction::Trunc:
          NewI = ShrinkOperand(CI->getOperand(0));
          break;
        case Instruction::SExt:
          NewI = B.CreateSExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        case Instruction::ZExt:
          NewI = B.CreateZExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        }
      } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
        auto Elements0 =
            cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
        auto *O0 = B.CreateZExtOrTrunc(
            SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
        auto Elements1 =
            cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
        auto *O1 = B.CreateZExtOrTrunc(
            SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));

        NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
      } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
        // Don't do anything with the operands, just extend the result.
        continue;
      } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
        auto Elements =
            cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
        auto *O0 = B.CreateZExtOrTrunc(
            IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
        NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
      } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
        auto Elements =
            cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
        auto *O0 = B.CreateZExtOrTrunc(
            EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        NewI = B.CreateExtractElement(O0, EE->getOperand(2));
      } else {
        // If we don't know what to do, be conservative and don't do anything.
        continue;
      }

      // Lastly, extend the result.
      NewI->takeName(cast<Instruction>(I));
      Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
      I->replaceAllUsesWith(Res);
      cast<Instruction>(I)->eraseFromParent();
      Erased.insert(I);
      State.reset(Def, Res, Part);
    }
  }

  // We'll have created a bunch of ZExts that are now parentless. Clean up.
  for (const auto &KV : Cost->getMinimalBitwidths()) {
    // If the value wasn't vectorized, we must maintain the original scalar
    // type. The absence of the value from State indicates that it
    // wasn't vectorized.
    // FIXME: Should not rely on getVPValue at this point.
    VPValue *Def = State.Plan->getVPValue(KV.first, true);
    if (!State.hasAnyVectorValue(Def))
      continue;
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *I = State.get(Def, Part);
      ZExtInst *Inst = dyn_cast<ZExtInst>(I);
      if (Inst && Inst->use_empty()) {
        Value *NewI = Inst->getOperand(0);
        Inst->eraseFromParent();
        State.reset(Def, NewI, Part);
      }
    }
  }
}
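
// Illustrative sketch (assumed IR): if the cost model proves an i32 add only
// needs 8 bits, the transformation above rewrites
//
//   %add = add <4 x i32> %a, %b
// into
//   %a.tr   = trunc <4 x i32> %a to <4 x i8>
//   %b.tr   = trunc <4 x i32> %b to <4 x i8>
//   %add.tr = add <4 x i8> %a.tr, %b.tr
//   %res    = zext <4 x i8> %add.tr to <4 x i32>
//
// leaving InstCombine to remove any redundant ext/trunc pairs.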
  cse(LoopVectorBody);

  // Set/update profile weights for the vector and remainder loops as original
  // loop iterations are now distributed among them. Note that the original
  // loop, represented by LoopScalarBody, becomes the remainder loop after
  // vectorization.
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly roughened result, but that should be OK since the
  // profile is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
  //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
  setProfileInfoAfterUnrolling(
      LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
      LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
}

void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #2: We now need to fix the recurrences by adding incoming edges to
  // the currently empty PHI nodes. At this point every instruction in the
  // original loop is widened to a vector form so we can use them to construct
  // the incoming edges.
  VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
  for (VPRecipeBase &R : Header->phis()) {
    if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
      fixReduction(ReductionPhi, State);
    else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
      fixFirstOrderRecurrence(FOR, State);
  }
}

void InnerLoopVectorizer::fixFirstOrderRecurrence(
    VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
  // This is the second phase of vectorizing first-order recurrences. An
  // overview of the transformation is described below. Suppose we have the
  // following loop.
  //
  //   for (int i = 0; i < n; ++i)
  //     b[i] = a[i] - a[i - 1];
  //
  // There is a first-order recurrence on "a". For this loop, the shorthand
  // scalar IR looks like:
  //
  //   scalar.ph:
  //     s_init = a[-1]
  //     br scalar.body
  //
  //   scalar.body:
  //     i = phi [0, scalar.ph], [i+1, scalar.body]
  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //     s2 = a[i]
  //     b[i] = s2 - s1
  //     br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // vector phi v1 for s1. We now complete the vectorization and produce the
  // shorthand vector IR shown below (for VF = 4, UF = 1).
  //
  //   vector.ph:
  //     v_init = vector(..., ..., ..., a[-1])
  //     br vector.body
  //
  //   vector.body
  //     i = phi [0, vector.ph], [i+4, vector.body]
  //     v1 = phi [v_init, vector.ph], [v2, vector.body]
  //     v2 = a[i, i+1, i+2, i+3];
  //     v3 = vector(v1(3), v2(0, 1, 2))
  //     b[i, i+1, i+2, i+3] = v2 - v3
  //     br cond, vector.body, middle.block
  //
  //   middle.block:
  //     x = v2(3)
  //     br scalar.ph
  //
  //   scalar.ph:
  //     s_init = phi [x, middle.block], [a[-1], otherwise]
  //     br scalar.body
  //
  // After the vector loop finishes executing, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.

  // Extract the last vector element in the middle block. This will be the
  // initial value for the recurrence when jumping to the scalar loop.
  VPValue *PreviousDef = PhiR->getBackedgeValue();
  Value *Incoming = State.get(PreviousDef, UF - 1);
  auto *ExtractForScalar = Incoming;
  auto *IdxTy = Builder.getInt32Ty();
  if (VF.isVector()) {
    auto *One = ConstantInt::get(IdxTy, 1);
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
    auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
    ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
                                                    "vector.recur.extract");
  }
  // Extract the second last element in the middle block if the
  // Phi is used outside the loop. We need to extract the phi itself
  // and not the last element (the phi update in the current iteration). This
  // will be the value when jumping to the exit block from the LoopMiddleBlock,
  // when the scalar loop is not run at all.
  Value *ExtractForPhiUsedOutsideLoop = nullptr;
  if (VF.isVector()) {
    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
    auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
    ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
        Incoming, Idx, "vector.recur.extract.for.phi");
  } else if (UF > 1)
    // When the loop is unrolled without vectorizing, initialize
    // ExtractForPhiUsedOutsideLoop with the unrolled part just prior to the
    // last part of `Incoming`. This is analogous to the vectorized case above:
    // extracting the second last element when VF > 1.
    ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);

  // Fix the initial value of the original recurrence in the scalar loop.
  Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
  PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
  auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
  auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
  for (auto *BB : predecessors(LoopScalarPreHeader)) {
    auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
    Start->addIncoming(Incoming, BB);
  }

  Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
  Phi->setName("scalar.recur");

  // Finally, fix users of the recurrence outside the loop. The users will need
  // either the last value of the scalar recurrence or the last value of the
  // vector recurrence we extracted in the middle block. Since the loop is in
  // LCSSA form, we just need to find all the phi nodes for the original scalar
  // recurrence in the exit block, and then add an edge for the middle block.
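  // For example (illustrative names), an exit-block phi
  //   %s1.lcssa = phi [ %s1, %scalar.body ]
  // gets an extra incoming edge and becomes
  //   %s1.lcssa = phi [ %s1, %scalar.body ],
  //                   [ %vector.recur.extract.for.phi, %middle.block ]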
  // Note that LCSSA does not imply single entry when the original scalar loop
  // had multiple exiting edges (as we always run the last iteration in the
  // scalar epilogue); in that case, there is no edge from middle to exit and
  // thus no phis which need updating.
  if (!Cost->requiresScalarEpilogue(VF))
    for (PHINode &LCSSAPhi : LoopExitBlock->phis())
      if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
        LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
}

void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
                                       VPTransformState &State) {
  PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
  assert(Legal->isReductionVariable(OrigPhi) &&
         "Unable to find the reduction variable");
  const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();

  RecurKind RK = RdxDesc.getRecurrenceKind();
  TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
  Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
  setDebugLocFromInst(ReductionStartValue);

  VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
  // This is the vector-clone of the value that leaves the loop.
  Type *VecTy = State.get(LoopExitInstDef, 0)->getType();

  // Wrap flags are in general invalid after vectorization, clear them.
  clearReductionWrapFlags(RdxDesc, State);

  // Before each round, move the insertion point right between
  // the PHIs and the values we are going to write.
  // This allows us to write both PHINodes and the extractelement
  // instructions.
  Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());

  setDebugLocFromInst(LoopExitInst);

  Type *PhiTy = OrigPhi->getType();
  // If tail is folded by masking, the vector value to leave the loop should be
  // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
  // instead of the former. For an inloop reduction the reduction will already
  // be predicated, and does not need to be handled here.
  if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
      Value *Sel = nullptr;
      for (User *U : VecLoopExitInst->users()) {
        if (isa<SelectInst>(U)) {
          assert(!Sel && "Reduction exit feeding two selects");
          Sel = U;
        } else
          assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
      }
      assert(Sel && "Reduction exit feeds no select");
      State.reset(LoopExitInstDef, Sel, Part);

      // If the target can create a predicated operator for the reduction at no
      // extra cost in the loop (for example a predicated vadd), it can be
      // cheaper for the select to remain in the loop than be sunk out of it,
      // and so use the select value for the phi instead of the old
      // LoopExitValue.
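      // For example (illustrative, VF = 4), with tail folding the vector body
      // contains
      //   %rdx.next = add <4 x i32> %vec.phi, %val
      //   %sel = select <4 x i1> %mask, <4 x i32> %rdx.next, <4 x i32> %vec.phi
      // and making %sel, rather than %rdx.next, the phi's incoming value
      // keeps the predicated form inside the loop.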
      if (PreferPredicatedReductionSelect ||
          TTI->preferPredicatedReductionSelect(
              RdxDesc.getOpcode(), PhiTy,
              TargetTransformInfo::ReductionFlags())) {
        auto *VecRdxPhi =
            cast<PHINode>(State.get(PhiR, Part));
        VecRdxPhi->setIncomingValueForBlock(
            LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
      }
    }
  }

  // If the vector reduction can be performed in a smaller type, we truncate
  // then extend the loop exit value to enable InstCombine to evaluate the
  // entire expression in the smaller type.
  if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
    assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
    Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
    Builder.SetInsertPoint(
        LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
    VectorParts RdxParts(UF);
    for (unsigned Part = 0; Part < UF; ++Part) {
      RdxParts[Part] = State.get(LoopExitInstDef, Part);
      Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
      Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
                                        : Builder.CreateZExt(Trunc, VecTy);
      for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
        if (U != Trunc) {
          U->replaceUsesOfWith(RdxParts[Part], Extnd);
          RdxParts[Part] = Extnd;
        }
    }
    Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
    for (unsigned Part = 0; Part < UF; ++Part) {
      RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
      State.reset(LoopExitInstDef, RdxParts[Part], Part);
    }
  }

  // Reduce all of the unrolled parts into a single vector.
  Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
  unsigned Op = RecurrenceDescriptor::getOpcode(RK);

  // The middle block terminator has already been assigned a DebugLoc here (the
  // OrigLoop's single latch terminator). We want the whole middle block to
  // appear to execute on this line because: (a) it is all compiler generated,
  // (b) these instructions are always executed after evaluating the latch
  // conditional branch, and (c) other passes may add new predecessors which
  // terminate on this line. This is the easiest way to ensure we don't
  // accidentally cause an extra step back into the loop while debugging.
  setDebugLocFromInst(LoopMiddleBlock->getTerminator());
  if (PhiR->isOrdered())
    ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
  else {
    // Floating-point operations should have some FMF to enable the reduction.
    IRBuilderBase::FastMathFlagGuard FMFG(Builder);
    Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
    for (unsigned Part = 1; Part < UF; ++Part) {
      Value *RdxPart = State.get(LoopExitInstDef, Part);
      if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
        ReducedPartRdx = Builder.CreateBinOp(
            (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
      } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
        ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
                                           ReducedPartRdx, RdxPart);
      else
        ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
    }
  }

  // Create the reduction after the loop. Note that inloop reductions create
  // the target reduction in the loop using a Reduction recipe.
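  // For example (illustrative, VF = 4), for an integer add reduction this
  // emits something like
  //   %rdx = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %bin.rdx)
  // in the middle block.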
  if (VF.isVector() && !PhiR->isInLoop()) {
    ReducedPartRdx =
        createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
    // If the reduction can be performed in a smaller type, we need to extend
    // the reduction to the wider type before we branch to the original loop.
    if (PhiTy != RdxDesc.getRecurrenceType())
      ReducedPartRdx = RdxDesc.isSigned()
                           ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
                           : Builder.CreateZExt(ReducedPartRdx, PhiTy);
  }

  // Create a phi node that merges control-flow from the backedge-taken check
  // block and the middle block.
  PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
                                        LoopScalarPreHeader->getTerminator());
  for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
    BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
  BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);

  // Now, we need to fix the users of the reduction variable
  // inside and outside of the scalar remainder loop.

  // We know that the loop is in LCSSA form. We need to update the PHI nodes
  // in the exit blocks. See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
  if (!Cost->requiresScalarEpilogue(VF))
    for (PHINode &LCSSAPhi : LoopExitBlock->phis())
      if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
        LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);

  // Fix the scalar loop reduction variable with the incoming reduction sum
  // from the vector body and from the backedge value.
  int IncomingEdgeBlockIdx =
      OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
  assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
  // Pick the other block.
  int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
  OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
  OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
}

void InnerLoopVectorizer::clearReductionWrapFlags(
    const RecurrenceDescriptor &RdxDesc, VPTransformState &State) {
  RecurKind RK = RdxDesc.getRecurrenceKind();
  if (RK != RecurKind::Add && RK != RecurKind::Mul)
    return;

  Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
  assert(LoopExitInstr && "null loop exit instruction");
  SmallVector<Instruction *, 8> Worklist;
  SmallPtrSet<Instruction *, 8> Visited;
  Worklist.push_back(LoopExitInstr);
  Visited.insert(LoopExitInstr);

  while (!Worklist.empty()) {
    Instruction *Cur = Worklist.pop_back_val();
    if (isa<OverflowingBinaryOperator>(Cur))
      for (unsigned Part = 0; Part < UF; ++Part) {
        // FIXME: Should not rely on getVPValue at this point.
        Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
        cast<Instruction>(V)->dropPoisonGeneratingFlags();
      }

    for (User *U : Cur->users()) {
      Instruction *UI = cast<Instruction>(U);
      if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
          Visited.insert(UI).second)
        Worklist.push_back(UI);
    }
  }
}

void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
  for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
    if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
      // Some phis were already hand updated by the reduction and recurrence
      // code above, leave them alone.
      continue;

    auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
    // Non-instruction incoming values will have only one value.

    VPLane Lane = VPLane::getFirstLane();
    if (isa<Instruction>(IncomingValue) &&
        !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
                                           VF))
      Lane = VPLane::getLastLaneForVF(VF);

    // Can be a loop invariant incoming value or the last scalar value to be
    // extracted from the vectorized loop.
    // FIXME: Should not rely on getVPValue at this point.
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    Value *lastIncomingValue =
        OrigLoop->isLoopInvariant(IncomingValue)
            ? IncomingValue
            : State.get(State.Plan->getVPValue(IncomingValue, true),
                        VPIteration(UF - 1, Lane));
    LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
  }
}

void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
  // The basic block and loop containing the predicated instruction.
  auto *PredBB = PredInst->getParent();
  auto *VectorLoop = LI->getLoopFor(PredBB);

  // Initialize a worklist with the operands of the predicated instruction.
  SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());

  // Holds instructions that we need to analyze again. An instruction may be
  // reanalyzed if we don't yet know if we can sink it or not.
  SmallVector<Instruction *, 8> InstsToReanalyze;

  // Returns true if a given use occurs in the predicated block. Phi nodes use
  // their operands in their corresponding predecessor blocks.
  auto isBlockOfUsePredicated = [&](Use &U) -> bool {
    auto *I = cast<Instruction>(U.getUser());
    BasicBlock *BB = I->getParent();
    if (auto *Phi = dyn_cast<PHINode>(I))
      BB = Phi->getIncomingBlock(
          PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
    return BB == PredBB;
  };

  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when a pass
  // through the worklist doesn't sink a single instruction.
  bool Changed;
  do {
    // Add the instructions that need to be reanalyzed to the worklist, and
    // reset the changed indicator.
    Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
    InstsToReanalyze.clear();
    Changed = false;

    while (!Worklist.empty()) {
      auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());

      // We can't sink an instruction if it is a phi node, is not in the loop,
      // or may have side effects.
      if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
          I->mayHaveSideEffects())
        continue;

      // If the instruction is already in PredBB, check if we can sink its
      // operands. In that case, VPlan's sinkScalarOperands() succeeded in
      // sinking the scalar instruction I, hence it appears in PredBB; but it
      // may have failed to sink I's operands (recursively), which we try
      // (again) here.
      if (I->getParent() == PredBB) {
        Worklist.insert(I->op_begin(), I->op_end());
        continue;
      }

      // It's legal to sink the instruction if all its uses occur in the
      // predicated block. Otherwise, there's nothing to do yet, and we may
      // need to reanalyze the instruction.
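      // For example (illustrative), an address computation whose only user is
      // a predicated store in PredBB can be sunk into PredBB, while one that
      // also feeds an instruction outside PredBB must stay where it is.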
      if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
        InstsToReanalyze.push_back(I);
        continue;
      }

      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
      I->moveBefore(&*PredBB->getFirstInsertionPt());
      Worklist.insert(I->op_begin(), I->op_end());

      // The sinking may have enabled other instructions to be sunk, so we will
      // need to iterate.
      Changed = true;
    }
  } while (Changed);
}

void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
  for (PHINode *OrigPhi : OrigPHIsToFix) {
    VPWidenPHIRecipe *VPPhi =
        cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
    PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
    // Make sure the builder has a valid insert point.
    Builder.SetInsertPoint(NewPhi);
    for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
      VPValue *Inc = VPPhi->getIncomingValue(i);
      VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
      NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
    }
  }
}

bool InnerLoopVectorizer::useOrderedReductions(
    const RecurrenceDescriptor &RdxDesc) {
  return Cost->useOrderedReductions(RdxDesc);
}

void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
                                              VPWidenPHIRecipe *PhiR,
                                              VPTransformState &State) {
  PHINode *P = cast<PHINode>(PN);
  if (EnableVPlanNativePath) {
    // Currently we enter here in the VPlan-native path for non-induction
    // PHIs where all control flow is uniform. We simply widen these PHIs.
    // Create a vector phi with no operands - the vector phi operands will be
    // set at the end of vector code generation.
    Type *VecTy = (State.VF.isScalar())
                      ? PN->getType()
                      : VectorType::get(PN->getType(), State.VF);
    Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
    State.set(PhiR, VecPhi, 0);
    OrigPHIsToFix.push_back(P);

    return;
  }

  assert(PN->getParent() == OrigLoop->getHeader() &&
         "Non-header phis should have been handled elsewhere");

  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #1: We create a new vector PHI node with no incoming edges. We'll use
  // this value when we vectorize all of the instructions that use the PHI.

  assert(!Legal->isReductionVariable(P) &&
         "reductions should be handled elsewhere");

  setDebugLocFromInst(P);

  // This PHINode must be an induction variable.
  // Make sure that we know about it.
  assert(Legal->getInductionVars().count(P) && "Not an induction variable");

  InductionDescriptor II = Legal->getInductionVars().lookup(P);
  const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // FIXME: The newly created binary instructions should contain nsw/nuw
  // flags, which can be found from the original scalar operations.
  switch (II.getKind()) {
  case InductionDescriptor::IK_NoInduction:
    llvm_unreachable("Unknown induction");
  case InductionDescriptor::IK_IntInduction:
  case InductionDescriptor::IK_FpInduction:
    llvm_unreachable("Integer/fp induction is handled elsewhere.");
  case InductionDescriptor::IK_PtrInduction: {
    // Handle the pointer induction variable case.
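    // For example (illustrative), a scalarized, non-uniform pointer induction
    // with VF = 4 and UF = 1 produces four scalar "next.gep" addresses per
    // vector iteration, one per lane, instead of a single vector GEP.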
    assert(P->getType()->isPointerTy() && "Unexpected type.");

    if (Cost->isScalarAfterVectorization(P, State.VF)) {
      // This is the normalized GEP that starts counting at zero.
      Value *PtrInd =
          Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
      // Determine the number of scalars we need to generate for each unroll
      // iteration. If the instruction is uniform, we only need to generate the
      // first lane. Otherwise, we generate all VF values.
      bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF);
      assert((IsUniform || !State.VF.isScalable()) &&
             "Cannot scalarize a scalable VF");
      unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();

      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *PartStart =
            createStepForVF(Builder, PtrInd->getType(), VF, Part);

        for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
          Value *Idx = Builder.CreateAdd(
              PartStart, ConstantInt::get(PtrInd->getType(), Lane));
          Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
          Value *SclrGep = emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(),
                                                DL, II, State.CFG.PrevBB);
          SclrGep->setName("next.gep");
          State.set(PhiR, SclrGep, VPIteration(Part, Lane));
        }
      }
      return;
    }
    assert(isa<SCEVConstant>(II.getStep()) &&
           "Induction step not a SCEV constant!");
    Type *PhiType = II.getStep()->getType();

    // Build a pointer phi
    Value *ScalarStartValue = PhiR->getStartValue()->getLiveInIRValue();
    Type *ScStValueType = ScalarStartValue->getType();
    PHINode *NewPointerPhi =
        PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
    NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);

    // A pointer induction, performed by using a gep
    BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
    Instruction *InductionLoc = LoopLatch->getTerminator();
    const SCEV *ScalarStep = II.getStep();
    SCEVExpander Exp(*PSE.getSE(), DL, "induction");
    Value *ScalarStepValue =
        Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
    Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
    Value *NumUnrolledElems =
        Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
    Value *InductionGEP = GetElementPtrInst::Create(
        II.getElementType(), NewPointerPhi,
        Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
        InductionLoc);
    NewPointerPhi->addIncoming(InductionGEP, LoopLatch);

    // Create UF many actual address geps that use the pointer
    // phi as base and a vectorized version of the step value
    // (<step*0, ..., step*N>) as offset.
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      Type *VecPhiType = VectorType::get(PhiType, State.VF);
      Value *StartOffsetScalar =
          Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
      Value *StartOffset =
          Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
      // Create a vector of consecutive numbers from zero to VF.
      StartOffset =
          Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));

      Value *GEP = Builder.CreateGEP(
          II.getElementType(), NewPointerPhi,
          Builder.CreateMul(
              StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
              "vector.gep"));
      State.set(PhiR, GEP, Part);
    }
  }
  }
}

/// A helper function for checking whether an integer division-related
/// instruction may divide by zero (in which case it must be predicated if
/// executed conditionally in the scalar code).
/// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing
/// the division, but can do so w/o predication.
static bool mayDivideByZero(Instruction &I) {
  assert((I.getOpcode() == Instruction::UDiv ||
          I.getOpcode() == Instruction::SDiv ||
          I.getOpcode() == Instruction::URem ||
          I.getOpcode() == Instruction::SRem) &&
         "Unexpected instruction");
  Value *Divisor = I.getOperand(1);
  auto *CInt = dyn_cast<ConstantInt>(Divisor);
  return !CInt || CInt->isZero();
}

void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
                                               VPUser &ArgOperands,
                                               VPTransformState &State) {
  assert(!isa<DbgInfoIntrinsic>(I) &&
         "DbgInfoIntrinsic should have been dropped during VPlan construction");
  setDebugLocFromInst(&I);

  Module *M = I.getParent()->getParent()->getParent();
  auto *CI = cast<CallInst>(&I);

  SmallVector<Type *, 4> Tys;
  for (Value *ArgOperand : CI->args())
    Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));

  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

  // The flag shows whether we use an Intrinsic or a usual Call for the
  // vectorized version of the instruction.
  // Is it beneficial to perform the intrinsic call compared to the lib call?
  bool NeedToScalarize = false;
  InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
  InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
  bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
  assert((UseVectorIntrinsic || !NeedToScalarize) &&
         "Instruction should be scalarized elsewhere.");
  assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
         "Either the intrinsic cost or vector call cost must be valid");

  for (unsigned Part = 0; Part < UF; ++Part) {
    SmallVector<Type *, 2> TysForDecl = {CI->getType()};
    SmallVector<Value *, 4> Args;
    for (auto &I : enumerate(ArgOperands.operands())) {
      // Some intrinsics have a scalar argument - don't replace it with a
      // vector.
      Value *Arg;
      if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
        Arg = State.get(I.value(), Part);
      else {
        Arg = State.get(I.value(), VPIteration(0, 0));
        if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
          TysForDecl.push_back(Arg->getType());
      }
      Args.push_back(Arg);
    }

    Function *VectorF;
    if (UseVectorIntrinsic) {
      // Use vector version of the intrinsic.
      if (VF.isVector())
        TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
      VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
      assert(VectorF && "Can't retrieve vector intrinsic.");
    } else {
      // Use vector version of the function call.
      const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
#ifndef NDEBUG
      assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
             "Can't create vector function.");
#endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
  }
}

void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
  // We should not collect Scalars more than once per VF. Right now, this
  // function is called from collectUniformsAndScalars(), which already does
  // this check. Collecting Scalars for VF=1 does not make any sense.
  assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
         "This function should not be visited twice for the same VF");

  SmallSetVector<Instruction *, 8> Worklist;

  // These sets are used to seed the analysis with pointers used by memory
  // accesses that will remain scalar.
  SmallSetVector<Instruction *, 8> ScalarPtrs;
  SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
  auto *Latch = TheLoop->getLoopLatch();

  // A helper that returns true if the use of Ptr by MemAccess will be scalar.
  // The pointer operands of loads and stores will be scalar as long as the
  // memory access is not a gather or scatter operation. The value operand of a
  // store will remain scalar if the store is scalarized.
  auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
    InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");
    if (auto *Store = dyn_cast<StoreInst>(MemAccess))
      if (Ptr == Store->getValueOperand())
        return WideningDecision == CM_Scalarize;
    assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value nor a pointer operand");
    return WideningDecision != CM_GatherScatter;
  };

  // A helper that returns true if the given value is a bitcast or
  // getelementptr instruction contained in the loop.
  auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
    return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
            isa<GetElementPtrInst>(V)) &&
           !TheLoop->isLoopInvariant(V);
  };

  // A helper that evaluates a memory access's use of a pointer. If the use will
  // be a scalar use and the pointer is only used by memory accesses, we place
  // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
  // PossibleNonScalarPtrs.
  auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
    // We only care about bitcast and getelementptr instructions contained in
    // the loop.
    if (!isLoopVaryingBitCastOrGEP(Ptr))
      return;

    // If the pointer has already been identified as scalar (e.g., if it was
    // also identified as uniform), there's nothing to do.
    auto *I = cast<Instruction>(Ptr);
    if (Worklist.count(I))
      return;

    // If the use of the pointer will be a scalar use, and all users of the
    // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
    // place the pointer in PossibleNonScalarPtrs.
    if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
          return isa<LoadInst>(U) || isa<StoreInst>(U);
        }))
      ScalarPtrs.insert(I);
    else
      PossibleNonScalarPtrs.insert(I);
  };

  // We seed the scalars analysis with two classes of instructions: (1)
  // instructions marked uniform-after-vectorization and (2) bitcast,
  // getelementptr and (pointer) phi instructions used by memory accesses
  // requiring a scalar use.
  //
  // (1) Add to the worklist all instructions that have been identified as
  // uniform-after-vectorization.
  Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());

  // (2) Add to the worklist all bitcast and getelementptr instructions used by
  // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
  // scatter operation. The value operand of a store will remain scalar if the
  // store is scalarized.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (auto *Load = dyn_cast<LoadInst>(&I)) {
        evaluatePtrUse(Load, Load->getPointerOperand());
      } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
        evaluatePtrUse(Store, Store->getPointerOperand());
        evaluatePtrUse(Store, Store->getValueOperand());
      }
    }
  for (auto *I : ScalarPtrs)
    if (!PossibleNonScalarPtrs.count(I)) {
      LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
      Worklist.insert(I);
    }

  // Insert the forced scalars.
  // FIXME: Currently widenPHIInstruction() often creates a dead vector
  // induction variable when the PHI user is scalarized.
  auto ForcedScalar = ForcedScalars.find(VF);
  if (ForcedScalar != ForcedScalars.end())
    for (auto *I : ForcedScalar->second)
      Worklist.insert(I);

  // Expand the worklist by looking through any bitcasts and getelementptr
  // instructions we've already identified as scalar. This is similar to the
  // expansion step in collectLoopUniforms(); however, here we're only
  // expanding to include additional bitcasts and getelementptr instructions.
  unsigned Idx = 0;
  while (Idx != Worklist.size()) {
    Instruction *Dst = Worklist[Idx++];
    if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
      continue;
    auto *Src = cast<Instruction>(Dst->getOperand(0));
    if (llvm::all_of(Src->users(), [&](User *U) -> bool {
          auto *J = cast<Instruction>(U);
          return !TheLoop->contains(J) || Worklist.count(J) ||
                 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
                  isScalarUse(J, Src));
        })) {
      Worklist.insert(Src);
      LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
    }
  }

  // An induction variable will remain scalar if all users of the induction
  // variable and induction variable update remain scalar.
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // If tail-folding is applied, the primary induction variable will be used
    // to feed a vector compare.
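    // For example (illustrative, VF = 4), the widened primary IV feeds the
    // mask computation along the lines of
    //   %mask = icmp ule <4 x i64> %vec.iv, %backedge.taken.count.splat
    // so it cannot remain scalar.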
    if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
      continue;

    // Returns true if \p Indvar is a pointer induction that is used directly by
    // load/store instruction \p I.
    auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
                                              Instruction *I) {
      return Induction.second.getKind() ==
                 InductionDescriptor::IK_PtrInduction &&
             (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
             Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
    };

    // Determine if all users of the induction variable are scalar after
    // vectorization.
    auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             IsDirectLoadStoreFromPtrIndvar(Ind, I);
    });
    if (!ScalarInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // scalar after vectorization.
    auto ScalarIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
                 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
        });
    if (!ScalarIndUpdate)
      continue;

    // The induction variable and its update instruction will remain scalar.
    Worklist.insert(Ind);
    Worklist.insert(IndUpdate);
    LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
    LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
                      << "\n");
  }

  Scalars[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const {
  if (!blockNeedsPredicationForAnyReason(I->getParent()))
    return false;
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Load:
  case Instruction::Store: {
    if (!Legal->isMaskRequired(I))
      return false;
    auto *Ptr = getLoadStorePointerOperand(I);
    auto *Ty = getLoadStoreType(I);
    const Align Alignment = getLoadStoreAlignment(I);
    return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
                                TTI.isLegalMaskedGather(Ty, Alignment))
                            : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
                                TTI.isLegalMaskedScatter(Ty, Alignment));
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
    return mayDivideByZero(*I);
  }
  return false;
}

bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
    Instruction *I, ElementCount VF) {
  assert(isAccessInterleaved(I) && "Expecting interleaved access.");
  assert(getWideningDecision(I, VF) == CM_Unknown &&
         "Decision should not be set yet.");
  auto *Group = getInterleavedAccessGroup(I);
  assert(Group && "Must have a group.");

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  auto *ScalarTy = getLoadStoreType(I);
  if (hasIrregularType(ScalarTy, DL))
    return false;

  // Check if masking is required.
  // A Group may need masking for one of two reasons: it resides in a block that
  // needs predication, or it was decided to use masking to deal with gaps
  // (either a gap at the end of a load-access that may result in a speculative
  // load, or any gaps in a store-access).
  bool PredicatedAccessRequiresMasking =
      blockNeedsPredicationForAnyReason(I->getParent()) &&
      Legal->isMaskRequired(I);
  bool LoadAccessWithGapsRequiresEpilogMasking =
      isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
      !isScalarEpilogueAllowed();
  bool StoreAccessWithGapsRequiresMasking =
      isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
  if (!PredicatedAccessRequiresMasking &&
      !LoadAccessWithGapsRequiresEpilogMasking &&
      !StoreAccessWithGapsRequiresMasking)
    return true;

  // If masked interleaving is required, we expect that the user/target had
  // enabled it, because otherwise it either wouldn't have been created or
  // it should have been invalidated by the CostModel.
  assert(useMaskedInterleavedAccesses(TTI) &&
         "Masked interleave-groups for predicated accesses are not enabled.");

  if (Group->isReverse())
    return false;

  auto *Ty = getLoadStoreType(I);
  const Align Alignment = getLoadStoreAlignment(I);
  return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
                          : TTI.isLegalMaskedStore(Ty, Alignment);
}

bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
    Instruction *I, ElementCount VF) {
  // Get and ensure we have a valid memory instruction.
  assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");

  auto *Ptr = getLoadStorePointerOperand(I);
  auto *ScalarTy = getLoadStoreType(I);

  // In order to be widened, the pointer should be consecutive, first of all.
  if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
    return false;

  // If the instruction is a store located in a predicated block, it will be
  // scalarized.
  if (isScalarWithPredication(I))
    return false;

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  if (hasIrregularType(ScalarTy, DL))
    return false;

  return true;
}

void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
  // We should not collect Uniforms more than once per VF. Right now,
  // this function is called from collectUniformsAndScalars(), which
  // already does this check. Collecting Uniforms for VF=1 does not make any
  // sense.

  assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
         "This function should not be visited twice for the same VF");

  // Visit the list of Uniforms. If we don't find any uniform value, we won't
  // analyze it again; Uniforms.count(VF) will return 1.
  Uniforms[VF].clear();

  // We now know that the loop is vectorizable!
  // Collect instructions inside the loop that will remain uniform after
  // vectorization.

  // Global values, params and instructions outside of current loop are out of
  // scope.
  auto isOutOfScope = [&](Value *V) -> bool {
    Instruction *I = dyn_cast<Instruction>(V);
    return (!I || !TheLoop->contains(I));
  };

  // Worklist containing uniform instructions demanding lane 0.
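  // "Demanding lane 0" means only the first lane of the value is needed. For
  // example (illustrative), the address feeding a consecutive widened load
  // contributes only its lane-0 value, as the base of the vector access.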
  SetVector<Instruction *> Worklist;
  BasicBlock *Latch = TheLoop->getLoopLatch();

  // Add uniform instructions demanding lane 0 to the worklist. Instructions
  // that are scalar with predication must not be considered uniform after
  // vectorization, because that would create an erroneous replicating region
  // where only a single instance out of VF should be formed.
  // TODO: optimize such seldom cases if found important, see PR40816.
  auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
    if (isOutOfScope(I)) {
      LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
                        << *I << "\n");
      return;
    }
    if (isScalarWithPredication(I)) {
      LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
                        << *I << "\n");
      return;
    }
    LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
    Worklist.insert(I);
  };

  // Start with the conditional branch. If the branch condition is an
  // instruction contained in the loop that is only used by the branch, it is
  // uniform.
  auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
  if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
    addToWorklistIfAllowed(Cmp);

  auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
    InstWidening WideningDecision = getWideningDecision(I, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");

    // A uniform memory op is itself uniform. We exclude uniform stores
    // here as they demand the last lane, not the first one.
    if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
      assert(WideningDecision == CM_Scalarize);
      return true;
    }

    return (WideningDecision == CM_Widen ||
            WideningDecision == CM_Widen_Reverse ||
            WideningDecision == CM_Interleave);
  };

  // Returns true if Ptr is the pointer operand of a memory access instruction
  // I, and I is known to not require scalarization.
  auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
    return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
  };

  // Holds a list of values which are known to have at least one uniform use.
  // Note that there may be other uses which aren't uniform. A "uniform use"
  // here is something which only demands lane 0 of the unrolled iterations;
  // it does not imply that all lanes produce the same value (e.g. this is not
  // the usual meaning of uniform).
  SetVector<Value *> HasUniformUse;

  // Scan the loop for instructions which are either a) known to have only
  // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
        switch (II->getIntrinsicID()) {
        case Intrinsic::sideeffect:
        case Intrinsic::experimental_noalias_scope_decl:
        case Intrinsic::assume:
        case Intrinsic::lifetime_start:
        case Intrinsic::lifetime_end:
          if (TheLoop->hasLoopInvariantOperands(&I))
            addToWorklistIfAllowed(&I);
          break;
        default:
          break;
        }
      }

      // ExtractValue instructions must be uniform, because the operands are
      // known to be loop-invariant.
      if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
        assert(isOutOfScope(EVI->getAggregateOperand()) &&
               "Expected aggregate value to be loop invariant");
        addToWorklistIfAllowed(EVI);
        continue;
      }

      // If there's no pointer operand, there's nothing to do.
      auto *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;

      // A uniform memory op is itself uniform. We exclude uniform stores
      // here as they demand the last lane, not the first one.
      if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
        addToWorklistIfAllowed(&I);

      if (isUniformDecision(&I, VF)) {
        assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
        HasUniformUse.insert(Ptr);
      }
    }

  // Add to the worklist any operands which have *only* uniform (e.g. lane 0
  // demanding) users. Since loops are assumed to be in LCSSA form, this
  // disallows uses outside the loop as well.
  for (auto *V : HasUniformUse) {
    if (isOutOfScope(V))
      continue;
    auto *I = cast<Instruction>(V);
    auto UsersAreMemAccesses =
        llvm::all_of(I->users(), [&](User *U) -> bool {
          return isVectorizedMemAccessUse(cast<Instruction>(U), V);
        });
    if (UsersAreMemAccesses)
      addToWorklistIfAllowed(I);
  }

  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should be already inside Worklist. This ensures
  // a uniform instruction will only be used by uniform instructions.
  unsigned idx = 0;
  while (idx != Worklist.size()) {
    Instruction *I = Worklist[idx++];

    for (auto OV : I->operand_values()) {
      // isOutOfScope operands cannot be uniform instructions.
      if (isOutOfScope(OV))
        continue;
      // First-order recurrence Phi's should typically be considered
      // non-uniform.
      auto *OP = dyn_cast<PHINode>(OV);
      if (OP && Legal->isFirstOrderRecurrence(OP))
        continue;
      // If all the users of the operand are uniform, then add the
      // operand into the uniform worklist.
      auto *OI = cast<Instruction>(OV);
      if (llvm::all_of(OI->users(), [&](User *U) -> bool {
            auto *J = cast<Instruction>(U);
            return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
          }))
        addToWorklistIfAllowed(OI);
    }
  }

  // For an instruction to be added into Worklist above, all its users inside
  // the loop should also be in Worklist. However, this condition cannot be
  // true for phi nodes that form a cyclic dependence. We must process phi
  // nodes separately. An induction variable will remain uniform if all users
  // of the induction variable and induction variable update remain uniform.
  // The code below handles both pointer and non-pointer induction variables.
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are uniform after
    // vectorization.
    auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, Ind);
    });
    if (!UniformInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // uniform after vectorization.
    auto UniformIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
                 isVectorizedMemAccessUse(I, IndUpdate);
        });
    if (!UniformIndUpdate)
      continue;

    // The induction variable and its update instruction will remain uniform.
    addToWorklistIfAllowed(Ind);
    addToWorklistIfAllowed(IndUpdate);
  }

  Uniforms[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationCostModel::runtimeChecksRequired() {
  LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");

  if (Legal->getRuntimePointerChecking()->Need) {
    reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
        "runtime pointer checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  if (!PSE.getUnionPredicate().getPredicates().empty()) {
    reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
        "runtime SCEV checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  // FIXME: Avoid specializing for stride==1 instead of bailing out.
  if (!Legal->getLAI()->getSymbolicStrides().empty()) {
    reportVectorizationFailure("Runtime stride check for small trip count",
        "runtime stride == 1 checks needed. Enable vectorization of "
        "this loop without such check by compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  return false;
}

ElementCount
LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
  if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
    return ElementCount::getScalable(0);

  if (Hints->isScalableVectorizationDisabled()) {
    reportVectorizationInfo("Scalable vectorization is explicitly disabled",
                            "ScalableVectorizationDisabled", ORE, TheLoop);
    return ElementCount::getScalable(0);
  }

  LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");

  auto MaxScalableVF = ElementCount::getScalable(
      std::numeric_limits<ElementCount::ScalarTy>::max());

  // Test that the loop-vectorizer can legalize all operations for this MaxVF.
  // FIXME: While for scalable vectors this is currently sufficient, this should
  // be replaced by a more detailed mechanism that filters out specific VFs,
  // instead of invalidating vectorization for a whole set of VFs based on the
  // MaxVF.

  // Disable scalable vectorization if the loop contains unsupported reductions.
  if (!canVectorizeReductions(MaxScalableVF)) {
    reportVectorizationInfo(
        "Scalable vectorization not supported for the reduction "
        "operations found in this loop.",
        "ScalableVFUnfeasible", ORE, TheLoop);
    return ElementCount::getScalable(0);
  }

  // Disable scalable vectorization if the loop contains any instructions
  // with element types not supported for scalable vectors.
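  // (For example, types such as x86_fp80 or i128 typically have no
  // scalable-vector equivalent; the list is illustrative, the real answer
  // comes from TTI below.)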
  if (any_of(ElementTypesInLoop, [&](Type *Ty) {
        return !Ty->isVoidTy() &&
               !this->TTI.isElementTypeLegalForScalableVector(Ty);
      })) {
    reportVectorizationInfo("Scalable vectorization is not supported "
                            "for all element types found in this loop.",
                            "ScalableVFUnfeasible", ORE, TheLoop);
    return ElementCount::getScalable(0);
  }

  if (Legal->isSafeForAnyVectorWidth())
    return MaxScalableVF;

  // Limit MaxScalableVF by the maximum safe dependence distance.
  Optional<unsigned> MaxVScale = TTI.getMaxVScale();
  if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
    MaxVScale =
        TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
  MaxScalableVF = ElementCount::getScalable(
      MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
  if (!MaxScalableVF)
    reportVectorizationInfo(
        "Max legal vector width too small, scalable vectorization "
        "unfeasible.",
        "ScalableVFUnfeasible", ORE, TheLoop);

  return MaxScalableVF;
}

FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
    unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) {
  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();

  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed as MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
  unsigned MaxSafeElements =
      PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);

  auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
  auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);

  LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
                    << ".\n");
  LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
                    << ".\n");

  // First analyze the UserVF, fall back if the UserVF should be ignored.
  if (UserVF) {
    auto MaxSafeUserVF =
        UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;

    if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
      // If `VF=vscale x N` is safe, then so is `VF=N`.
      if (UserVF.isScalable())
        return FixedScalableVFPair(
            ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
      else
        return UserVF;
    }

    assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));

    // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
    // is better to ignore the hint and let the compiler choose a suitable VF.
FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
    unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) {
  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();

  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
  unsigned MaxSafeElements =
      PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);

  auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
  auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);

  LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
                    << ".\n");
  LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
                    << ".\n");

  // First analyze the UserVF, fall back if the UserVF should be ignored.
  if (UserVF) {
    auto MaxSafeUserVF =
        UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;

    if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
      // If `VF=vscale x N` is safe, then so is `VF=N`.
      if (UserVF.isScalable())
        return FixedScalableVFPair(
            ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
      else
        return UserVF;
    }

    assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));

    // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
    // is better to ignore the hint and let the compiler choose a suitable VF.
    if (!UserVF.isScalable()) {
      LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                        << " is unsafe, clamping to max safe VF="
                        << MaxSafeFixedVF << ".\n");
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                          TheLoop->getStartLoc(),
                                          TheLoop->getHeader())
               << "User-specified vectorization factor "
               << ore::NV("UserVectorizationFactor", UserVF)
               << " is unsafe, clamping to maximum safe vectorization factor "
               << ore::NV("VectorizationFactor", MaxSafeFixedVF);
      });
      return MaxSafeFixedVF;
    }

    if (!TTI.supportsScalableVectors() &&
        !ForceTargetSupportsScalableVectors) {
      LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                        << " is ignored because scalable vectors are not "
                           "available.\n");
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                          TheLoop->getStartLoc(),
                                          TheLoop->getHeader())
               << "User-specified vectorization factor "
               << ore::NV("UserVectorizationFactor", UserVF)
               << " is ignored because the target does not support scalable "
                  "vectors. The compiler will pick a more suitable value.";
      });
    } else {
      LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                        << " is unsafe. Ignoring scalable UserVF.\n");
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                          TheLoop->getStartLoc(),
                                          TheLoop->getHeader())
               << "User-specified vectorization factor "
               << ore::NV("UserVectorizationFactor", UserVF)
               << " is unsafe. Ignoring the hint to let the compiler pick a "
                  "more suitable value.";
      });
    }
  }

  LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
                    << " / " << WidestType << " bits.\n");

  FixedScalableVFPair Result(ElementCount::getFixed(1),
                             ElementCount::getScalable(0));
  if (auto MaxVF =
          getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
                                  MaxSafeFixedVF, FoldTailByMasking))
    Result.FixedVF = MaxVF;

  if (auto MaxVF =
          getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
                                  MaxSafeScalableVF, FoldTailByMasking))
    if (MaxVF.isScalable()) {
      Result.ScalableVF = MaxVF;
      LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
                        << "\n");
    }

  return Result;
}
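// Worked example (hypothetical target): with 128-bit vector registers and a
// widest loop type of i32, the fixed-width path above yields a feasible
// MaxVF of 128 / 32 = 4 lanes, assuming no stricter dependence limit.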
FixedScalableVFPair
LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
  if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this, since the address is still likely
    // to be dynamically uniform if the target can skip the checks.
    reportVectorizationFailure(
        "Not inserting runtime ptr check for divergent target",
        "runtime pointer checks needed. Not enabled for divergent target",
        "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
    return FixedScalableVFPair::getNone();
  }

  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
  if (TC == 1) {
    reportVectorizationFailure("Single iteration (non) loop",
        "loop trip count is one, irrelevant for vectorization",
        "SingleIterationLoop", ORE, TheLoop);
    return FixedScalableVFPair::getNone();
  }

  switch (ScalarEpilogueStatus) {
  case CM_ScalarEpilogueAllowed:
    return computeFeasibleMaxVF(TC, UserVF, false);
  case CM_ScalarEpilogueNotAllowedUsePredicate:
    LLVM_FALLTHROUGH;
  case CM_ScalarEpilogueNotNeededUsePredicate:
    LLVM_DEBUG(
        dbgs() << "LV: vector predicate hint/switch found.\n"
               << "LV: Not allowing scalar epilogue, creating predicated "
               << "vector loop.\n");
    break;
  case CM_ScalarEpilogueNotAllowedLowTripLoop:
    // fallthrough as a special case of OptForSize
  case CM_ScalarEpilogueNotAllowedOptSize:
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
      LLVM_DEBUG(
          dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
    else
      LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
                        << "count.\n");

    // Bail if runtime checks are required, which are not good when optimising
    // for size.
    if (runtimeChecksRequired())
      return FixedScalableVFPair::getNone();

    break;
  }

  // The only loops we can vectorize without a scalar epilogue are loops with
  // a bottom-test and a single exiting block. We'd have to handle the fact
  // that not every instruction executes on the last iteration. This will
  // require a lane mask which varies through the vector loop body. (TODO)
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    // If there was a tail-folding hint/switch, but we can't fold the tail by
    // masking, fall back to vectorization with a scalar epilogue.
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
      LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                           "scalar epilogue instead.\n");
      ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
      return computeFeasibleMaxVF(TC, UserVF, false);
    }
    return FixedScalableVFPair::getNone();
  }

  // Now try the tail folding.

  // Invalidate interleave groups that require an epilogue if we can't mask
  // the interleave-group.
  if (!useMaskedInterleavedAccesses(TTI)) {
    assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
           "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
    InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
  }
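  // Illustration of why the tail matters here (hypothetical numbers): with
  // TC = 100 and VF = 8, twelve vector iterations cover 96 elements, and the
  // remaining 4 iterations need either a scalar epilogue or a masked
  // (tail-folded) final vector iteration.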
  FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
  // Avoid tail folding if the trip count is known to be a multiple of any VF
  // we chose.
  // FIXME: The condition below pessimises the case for fixed-width vectors,
  // when scalable VFs are also candidates for vectorization.
  if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
    ElementCount MaxFixedVF = MaxFactors.FixedVF;
    assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
           "MaxFixedVF must be a power of 2");
    unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
                                   : MaxFixedVF.getFixedValue();
    ScalarEvolution *SE = PSE.getSE();
    const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
    const SCEV *ExitCount = SE->getAddExpr(
        BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
    const SCEV *Rem = SE->getURemExpr(
        SE->applyLoopGuards(ExitCount, TheLoop),
        SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
    if (Rem->isZero()) {
      // Accept MaxFixedVF if we do not have a tail.
      LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
      return MaxFactors;
    }
  }

  // For scalable vectors, don't use tail folding as this is currently not
  // yet supported. The code is likely to have ended up here if the tripcount
  // is low, in which case it makes sense not to use scalable vectors.
  if (MaxFactors.ScalableVF.isVector())
    MaxFactors.ScalableVF = ElementCount::getScalable(0);

  // If we don't know the precise trip count, or if the trip count that we
  // found modulo the vectorization factor is not zero, try to fold the tail
  // by masking.
  // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
  if (Legal->prepareToFoldTailByMasking()) {
    FoldTailByMasking = true;
    return MaxFactors;
  }

  // If there was a tail-folding hint/switch, but we can't fold the tail by
  // masking, fall back to vectorization with a scalar epilogue.
  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                         "scalar epilogue instead.\n");
    ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
    return MaxFactors;
  }

  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
    return FixedScalableVFPair::getNone();
  }

  if (TC == 0) {
    reportVectorizationFailure(
        "Unable to calculate the loop count due to complex control flow",
        "unable to calculate the loop count due to complex control flow",
        "UnknownLoopCountComplexCFG", ORE, TheLoop);
    return FixedScalableVFPair::getNone();
  }

  reportVectorizationFailure(
      "Cannot optimize for size and vectorize at the same time.",
      "cannot optimize for size and vectorize at the same time. "
      "Enable vectorization of this loop with '#pragma clang loop "
      "vectorize(enable)' when compiling with -Os/-Oz",
      "NoTailLoopWithOptForSize", ORE, TheLoop);
  return FixedScalableVFPair::getNone();
}
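// Worked example for the divisibility check above (hypothetical numbers):
// with a known TC of 64, MaxFixedVF = 8 and UserIC = 2, MaxVFtimesIC is 16
// and ExitCount % 16 == 0, so no tail remains and tail folding is skipped.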
ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
    unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
    const ElementCount &MaxSafeVF, bool FoldTailByMasking) {
  bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
  TypeSize WidestRegister = TTI.getRegisterBitWidth(
      ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
                           : TargetTransformInfo::RGK_FixedWidthVector);

  // Convenience function to return the minimum of two ElementCounts.
  auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
    assert((LHS.isScalable() == RHS.isScalable()) &&
           "Scalable flags must match");
    return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
  };

  // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
  auto MaxVectorElementCount = ElementCount::get(
      PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
      ComputeScalableMaxVF);
  MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
  LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
                    << (MaxVectorElementCount * WidestType) << " bits.\n");

  if (!MaxVectorElementCount) {
    LLVM_DEBUG(dbgs() << "LV: The target has no "
                      << (ComputeScalableMaxVF ? "scalable" : "fixed")
                      << " vector registers.\n");
    return ElementCount::getFixed(1);
  }

  const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
  if (ConstTripCount &&
      ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
      (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) {
    // If the loop trip count (TC) is known at compile time, there is no
    // point in choosing a VF greater than TC (as done in the loop below).
    // Select the maximum power of two which doesn't exceed TC.
    // If MaxVectorElementCount is scalable, we only fall back on a fixed VF
    // when the TC is less than or equal to the known number of lanes.
    auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount);
    LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
                         "exceeding the constant trip count: "
                      << ClampedConstTripCount << "\n");
    return ElementCount::getFixed(ClampedConstTripCount);
  }
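  // Worked example (hypothetical): ConstTripCount = 6 with
  // MaxVectorElementCount = 8 and no tail folding clamps the VF to
  // PowerOf2Floor(6) = 4, since any wider VF would never run a full vector
  // iteration.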
  ElementCount MaxVF = MaxVectorElementCount;
  if (TTI.shouldMaximizeVectorBandwidth() ||
      (MaximizeBandwidth && isScalarEpilogueAllowed())) {
    auto MaxVectorElementCountMaxBW = ElementCount::get(
        PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
        ComputeScalableMaxVF);
    MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);

    // Collect all viable vectorization factors larger than the default MaxVF
    // (i.e. MaxVectorElementCount).
    SmallVector<ElementCount, 8> VFs;
    for (ElementCount VS = MaxVectorElementCount * 2;
         ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
      VFs.push_back(VS);

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than
    // existing ones.
    for (int i = RUs.size() - 1; i >= 0; --i) {
      bool Selected = true;
      for (auto &pair : RUs[i].MaxLocalUsers) {
        unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
        if (pair.second > TargetNumRegisters)
          Selected = false;
      }
      if (Selected) {
        MaxVF = VFs[i];
        break;
      }
    }
    if (ElementCount MinVF =
            TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
      if (ElementCount::isKnownLT(MaxVF, MinVF)) {
        LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
                          << ") with target's minimum: " << MinVF << '\n');
        MaxVF = MinVF;
      }
    }
  }
  return MaxVF;
}

bool LoopVectorizationCostModel::isMoreProfitable(
    const VectorizationFactor &A, const VectorizationFactor &B) const {
  InstructionCost CostA = A.Cost;
  InstructionCost CostB = B.Cost;

  unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);

  if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
      MaxTripCount) {
    // If we are folding the tail and the trip count is a known (possibly
    // small) constant, the trip count will be rounded up to an integer
    // number of iterations. The total cost will be
    // PerIterationCost*ceil(TripCount/VF), which we compare directly. When
    // not folding the tail, the total cost will be
    // PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is
    // approximated with the per-lane cost below instead of using the
    // tripcount as here.
    auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
    auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
    return RTCostA < RTCostB;
  }

  // Improve estimate for the vector width if it is scalable.
  unsigned EstimatedWidthA = A.Width.getKnownMinValue();
  unsigned EstimatedWidthB = B.Width.getKnownMinValue();
  if (Optional<unsigned> VScale = TTI.getVScaleForTuning()) {
    if (A.Width.isScalable())
      EstimatedWidthA *= VScale.getValue();
    if (B.Width.isScalable())
      EstimatedWidthB *= VScale.getValue();
  }

  // Assume vscale may be larger than 1 (or the value being tuned for),
  // so that scalable vectorization is slightly favorable over fixed-width
  // vectorization.
  if (A.Width.isScalable() && !B.Width.isScalable())
    return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);

  // To avoid the need for FP division:
  //   (CostA / A.Width) < (CostB / B.Width)
  //   <=> (CostA * B.Width) < (CostB * A.Width)
  return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
}
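// Worked example of the cross-multiplied comparison above (hypothetical
// costs): A = {Width 4, Cost 6} vs. B = {Width 8, Cost 10} compares
// 6 * 8 = 48 against 10 * 4 = 40; 48 < 40 is false, so A is not deemed more
// profitable and the wider factor B is kept.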
VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
    const ElementCountSet &VFCandidates) {
  InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
  LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
  assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
  assert(VFCandidates.count(ElementCount::getFixed(1)) &&
         "Expected Scalar VF to be a candidate");

  const VectorizationFactor ScalarCost(ElementCount::getFixed(1),
                                       ExpectedCost);
  VectorizationFactor ChosenFactor = ScalarCost;

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  if (ForceVectorization && VFCandidates.size() > 1) {
    // Ignore scalar width, because the user explicitly wants vectorization.
    // Initialize cost to max so that VF = 2 is, at least, chosen during cost
    // evaluation.
    ChosenFactor.Cost = InstructionCost::getMax();
  }

  SmallVector<InstructionVFPair> InvalidCosts;
  for (const auto &i : VFCandidates) {
    // The cost for scalar VF=1 is already calculated, so ignore it.
    if (i.isScalar())
      continue;

    VectorizationCostTy C = expectedCost(i, &InvalidCosts);
    VectorizationFactor Candidate(i, C.first);

#ifndef NDEBUG
    unsigned AssumedMinimumVscale = 1;
    if (Optional<unsigned> VScale = TTI.getVScaleForTuning())
      AssumedMinimumVscale = VScale.getValue();
    unsigned Width =
        Candidate.Width.isScalable()
            ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
            : Candidate.Width.getFixedValue();
    LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
                      << " costs: " << (Candidate.Cost / Width));
    if (i.isScalable())
      LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
                        << AssumedMinimumVscale << ")");
    LLVM_DEBUG(dbgs() << ".\n");
#endif

    if (!C.second && !ForceVectorization) {
      LLVM_DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }

    // If profitable, add it to the ProfitableVF list.
    if (isMoreProfitable(Candidate, ScalarCost))
      ProfitableVFs.push_back(Candidate);

    if (isMoreProfitable(Candidate, ChosenFactor))
      ChosenFactor = Candidate;
  }

  // Emit a report of VFs with invalid costs in the loop.
  if (!InvalidCosts.empty()) {
    // Group the remarks per instruction, keeping the instruction order from
    // InvalidCosts.
    std::map<Instruction *, unsigned> Numbering;
    unsigned I = 0;
    for (auto &Pair : InvalidCosts)
      if (!Numbering.count(Pair.first))
        Numbering[Pair.first] = I++;

    // Sort the list, first on instruction(number) then on VF.
    llvm::sort(InvalidCosts,
               [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
                 if (Numbering[A.first] != Numbering[B.first])
                   return Numbering[A.first] < Numbering[B.first];
                 ElementCountComparator ECC;
                 return ECC(A.second, B.second);
               });

    // For a list of ordered instruction-vf pairs:
    //   [(load, vf1), (load, vf2), (store, vf1)]
    // group the instructions together to emit separate remarks for:
    //   load  (vf1, vf2)
    //   store (vf1)
    auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
    auto Subset = ArrayRef<InstructionVFPair>();
    do {
      if (Subset.empty())
        Subset = Tail.take_front(1);

      Instruction *I = Subset.front().first;

      // If the next instruction is different, or if there are no other pairs,
      // emit a remark for the collated subset. e.g.
      //   [(load, vf1), (load, vf2)]
      // to emit:
      //   remark: invalid costs for 'load' at VF=(vf1, vf2)
      if (Subset == Tail || Tail[Subset.size()].first != I) {
        std::string OutString;
        raw_string_ostream OS(OutString);
        assert(!Subset.empty() && "Unexpected empty range");
        OS << "Instruction with invalid costs prevented vectorization at VF=(";
"" : ", ") 5827 << Pair.second; 5828 OS << "):"; 5829 if (auto *CI = dyn_cast<CallInst>(I)) 5830 OS << " call to " << CI->getCalledFunction()->getName(); 5831 else 5832 OS << " " << I->getOpcodeName(); 5833 OS.flush(); 5834 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); 5835 Tail = Tail.drop_front(Subset.size()); 5836 Subset = {}; 5837 } else 5838 // Grow the subset by one element 5839 Subset = Tail.take_front(Subset.size() + 1); 5840 } while (!Tail.empty()); 5841 } 5842 5843 if (!EnableCondStoresVectorization && NumPredStores) { 5844 reportVectorizationFailure("There are conditional stores.", 5845 "store that is conditionally executed prevents vectorization", 5846 "ConditionalStore", ORE, TheLoop); 5847 ChosenFactor = ScalarCost; 5848 } 5849 5850 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 5851 ChosenFactor.Cost >= ScalarCost.Cost) dbgs() 5852 << "LV: Vectorization seems to be not beneficial, " 5853 << "but was forced by a user.\n"); 5854 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 5855 return ChosenFactor; 5856 } 5857 5858 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 5859 const Loop &L, ElementCount VF) const { 5860 // Cross iteration phis such as reductions need special handling and are 5861 // currently unsupported. 5862 if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) { 5863 return Legal->isFirstOrderRecurrence(&Phi) || 5864 Legal->isReductionVariable(&Phi); 5865 })) 5866 return false; 5867 5868 // Phis with uses outside of the loop require special handling and are 5869 // currently unsupported. 5870 for (auto &Entry : Legal->getInductionVars()) { 5871 // Look for uses of the value of the induction at the last iteration. 5872 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 5873 for (User *U : PostInc->users()) 5874 if (!L.contains(cast<Instruction>(U))) 5875 return false; 5876 // Look for uses of penultimate value of the induction. 5877 for (User *U : Entry.first->users()) 5878 if (!L.contains(cast<Instruction>(U))) 5879 return false; 5880 } 5881 5882 // Induction variables that are widened require special handling that is 5883 // currently not supported. 5884 if (any_of(Legal->getInductionVars(), [&](auto &Entry) { 5885 return !(this->isScalarAfterVectorization(Entry.first, VF) || 5886 this->isProfitableToScalarize(Entry.first, VF)); 5887 })) 5888 return false; 5889 5890 // Epilogue vectorization code has not been auditted to ensure it handles 5891 // non-latch exits properly. It may be fine, but it needs auditted and 5892 // tested. 5893 if (L.getExitingBlock() != L.getLoopLatch()) 5894 return false; 5895 5896 return true; 5897 } 5898 5899 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( 5900 const ElementCount VF) const { 5901 // FIXME: We need a much better cost-model to take different parameters such 5902 // as register pressure, code size increase and cost of extra branches into 5903 // account. For now we apply a very crude heuristic and only consider loops 5904 // with vectorization factors larger than a certain value. 5905 // We also consider epilogue vectorization unprofitable for targets that don't 5906 // consider interleaving beneficial (eg. MVE). 
bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
    const ElementCount VF) const {
  // FIXME: We need a much better cost-model to take different parameters such
  // as register pressure, code size increase and cost of extra branches into
  // account. For now we apply a very crude heuristic and only consider loops
  // with vectorization factors larger than a certain value.
  // We also consider epilogue vectorization unprofitable for targets that
  // don't consider interleaving beneficial (e.g. MVE).
  if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
    return false;
  if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
    return true;
  return false;
}

VectorizationFactor
LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
    const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
  VectorizationFactor Result = VectorizationFactor::Disabled();
  if (!EnableEpilogueVectorization) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
    return Result;
  }

  if (!isScalarEpilogueAllowed()) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
                  "allowed.\n";);
    return Result;
  }

  // Not really a cost consideration, but check for unsupported cases here to
  // simplify the logic.
  if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
                  "not a supported candidate.\n";);
    return Result;
  }

  if (EpilogueVectorizationForceVF > 1) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
    ElementCount ForcedEC =
        ElementCount::getFixed(EpilogueVectorizationForceVF);
    if (LVP.hasPlanWithVF(ForcedEC))
      return {ForcedEC, 0};
    else {
      LLVM_DEBUG(
          dbgs()
          << "LEV: Epilogue vectorization forced factor is not viable.\n";);
      return Result;
    }
  }

  if (TheLoop->getHeader()->getParent()->hasOptSize() ||
      TheLoop->getHeader()->getParent()->hasMinSize()) {
    LLVM_DEBUG(
        dbgs()
        << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
    return Result;
  }

  auto FixedMainLoopVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
  if (MainLoopVF.isScalable())
    LLVM_DEBUG(
        dbgs() << "LEV: Epilogue vectorization using scalable vectors not "
                  "yet supported. Converting to fixed-width (VF="
               << FixedMainLoopVF << ") instead\n");

  if (!isEpilogueVectorizationProfitable(FixedMainLoopVF)) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
                         "this loop\n");
    return Result;
  }

  for (auto &NextVF : ProfitableVFs)
    if (ElementCount::isKnownLT(NextVF.Width, FixedMainLoopVF) &&
        (Result.Width.getFixedValue() == 1 ||
         isMoreProfitable(NextVF, Result)) &&
        LVP.hasPlanWithVF(NextVF.Width))
      Result = NextVF;

  if (Result != VectorizationFactor::Disabled())
    LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
                      << Result.Width.getFixedValue() << "\n";);
  return Result;
}

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();
  for (Type *T : ElementTypesInLoop) {
    MinWidth = std::min<unsigned>(
        MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
    MaxWidth = std::max<unsigned>(
        MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
  }
  return {MinWidth, MaxWidth};
}
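// For example, a loop whose widening-relevant element types are i8 and float
// yields {8, 32} from the query above: the 8-bit minimum feeds the
// maximize-bandwidth heuristic and the 32-bit maximum bounds the default VF.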
void LoopVectorizationCostModel::collectElementTypesForWidening() {
  ElementTypesInLoop.clear();
  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the loop.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Type *T = I.getType();

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (auto *PN = dyn_cast<PHINode>(&I)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        const RecurrenceDescriptor &RdxDesc =
            Legal->getReductionVars().find(PN)->second;
        if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
            TTI.preferInLoopReduction(RdxDesc.getOpcode(),
                                      RdxDesc.getRecurrenceType(),
                                      TargetTransformInfo::ReductionFlags()))
          continue;
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      // Ignore loaded pointer types and stored pointer types that are not
      // vectorizable.
      //
      // FIXME: The check here attempts to predict whether a load or store
      //        will be vectorized. We only know this for certain after a VF
      //        has been selected. Here, we assume that if an access can be
      //        vectorized, it will be. We should also look at extending this
      //        optimization to non-pointer types.
      //
      if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
          !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
        continue;

      ElementTypesInLoop.insert(T);
    }
  }
}
unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
                                                           unsigned LoopCost) {
  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop
  // overhead. There are many micro-architectural considerations that we
  // can't predict at this level. For example, frontend pressure (on decode
  // or fetch) due to code size, or the number and capabilities of the
  // execution ports.
  //
  // We use the following heuristics to select the interleave count:
  // 1. If the code has reductions, then we interleave to break the cross
  //    iteration dependency.
  // 2. If the loop is really small, then we interleave to reduce the loop
  //    overhead.
  // 3. We don't interleave if we think that we will spill registers to
  //    memory due to the increased register pressure.

  if (!isScalarEpilogueAllowed())
    return 1;

  // If there is a maximum safe dependence distance, it was already used to
  // limit the VF; do not interleave.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
  const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF = 1),
  // because with the above conditions interleaving can expose ILP and break
  // cross iteration dependences for reductions.
  if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
      !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
    return 1;

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants so assume that we have at least one
  // instruction that uses at least one register.
  for (auto &pair : R.MaxLocalUsers) {
    pair.second = std::max(pair.second, 1U);
  }

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to
  // be a power of two. We want a power of two interleave count to simplify
  // any addressing operations or alignment considerations.
  // We also want power of two interleave counts to ensure that the induction
  // variable of the vector loop wraps to zero, when the tail is folded by
  // masking; this currently happens when optimizing for size, in which case
  // IC is set to 1 above.
  unsigned IC = UINT_MAX;

  for (auto &pair : R.MaxLocalUsers) {
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
    if (VF.isScalar()) {
      if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumScalarRegs;
    } else {
      if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumVectorRegs;
    }
    unsigned MaxLocalUsers = pair.second;
    unsigned LoopInvariantRegs = 0;
    if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
      LoopInvariantRegs = R.LoopInvariantRegs[pair.first];

    unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) /
                                   MaxLocalUsers);
    // Don't count the induction variable as interleaved.
    if (EnableIndVarRegisterHeur) {
      TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
                            std::max(1U, (MaxLocalUsers - 1)));
    }

    IC = std::min(IC, TmpIC);
  }
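  // Worked example of the formula above (hypothetical register file,
  // ignoring the induction-variable heuristic): with 32 registers in a
  // class, 2 loop-invariant values and MaxLocalUsers = 6,
  // TmpIC = PowerOf2Floor((32 - 2) / 6) = PowerOf2Floor(5) = 4, so up to
  // four interleaved instances are assumed to fit without spilling.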
  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount =
      TTI.getMaxInterleaveFactor(VF.getKnownMinValue());

  // Check if the user has overridden the max.
  if (VF.isScalar()) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }

  // If the trip count is a known or estimated compile-time constant, limit
  // the interleave count to be less than the trip count divided by VF,
  // provided it is at least 1.
  //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second
  // vector iteration is enabled. However, for larger loops, there is likely
  // to be a similar benefit as for fixed-width vectors. For now, we choose
  // to leave the InterleaveCount as if vscale is '1', although if some
  // information about the vector is known (e.g. min vector size), we can
  // make a better decision.
  if (BestKnownTC) {
    MaxInterleaveCount =
        std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
    // Make sure MaxInterleaveCount is greater than 0.
    MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
  }

  assert(MaxInterleaveCount > 0 &&
         "Maximum interleave count must be greater than 0");

  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target and trip count allow.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else
    // Make sure IC is greater than 0.
    IC = std::max(1u, IC);

  assert(IC > 0 && "Interleave count must be greater than 0.");

  // If we did not calculate the cost for VF (because the user selected the
  // VF) then we calculate the cost of VF here.
  if (LoopCost == 0) {
    InstructionCost C = expectedCost(VF).first;
    assert(C.isValid() && "Expected to have chosen a VF with valid cost");
    LoopCost = *C.getValue();
  }

  assert(LoopCost && "Non-zero loop cost expected");

  // Interleave if we vectorized this loop and there is a reduction that
  // could benefit from interleaving.
  if (VF.isVector() && HasReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // Note that if we've already vectorized the loop we will have done the
  // runtime check and so interleaving won't require further checks.
  bool InterleavingRequiresRuntimePointerCheck =
      (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead
  // and potentially expose ILP opportunities.
  LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
                    << "LV: IC is " << IC << '\n'
                    << "LV: VF is " << VF << '\n');
  const bool AggressivelyInterleaveReductions =
      TTI.enableAggressiveInterleaving(HasReductions);
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the cost overhead is 1 and we use the cost model
    // to estimate the cost of the loop and interleave until the cost of the
    // loop overhead is about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by the max interleave
    // count) are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
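    // For instance (hypothetical costs), with a small-loop threshold of 20
    // and a loop cost of 4, SmallIC = min(IC, PowerOf2Floor(20 / 4)) =
    // min(IC, 4), which keeps the per-iteration overhead small relative to
    // the loop body.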
    // There is little point in interleaving for reductions containing selects
    // and compares when VF=1 since it may just create more overhead than it's
    // worth for loops with small trip counts. This is because we still have
    // to do the final reduction after the loop.
    bool HasSelectCmpReductions =
        HasReductions &&
        any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
          const RecurrenceDescriptor &RdxDesc = Reduction.second;
          return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
              RdxDesc.getRecurrenceKind());
        });
    if (HasSelectCmpReductions) {
      LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
      return 1;
    }

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. For tree-wise reductions
    // set the limit to 2, and for ordered reductions it's best to disable
    // interleaving entirely.
    if (HasReductions && TheLoop->getLoopDepth() > 1) {
      bool HasOrderedReductions =
          any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
            const RecurrenceDescriptor &RdxDesc = Reduction.second;
            return RdxDesc.isOrdered();
          });
      if (HasOrderedReductions) {
        LLVM_DEBUG(
            dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
        return 1;
      }

      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      LLVM_DEBUG(
          dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    // If there are scalar reductions and TTI has enabled aggressive
    // interleaving for reductions, we will interleave to expose ILP.
    if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
        AggressivelyInterleaveReductions) {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressive as the normal
      // IC to satisfy the rare situation when resources are too limited.
      return std::max(IC / 2, SmallIC);
    } else {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
      return SmallIC;
    }
  }

  // Interleave if this is a large loop (small loops are already dealt with
  // by this point) that could benefit from interleaving.
  if (AggressivelyInterleaveReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}
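// A conceptual sketch of what the register-usage analysis below computes
// (hypothetical four-instruction loop body): if %a and %b are defined at
// positions 0 and 1 and both are last used by %c at position 2, which in
// turn is used by %d at position 3, the live intervals are roughly
// %a:[0,2), %b:[1,2) and %c:[2,3), so at most two values are live at once,
// i.e. about two registers per interleaved instance.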
SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and
  // assign a number to each instruction. We use RPO to ensure that defs are
  // met before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi-map
  // that holds the list of intervals that *end* at a specific location. This
  // multi-map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but
  // are used inside the loop. We need this number separately from the
  // max-interval usage number because, when we unroll, loop-invariant values
  // do not require more registers.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  using IntervalMap = DenseMap<Instruction *, unsigned>;

  // Maps instruction to its index.
  SmallVector<Instruction *, 64> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the list of instruction indices that are used in the loop.
  SmallPtrSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      IdxToInstr.push_back(&I);

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and
        // continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = IdxToInstr.size();
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  using InstrList = SmallVector<Instruction *, 2>;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallPtrSet<Instruction *, 8> OpenIntervals;
  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());

  LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
  const auto &TTICapture = TTI;
  auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
    if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
      return 0;
    InstructionCost::CostType RegUsage =
        *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
    assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() &&
           "Nonsensical values for register usage.");
    return RegUsage;
  };
  for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
    Instruction *I = IdxToInstr[i];

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I))
      continue;

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;

    // For each VF find the maximum usage of registers.
    for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
      // Count the number of live intervals.
      SmallMapVector<unsigned, unsigned, 4> RegUsage;

      if (VFs[j].isScalar()) {
        for (auto Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          if (RegUsage.find(ClassID) == RegUsage.end())
            RegUsage[ClassID] = 1;
          else
            RegUsage[ClassID] += 1;
        }
      } else {
        collectUniformsAndScalars(VFs[j]);
        for (auto Inst : OpenIntervals) {
          // Skip ignored values for VF > 1.
          if (VecValuesToIgnore.count(Inst))
            continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            if (RegUsage.find(ClassID) == RegUsage.end())
              RegUsage[ClassID] = 1;
            else
              RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            if (RegUsage.find(ClassID) == RegUsage.end())
              RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
            else
              RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
        }
      }

      for (auto &pair : RegUsage) {
        if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
          MaxUsages[j][pair.first] =
              std::max(MaxUsages[j][pair.first], pair.second);
        else
          MaxUsages[j][pair.first] = pair.second;
      }
    }

    LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
                      << OpenIntervals.size() << '\n');

    // Add the current instruction to the list of open intervals.
    OpenIntervals.insert(I);
  }

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    SmallMapVector<unsigned, unsigned, 4> Invariant;

    for (auto Inst : LoopInvariants) {
      unsigned Usage =
          VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
      unsigned ClassID =
          TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      if (Invariant.find(ClassID) == Invariant.end())
        Invariant[ClassID] = Usage;
      else
        Invariant[ClassID] += Usage;
    }

    LLVM_DEBUG({
      dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
      dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
             << " item\n";
      for (const auto &pair : MaxUsages[i]) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
      dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
             << " item\n";
      for (const auto &pair : Invariant) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
    });

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[i];
    RUs[i] = RU;
  }

  return RUs;
}
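// The debug statements above would print something like the following for a
// hypothetical loop and target (the register class names are illustrative):
//   LV(REG): VF = 4
//   LV(REG): Found max usage: 1 item
//   LV(REG): RegisterClass: Generic::VectorRC, 3 registers
//   LV(REG): Found invariant usage: 1 item
//   LV(REG): RegisterClass: Generic::ScalarRC, 1 registers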
bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
  // TODO: Cost model for emulated masked load/store is completely
  // broken. This hack guides the cost model to use an artificially
  // high enough value to practically disable vectorization with such
  // operations, except where the previously deployed legality hack allowed
  // using very low cost values. This is to avoid regressions coming simply
  // from moving the "masked load/store" check from legality to the cost
  // model. Masked load/gather emulation was previously never allowed, and
  // only a limited number of masked store/scatter emulations were allowed.
  assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
  return isa<LoadInst>(I) ||
         (isa<StoreInst>(I) && NumPredStores > NumberOfStoresToPredicate);
}

void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
  // If we aren't vectorizing the loop, or if we've already collected the
  // instructions to scalarize, there's nothing to do. Collection may already
  // have occurred if we have a user-selected VF and are now computing the
  // expected cost for interleaving.
  if (VF.isScalar() || VF.isZero() ||
      InstsToScalarize.find(VF) != InstsToScalarize.end())
    return;

  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
  // not profitable to scalarize any instructions, the presence of VF in the
  // map will indicate that we've analyzed it already.
  ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];

  // Find all the instructions that are scalar with predication in the loop
  // and determine if it would be better to not if-convert the blocks they
  // are in. If so, we also record the instructions to scalarize.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!blockNeedsPredicationForAnyReason(BB))
      continue;
    for (Instruction &I : *BB)
      if (isScalarWithPredication(&I)) {
        ScalarCostsTy ScalarCosts;
        // Do not apply the discount if scalable, because that would lead to
        // invalid scalarization costs.
        // Do not apply the discount logic if the hacked cost is needed
        // for emulated masked memrefs.
        if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I) &&
            computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
          ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
        // Remember that BB will remain after vectorization.
        PredicatedBBsAfterVectorization.insert(BB);
      }
  }
}
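// For example (illustrative source): in
//   if (c[i] != 0) sum += a[i] / c[i];
// the division must only execute when the predicate holds, so it is scalar
// with predication, and the analysis below decides whether the whole
// single-use chain feeding it should stay scalar as well.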
int LoopVectorizationCostModel::computePredInstDiscount(
    Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
  assert(!isUniformAfterVectorization(PredInst, VF) &&
         "Instruction marked uniform-after-vectorization will be predicated");

  // Initialize the discount to zero, meaning that the scalar version and the
  // vector version cost the same.
  InstructionCost Discount = 0;

  // Holds instructions to analyze. The instructions we visit are mapped in
  // ScalarCosts. Those instructions are the ones that would be scalarized if
  // we find that the scalar version costs less.
  SmallVector<Instruction *, 8> Worklist;

  // Returns true if the given instruction can be scalarized.
  auto canBeScalarized = [&](Instruction *I) -> bool {
    // We only attempt to scalarize instructions forming a single-use chain
    // from the original predicated block that would otherwise be vectorized.
    // Although not strictly necessary, we give up on instructions we know
    // will already be scalar to avoid traversing chains that are unlikely
    // to be beneficial.
    if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
        isScalarAfterVectorization(I, VF))
      return false;

    // If the instruction is scalar with predication, it will be analyzed
    // separately. We ignore it within the context of PredInst.
    if (isScalarWithPredication(I))
      return false;

    // If any of the instruction's operands are uniform after vectorization,
    // the instruction cannot be scalarized. This prevents, for example, a
    // masked load from being scalarized.
    //
    // We assume we will only emit a value for lane zero of an instruction
    // marked uniform after vectorization, rather than VF identical values.
    // Thus, if we scalarize an instruction that uses a uniform, we would
    // create uses of values corresponding to the lanes we aren't emitting
    // code for. This behavior can be changed by allowing getScalarValue to
    // clone the lane zero values for uniforms rather than asserting.
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get()))
        if (isUniformAfterVectorization(J, VF))
          return false;

    // Otherwise, we can scalarize the instruction.
    return true;
  };

  // Compute the expected cost discount from scalarizing the entire
  // expression feeding the predicated instruction. We currently only
  // consider expressions that are single-use instruction chains.
  Worklist.push_back(PredInst);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();

    // If we've already analyzed the instruction, there's nothing to do.
    if (ScalarCosts.find(I) != ScalarCosts.end())
      continue;

    // Compute the cost of the vector instruction. Note that this cost
    // already includes the scalarization overhead of the predicated
    // instruction.
    InstructionCost VectorCost = getInstructionCost(I, VF).first;

    // Compute the cost of the scalarized instruction. This cost is the cost
    // of the instruction as if it wasn't if-converted and instead remained
    // in the predicated block. We will scale this cost by block probability
    // after computing the scalarization overhead.
    InstructionCost ScalarCost =
        VF.getFixedValue() *
        getInstructionCost(I, ElementCount::getFixed(1)).first;

    // Compute the scalarization overhead of needed insertelement
    // instructions and phi nodes.
    if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
      ScalarCost += TTI.getScalarizationOverhead(
          cast<VectorType>(ToVectorTy(I->getType(), VF)),
          APInt::getAllOnes(VF.getFixedValue()), true, false);
      ScalarCost +=
          VF.getFixedValue() *
          TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
    }

    // Compute the scalarization overhead of needed extractelement
    // instructions. For each of the instruction's operands, if the operand
    // can be scalarized, add it to the worklist; otherwise, account for the
    // overhead.
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get())) {
        assert(VectorType::isValidElementType(J->getType()) &&
               "Instruction has non-scalar type");
        if (canBeScalarized(J))
          Worklist.push_back(J);
        else if (needsExtract(J, VF)) {
          ScalarCost += TTI.getScalarizationOverhead(
              cast<VectorType>(ToVectorTy(J->getType(), VF)),
              APInt::getAllOnes(VF.getFixedValue()), false, true);
        }
      }

    // Scale the total scalar cost by block probability.
    ScalarCost /= getReciprocalPredBlockProb();

    // Compute the discount. A non-negative discount means the vector version
    // of the instruction costs more, and scalarizing would be beneficial.
    Discount += VectorCost - ScalarCost;
    ScalarCosts[I] = ScalarCost;
  }

  return *Discount.getValue();
}
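// Worked example of the discount (hypothetical costs, VF = 4): suppose the
// vector form of an instruction costs 12 and its scalar form costs 2 per
// lane, so ScalarCost = 4 * 2 = 8 plus, say, 4 of insert/extract overhead,
// giving 12. Dividing that by the reciprocal block probability (2) yields 6,
// and Discount += 12 - 6 = 6 > 0, i.e. keeping the chain scalar is predicted
// to be cheaper than if-converting it.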
LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::expectedCost(
    ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
  VectorizationCostTy Cost;

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    VectorizationCostTy BlockCost;

    // For each instruction in the old loop.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      // Skip ignored values.
      if (ValuesToIgnore.count(&I) ||
          (VF.isVector() && VecValuesToIgnore.count(&I)))
        continue;

      VectorizationCostTy C = getInstructionCost(&I, VF);

      // Check if we should override the cost.
      if (C.first.isValid() &&
          ForceTargetInstructionCost.getNumOccurrences() > 0)
        C.first = InstructionCost(ForceTargetInstructionCost);

      // Keep a list of instructions with invalid costs.
      if (Invalid && !C.first.isValid())
        Invalid->emplace_back(&I, VF);

      BlockCost.first += C.first;
      BlockCost.second |= C.second;
      LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
                        << " for VF " << VF << " For instruction: " << I
                        << '\n');
    }

    // If we are vectorizing a predicated block, it will have been
    // if-converted. This means that the block's instructions (aside from
    // stores and instructions that may divide by zero) will now be
    // unconditionally executed. For the scalar case, we may not always
    // execute the predicated block, if it is an if-else block. Thus, scale
    // the block's cost by the probability of executing it.
    // blockNeedsPredication from Legal is used so as to not include all
    // blocks in tail folded loops.
    if (VF.isScalar() && Legal->blockNeedsPredication(BB))
      BlockCost.first /= getReciprocalPredBlockProb();

    Cost.first += BlockCost.first;
    Cost.second |= BlockCost.second;
  }

  return Cost;
}

/// Gets the address access SCEV after verifying that the access pattern
/// is loop invariant except for the induction variable dependence.
///
/// This SCEV can be sent to the Target in order to estimate the address
/// calculation cost.
static const SCEV *getAddressAccessSCEV(
    Value *Ptr,
    LoopVectorizationLegality *Legal,
    PredicatedScalarEvolution &PSE,
    const Loop *TheLoop) {

  auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  if (!Gep)
    return nullptr;

  // We are looking for a gep with all loop invariant indices except for one
  // which should be an induction variable.
  auto SE = PSE.getSE();
  unsigned NumOperands = Gep->getNumOperands();
  for (unsigned i = 1; i < NumOperands; ++i) {
    Value *Opd = Gep->getOperand(i);
    if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
        !Legal->isInductionVariable(Opd))
      return nullptr;
  }

  // Now we know we have a GEP of the form (ptr, %inv, %ind, %inv); return
  // the pointer SCEV.
  return PSE.getSCEV(Ptr);
}
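// For example (illustrative IR), a GEP such as
//   %gep = getelementptr [64 x i32], [64 x i32]* %base, i64 %inv, i64 %iv
// matches: %inv is loop-invariant and %iv is an induction variable, so the
// pointer SCEV is returned for address-cost estimation.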
static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
  return Legal->hasStride(I->getOperand(0)) ||
         Legal->hasStride(I->getOperand(1));
}

InstructionCost
LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
                                                        ElementCount VF) {
  assert(VF.isVector() &&
         "Scalarization cost of instruction implies vectorization.");
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  Type *ValTy = getLoadStoreType(I);
  auto SE = PSE.getSE();

  unsigned AS = getLoadStoreAddressSpace(I);
  Value *Ptr = getLoadStorePointerOperand(I);
  Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
  // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
  //       that it is being called from this specific place.

  // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
  const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);

  // Get the cost of the scalar memory instruction and address computation.
  InstructionCost Cost =
      VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);

  // Don't pass *I here, since it is scalar but will actually be part of a
  // vectorized loop where the user of it is a vectorized instruction.
  const Align Alignment = getLoadStoreAlignment(I);
  Cost += VF.getKnownMinValue() *
          TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
                              AS, TTI::TCK_RecipThroughput);

  // Get the overhead of the extractelement and insertelement instructions
  // we might create due to scalarization.
  Cost += getScalarizationOverhead(I, VF);

  // If we have a predicated load/store, it will need extra i1 extracts and
  // conditional branches, but may not be executed for each vector lane. Scale
  // the cost by the probability of executing the predicated block.
  if (isPredicatedInst(I)) {
    Cost /= getReciprocalPredBlockProb();

    // Add the cost of an i1 extract and a branch.
    auto *Vec_i1Ty =
        VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
    Cost += TTI.getScalarizationOverhead(
        Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
        /*Insert=*/false, /*Extract=*/true);
    Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);

    if (useEmulatedMaskMemRefHack(I))
      // Artificially setting to a high enough value to practically disable
      // vectorization with such operations.
      Cost = 3000000;
  }

  return Cost;
}
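// Worked example of the scalarization cost above (hypothetical TTI numbers):
// for VF = 4 with a per-lane address cost of 1 and a per-lane memory-op cost
// of 2, the base cost is 4 * 1 + 4 * 2 = 12, plus extract/insert overhead;
// if the access is predicated, that total is halved by the block probability
// before the i1-extract and branch costs are added on top.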
      Cost = 3000000;
  }

  return Cost;
}

InstructionCost
LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
                                                    ElementCount VF) {
  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  Value *Ptr = getLoadStorePointerOperand(I);
  unsigned AS = getLoadStoreAddressSpace(I);
  int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
  enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
         "Stride should be 1 or -1 for consecutive memory access");
  const Align Alignment = getLoadStoreAlignment(I);
  InstructionCost Cost = 0;
  if (Legal->isMaskRequired(I))
    Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
                                      CostKind);
  else
    Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
                                CostKind, I);

  bool Reverse = ConsecutiveStride < 0;
  if (Reverse)
    Cost +=
        TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
  return Cost;
}

InstructionCost
LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
                                                ElementCount VF) {
  assert(Legal->isUniformMemOp(*I));

  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  const Align Alignment = getLoadStoreAlignment(I);
  unsigned AS = getLoadStoreAddressSpace(I);
  enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  if (isa<LoadInst>(I)) {
    return TTI.getAddressComputationCost(ValTy) +
           TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
                               CostKind) +
           TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
  }
  StoreInst *SI = cast<StoreInst>(I);

  bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
  return TTI.getAddressComputationCost(ValTy) +
         TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
                             CostKind) +
         (isLoopInvariantStoreValue
              ? 0
              : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
                                       VF.getKnownMinValue() - 1));
}

InstructionCost
LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
                                                 ElementCount VF) {
  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  const Align Alignment = getLoadStoreAlignment(I);
  const Value *Ptr = getLoadStorePointerOperand(I);

  return TTI.getAddressComputationCost(VectorTy) +
         TTI.getGatherScatterOpCost(
             I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
             TargetTransformInfo::TCK_RecipThroughput, I);
}

InstructionCost
LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
                                                   ElementCount VF) {
  // TODO: Once we have support for interleaving with scalable vectors
  // we can calculate the cost properly here.
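  // For example, a factor-2 group covering a[2*i] and a[2*i+1] is costed as
  // one wide access over VF * 2 elements plus whatever the target charges to
  // de-interleave (or interleave) the member vectors.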
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  unsigned AS = getLoadStoreAddressSpace(I);

  auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Failed to get an interleaved access group.");

  unsigned InterleaveFactor = Group->getFactor();
  auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);

  // Holds the indices of existing members in the interleaved group.
  SmallVector<unsigned, 4> Indices;
  for (unsigned IF = 0; IF < InterleaveFactor; IF++)
    if (Group->getMember(IF))
      Indices.push_back(IF);

  // Calculate the cost of the whole interleaved group.
  bool UseMaskForGaps =
      (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
      (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
  InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
      I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
      AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);

  if (Group->isReverse()) {
    // TODO: Add support for reversed masked interleaved access.
    assert(!Legal->isMaskRequired(I) &&
           "Reverse masked interleaved access not supported.");
    Cost +=
        Group->getNumMembers() *
        TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
  }
  return Cost;
}

Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
    Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
  using namespace llvm::PatternMatch;
  // Early exit for no inloop reductions.
  if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
    return None;
  auto *VectorTy = cast<VectorType>(Ty);

  // We are looking for a pattern of, and finding the minimal acceptable cost:
  //   reduce(mul(ext(A), ext(B))) or
  //   reduce(mul(A, B)) or
  //   reduce(ext(A)) or
  //   reduce(A).
  // The basic idea is that we walk down the tree to do that, finding the root
  // reduction instruction in InLoopReductionImmediateChains. From there we find
  // the pattern of mul/ext and test the cost of the entire pattern vs the cost
  // of the components. If the reduction cost is lower then we return it for the
  // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not we return an invalid cost specifying the original cost method
  // should be used.
  Instruction *RetI = I;
  if (match(RetI, m_ZExtOrSExt(m_Value()))) {
    if (!RetI->hasOneUser())
      return None;
    RetI = RetI->user_back();
  }
  if (match(RetI, m_Mul(m_Value(), m_Value())) &&
      RetI->user_back()->getOpcode() == Instruction::Add) {
    if (!RetI->hasOneUser())
      return None;
    RetI = RetI->user_back();
  }

  // Test if the found instruction is a reduction, and if not return an invalid
  // cost specifying the parent to use the original cost modelling.
  if (!InLoopReductionImmediateChains.count(RetI))
    return None;

  // Find the reduction this chain is a part of and calculate the basic cost of
  // the reduction on its own.
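  // For instance, with a chain `%phi -> %add1 -> %add2`, the immediate-chain
  // map links %add2 to %add1 and %add1 to %phi, so the walk below always
  // terminates at the reduction phi.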
  Instruction *LastChain = InLoopReductionImmediateChains[RetI];
  Instruction *ReductionPhi = LastChain;
  while (!isa<PHINode>(ReductionPhi))
    ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];

  const RecurrenceDescriptor &RdxDesc =
      Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;

  InstructionCost BaseCost = TTI.getArithmeticReductionCost(
      RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);

  // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
  // normal fmul instruction to the cost of the fadd reduction.
  if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
    BaseCost +=
        TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);

  // If we're using ordered reductions then we can just return the base cost
  // here, since getArithmeticReductionCost calculates the full ordered
  // reduction cost when FP reassociation is not allowed.
  if (useOrderedReductions(RdxDesc))
    return BaseCost;

  // Get the operand that was not the reduction chain and match it to one of the
  // patterns, returning the better cost if it is found.
  Instruction *RedOp = RetI->getOperand(1) == LastChain
                           ? dyn_cast<Instruction>(RetI->getOperand(0))
                           : dyn_cast<Instruction>(RetI->getOperand(1));

  VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);

  Instruction *Op0, *Op1;
  if (RedOp &&
      match(RedOp,
            m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
      match(Op0, m_ZExtOrSExt(m_Value())) &&
      Op0->getOpcode() == Op1->getOpcode() &&
      Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
      !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
      (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {

    // Matched reduce(ext(mul(ext(A), ext(B)))).
    // Note that the extend opcodes need to all match, or if A==B they will have
    // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
    // which is equally fine.
    bool IsUnsigned = isa<ZExtInst>(Op0);
    auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
    auto *MulType = VectorType::get(Op0->getType(), VectorTy);

    InstructionCost ExtCost =
        TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
                             TTI::CastContextHint::None, CostKind, Op0);
    InstructionCost MulCost =
        TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
    InstructionCost Ext2Cost =
        TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
                             TTI::CastContextHint::None, CostKind, RedOp);

    InstructionCost RedCost = TTI.getExtendedAddReductionCost(
        /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
        CostKind);

    if (RedCost.isValid() &&
        RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
      return I == RetI ? RedCost : 0;
  } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
             !TheLoop->isLoopInvariant(RedOp)) {
    // Matched reduce(ext(A)).
    bool IsUnsigned = isa<ZExtInst>(RedOp);
    auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
    InstructionCost RedCost = TTI.getExtendedAddReductionCost(
        /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
        CostKind);

    InstructionCost ExtCost =
        TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
                             TTI::CastContextHint::None, CostKind, RedOp);
    if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
      return I == RetI ? RedCost : 0;
  } else if (RedOp &&
             match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
    if (match(Op0, m_ZExtOrSExt(m_Value())) &&
        Op0->getOpcode() == Op1->getOpcode() &&
        !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
      bool IsUnsigned = isa<ZExtInst>(Op0);
      Type *Op0Ty = Op0->getOperand(0)->getType();
      Type *Op1Ty = Op1->getOperand(0)->getType();
      Type *LargestOpTy =
          Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
                                                                    : Op0Ty;
      auto *ExtType = VectorType::get(LargestOpTy, VectorTy);

      // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of
      // different sizes. We take the largest type as the ext to reduce, and add
      // the remaining cost as, for example, reduce(mul(ext(ext(A)), ext(B))).
      InstructionCost ExtCost0 = TTI.getCastInstrCost(
          Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
          TTI::CastContextHint::None, CostKind, Op0);
      InstructionCost ExtCost1 = TTI.getCastInstrCost(
          Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
          TTI::CastContextHint::None, CostKind, Op1);
      InstructionCost MulCost =
          TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);

      InstructionCost RedCost = TTI.getExtendedAddReductionCost(
          /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
          CostKind);
      InstructionCost ExtraExtCost = 0;
      if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
        Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
        ExtraExtCost = TTI.getCastInstrCost(
            ExtraExtOp->getOpcode(), ExtType,
            VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
            TTI::CastContextHint::None, CostKind, ExtraExtOp);
      }

      if (RedCost.isValid() &&
          (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
        return I == RetI ? RedCost : 0;
    } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
      // Matched reduce(mul()).
      InstructionCost MulCost =
          TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);

      InstructionCost RedCost = TTI.getExtendedAddReductionCost(
          /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
          CostKind);

      if (RedCost.isValid() && RedCost < MulCost + BaseCost)
        return I == RetI ? RedCost : 0;
    }
  }

  return I == RetI ? Optional<InstructionCost>(BaseCost) : None;
}

InstructionCost
LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
                                                     ElementCount VF) {
  // Calculate scalar cost only. Vectorization cost should be ready at this
  // moment.
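  // (For vector VFs the widening decision and its cost were precomputed by
  // setCostBasedWideningDecision and are simply looked up here.)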
  if (VF.isScalar()) {
    Type *ValTy = getLoadStoreType(I);
    const Align Alignment = getLoadStoreAlignment(I);
    unsigned AS = getLoadStoreAddressSpace(I);

    return TTI.getAddressComputationCost(ValTy) +
           TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
                               TTI::TCK_RecipThroughput, I);
  }
  return getWideningCost(I, VF);
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::getInstructionCost(Instruction *I,
                                               ElementCount VF) {
  // If we know that this instruction will remain uniform, check the cost of
  // the scalar version.
  if (isUniformAfterVectorization(I, VF))
    VF = ElementCount::getFixed(1);

  if (VF.isVector() && isProfitableToScalarize(I, VF))
    return VectorizationCostTy(InstsToScalarize[VF][I], false);

  // Forced scalars do not have any scalarization overhead.
  auto ForcedScalar = ForcedScalars.find(VF);
  if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
    auto InstSet = ForcedScalar->second;
    if (InstSet.count(I))
      return VectorizationCostTy(
          (getInstructionCost(I, ElementCount::getFixed(1)).first *
           VF.getKnownMinValue()),
          false);
  }

  Type *VectorTy;
  InstructionCost C = getInstructionCost(I, VF, VectorTy);

  bool TypeNotScalarized = false;
  if (VF.isVector() && VectorTy->isVectorTy()) {
    unsigned NumParts = TTI.getNumberOfParts(VectorTy);
    if (NumParts)
      TypeNotScalarized = NumParts < VF.getKnownMinValue();
    else
      C = InstructionCost::getInvalid();
  }
  return VectorizationCostTy(C, TypeNotScalarized);
}

InstructionCost
LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
                                                     ElementCount VF) const {

  // There is no mechanism yet to create a scalable scalarization loop,
  // so this is currently Invalid.
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  if (VF.isScalar())
    return 0;

  InstructionCost Cost = 0;
  Type *RetTy = ToVectorTy(I->getType(), VF);
  if (!RetTy->isVoidTy() &&
      (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
    Cost += TTI.getScalarizationOverhead(
        cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true,
        false);

  // Some targets keep addresses scalar.
  if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
    return Cost;

  // Some targets support efficient element stores.
  if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
    return Cost;

  // Collect operands to consider.
  CallInst *CI = dyn_cast<CallInst>(I);
  Instruction::op_range Ops = CI ? CI->args() : I->operands();

  // Skip operands that do not require extraction/scalarization and do not incur
  // any overhead.
  SmallVector<Type *> Tys;
  for (auto *V : filterExtractingOperands(Ops, VF))
    Tys.push_back(MaybeVectorizeType(V->getType(), VF));
  return Cost + TTI.getOperandsScalarizationOverhead(
                    filterExtractingOperands(Ops, VF), Tys);
}

void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
  if (VF.isScalar())
    return;
  NumPredStores = 0;
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the old loop.
    for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;

      // TODO: We should generate better code and update the cost model for
      // predicated uniform stores. Today they are treated as any other
      // predicated store (see added test cases in
      // invariant-store-vectorization.ll).
      if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
        NumPredStores++;

      if (Legal->isUniformMemOp(I)) {
        // TODO: Avoid replicating loads and stores instead of
        // relying on instcombine to remove them.
        // Load: Scalar load + broadcast
        // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
        InstructionCost Cost;
        if (isa<StoreInst>(&I) && VF.isScalable() &&
            isLegalGatherOrScatter(&I)) {
          Cost = getGatherScatterCost(&I, VF);
          setWideningDecision(&I, VF, CM_GatherScatter, Cost);
        } else {
          assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
                 "Cannot yet scalarize uniform stores");
          Cost = getUniformMemOpCost(&I, VF);
          setWideningDecision(&I, VF, CM_Scalarize, Cost);
        }
        continue;
      }

      // We assume that widening is the best solution when possible.
      if (memoryInstructionCanBeWidened(&I, VF)) {
        InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
        int ConsecutiveStride = Legal->isConsecutivePtr(
            getLoadStoreType(&I), getLoadStorePointerOperand(&I));
        assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
               "Expected consecutive stride.");
        InstWidening Decision =
            ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
        setWideningDecision(&I, VF, Decision, Cost);
        continue;
      }

      // Choose between Interleaving, Gather/Scatter or Scalarization.
      InstructionCost InterleaveCost = InstructionCost::getInvalid();
      unsigned NumAccesses = 1;
      if (isAccessInterleaved(&I)) {
        auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");

        // Make one decision for the whole group.
        if (getWideningDecision(&I, VF) != CM_Unknown)
          continue;

        NumAccesses = Group->getNumMembers();
        if (interleavedAccessCanBeWidened(&I, VF))
          InterleaveCost = getInterleaveGroupCost(&I, VF);
      }

      InstructionCost GatherScatterCost =
          isLegalGatherOrScatter(&I)
              ? getGatherScatterCost(&I, VF) * NumAccesses
              : InstructionCost::getInvalid();

      InstructionCost ScalarizationCost =
          getMemInstScalarizationCost(&I, VF) * NumAccesses;

      // Choose the best option for the current VF, record the decision, and
      // use it during vectorization.
      InstructionCost Cost;
      InstWidening Decision;
      if (InterleaveCost <= GatherScatterCost &&
          InterleaveCost < ScalarizationCost) {
        Decision = CM_Interleave;
        Cost = InterleaveCost;
      } else if (GatherScatterCost < ScalarizationCost) {
        Decision = CM_GatherScatter;
        Cost = GatherScatterCost;
      } else {
        Decision = CM_Scalarize;
        Cost = ScalarizationCost;
      }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group receives the cost, but
      // the cost will actually be assigned to one instruction.
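      // E.g. an interleave group with factor 4 but only 2 members present has
      // NumAccesses == 2, so the gather/scatter and scalarization alternatives
      // above were scaled to cover the same set of accesses.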
      if (auto Group = getInterleavedAccessGroup(&I))
        setWideningDecision(Group, VF, Decision, Cost);
      else
        setWideningDecision(&I, VF, Decision, Cost);
    }
  }

  // Make sure that any load of address and any other address computation
  // remains scalar unless there is gather/scatter support. This avoids
  // inevitable extracts into address registers, and also has the benefit of
  // activating LSR more, since that pass can't optimize vectorized
  // addresses.
  if (TTI.prefersVectorizedAddressing())
    return;

  // Start with all scalar pointer uses.
  SmallPtrSet<Instruction *, 8> AddrDefs;
  for (BasicBlock *BB : TheLoop->blocks())
    for (Instruction &I : *BB) {
      Instruction *PtrDef =
          dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
      if (PtrDef && TheLoop->contains(PtrDef) &&
          getWideningDecision(&I, VF) != CM_GatherScatter)
        AddrDefs.insert(PtrDef);
    }

  // Add all instructions used to generate the addresses.
  SmallVector<Instruction *, 4> Worklist;
  append_range(Worklist, AddrDefs);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (auto &Op : I->operands())
      if (auto *InstOp = dyn_cast<Instruction>(Op))
        if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
            AddrDefs.insert(InstOp).second)
          Worklist.push_back(InstOp);
  }

  for (auto *I : AddrDefs) {
    if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // cost functions, but since this involves the task of finding out
      // if the loaded register is involved in an address computation, it is
      // instead changed here when we know this is the case.
      InstWidening Decision = getWideningDecision(I, VF);
      if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
        // Scalarize a widened load of address.
        setWideningDecision(
            I, VF, CM_Scalarize,
            (VF.getKnownMinValue() *
             getMemoryInstructionCost(I, ElementCount::getFixed(1))));
      else if (auto Group = getInterleavedAccessGroup(I)) {
        // Scalarize an interleave group of address loads.
        for (unsigned I = 0; I < Group->getFactor(); ++I) {
          if (Instruction *Member = Group->getMember(I))
            setWideningDecision(
                Member, VF, CM_Scalarize,
                (VF.getKnownMinValue() *
                 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
        }
      }
    } else
      // Make sure I gets scalarized and a cost estimate without
      // scalarization overhead.
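      // (getInstructionCost multiplies the scalar cost of a forced-scalar
      // instruction by the number of lanes, with no extract/insert overhead.)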
      ForcedScalars[VF].insert(I);
  }
}

InstructionCost
LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
                                               Type *&VectorTy) {
  Type *RetTy = I->getType();
  if (canTruncateToMinimalBitwidth(I, VF))
    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  auto SE = PSE.getSE();
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  auto hasSingleCopyAfterVectorization = [this](Instruction *I,
                                                ElementCount VF) -> bool {
    if (VF.isScalar())
      return true;

    auto Scalarized = InstsToScalarize.find(VF);
    assert(Scalarized != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return !Scalarized->second.count(I) &&
           llvm::all_of(I->users(), [&](User *U) {
             auto *UI = cast<Instruction>(U);
             return !Scalarized->second.count(UI);
           });
  };
  (void) hasSingleCopyAfterVectorization;

  if (isScalarAfterVectorization(I, VF)) {
    // With the exception of GEPs and PHIs, after scalarization there should
    // only be one copy of the instruction generated in the loop. This is
    // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result,
    // it means we don't have to multiply the instruction cost by VF.
    assert(I->getOpcode() == Instruction::GetElementPtr ||
           I->getOpcode() == Instruction::PHI ||
           (I->getOpcode() == Instruction::BitCast &&
            I->getType()->isPointerTy()) ||
           hasSingleCopyAfterVectorization(I, VF));
    VectorTy = RetTy;
  } else
    VectorTy = ToVectorTy(RetTy, VF);

  // TODO: We need to estimate the cost of intrinsic calls.
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    // We mark this instruction as zero-cost because the cost of GEPs in
    // vectorized code depends on whether the corresponding memory instruction
    // is scalarized or not. Therefore, we handle GEPs with the memory
    // instruction cost.
    return 0;
  case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
    bool ScalarPredicatedBB = false;
    BranchInst *BI = cast<BranchInst>(I);
    if (VF.isVector() && BI->isConditional() &&
        (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
         PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
      ScalarPredicatedBB = true;

    if (ScalarPredicatedBB) {
      // Not possible to scalarize a scalable vector with predicated
      // instructions.
      if (VF.isScalable())
        return InstructionCost::getInvalid();
      // Return cost for branches around scalarized and predicated blocks.
      auto *Vec_i1Ty =
          VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
      return (
          TTI.getScalarizationOverhead(
              Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
          (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
    } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
      // The back-edge branch will remain, as will all scalar branches.
      return TTI.getCFInstrCost(Instruction::Br, CostKind);
    else
      // This branch will be eliminated by if-conversion.
      return 0;
    // Note: We currently assume zero cost for an unconditional branch inside
    // a predicated block since it will become a fall-through, although we
    // may decide in the future to call TTI for all branches.
  }
  case Instruction::PHI: {
    auto *Phi = cast<PHINode>(I);

    // First-order recurrences are replaced by vector shuffles inside the loop.
    // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
    if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
      return TTI.getShuffleCost(
          TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
          None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));

    // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
    // converted into select instructions. We require N - 1 selects per phi
    // node, where N is the number of incoming values.
    if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
      return (Phi->getNumIncomingValues() - 1) *
             TTI.getCmpSelInstrCost(
                 Instruction::Select, ToVectorTy(Phi->getType(), VF),
                 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
                 CmpInst::BAD_ICMP_PREDICATE, CostKind);

    return TTI.getCFInstrCost(Instruction::PHI, CostKind);
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    // If we have a predicated instruction, it may not be executed for each
    // vector lane. Get the scalarization cost and scale this amount by the
    // probability of executing the predicated block. If the instruction is not
    // predicated, we fall through to the next case.
    if (VF.isVector() && isScalarWithPredication(I)) {
      InstructionCost Cost = 0;

      // These instructions have a non-void type, so account for the phi nodes
      // that we will create. This cost is likely to be zero. The phi node
      // cost, if any, should be scaled by the block probability because it
      // models a copy at the end of each predicated block.
      Cost += VF.getKnownMinValue() *
              TTI.getCFInstrCost(Instruction::PHI, CostKind);

      // The cost of the non-predicated instruction.
      Cost += VF.getKnownMinValue() *
              TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);

      // The cost of insertelement and extractelement instructions needed for
      // scalarization.
      Cost += getScalarizationOverhead(I, VF);

      // Scale the cost by the probability of executing the predicated blocks.
      // This assumes the predicated block for each vector lane is equally
      // likely.
      return Cost / getReciprocalPredBlockProb();
    }
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Since we will replace the stride by 1 the multiplication should go away.
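    // (Legal->hasStride means the loop is versioned on `stride == 1`, so an
    // index expression like `%i * %stride` folds to `%i` in the version we
    // vectorize.)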
    if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
      return 0;

    // Detect reduction patterns.
    if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
      return *RedCost;

    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
    Value *Op2 = I->getOperand(1);
    TargetTransformInfo::OperandValueProperties Op2VP;
    TargetTransformInfo::OperandValueKind Op2VK =
        TTI.getOperandInfo(Op2, Op2VP);
    if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
      Op2VK = TargetTransformInfo::OK_UniformValue;

    SmallVector<const Value *, 4> Operands(I->operand_values());
    return TTI.getArithmeticInstrCost(
        I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
        Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
  }
  case Instruction::FNeg: {
    return TTI.getArithmeticInstrCost(
        I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
        TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
        TargetTransformInfo::OP_None, I->getOperand(0), I);
  }
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
    bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));

    const Value *Op0, *Op1;
    using namespace llvm::PatternMatch;
    if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
                        match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
      // select x, y, false --> x & y
      // select x, true, y --> x | y
      TTI::OperandValueProperties Op1VP = TTI::OP_None;
      TTI::OperandValueProperties Op2VP = TTI::OP_None;
      TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
      TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
      assert(Op0->getType()->getScalarSizeInBits() == 1 &&
             Op1->getType()->getScalarSizeInBits() == 1);

      SmallVector<const Value *, 2> Operands{Op0, Op1};
      return TTI.getArithmeticInstrCost(
          match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And,
          VectorTy, CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
    }

    Type *CondTy = SI->getCondition()->getType();
    if (!ScalarCond)
      CondTy = VectorType::get(CondTy, VF);

    CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
    if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
      Pred = Cmp->getPredicate();
    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
                                  CostKind, I);
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ValTy = I->getOperand(0)->getType();
    Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
    if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
      ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
    VectorTy = ToVectorTy(ValTy, VF);
    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
                                  cast<CmpInst>(I)->getPredicate(), CostKind,
                                  I);
  }
  case Instruction::Store:
  case Instruction::Load: {
    ElementCount Width = VF;
    if (Width.isVector()) {
      InstWidening Decision = getWideningDecision(I, Width);
      assert(Decision != CM_Unknown &&
             "CM decision should be taken at this point");
      if (Decision == CM_Scalarize)
        Width = ElementCount::getFixed(1);
    }
    VectorTy = ToVectorTy(getLoadStoreType(I), Width);
    return getMemoryInstructionCost(I, VF);
  }
  case Instruction::BitCast:
    if (I->getType()->isPointerTy())
      return 0;
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc: {
    // Computes the CastContextHint from a Load/Store instruction.
    auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
      assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
             "Expected a load or a store!");

      if (VF.isScalar() || !TheLoop->contains(I))
        return TTI::CastContextHint::Normal;

      switch (getWideningDecision(I, VF)) {
      case LoopVectorizationCostModel::CM_GatherScatter:
        return TTI::CastContextHint::GatherScatter;
      case LoopVectorizationCostModel::CM_Interleave:
        return TTI::CastContextHint::Interleave;
      case LoopVectorizationCostModel::CM_Scalarize:
      case LoopVectorizationCostModel::CM_Widen:
        return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
                                        : TTI::CastContextHint::Normal;
      case LoopVectorizationCostModel::CM_Widen_Reverse:
        return TTI::CastContextHint::Reversed;
      case LoopVectorizationCostModel::CM_Unknown:
        llvm_unreachable("Instr did not go through cost modelling?");
      }

      llvm_unreachable("Unhandled case!");
    };

    unsigned Opcode = I->getOpcode();
    TTI::CastContextHint CCH = TTI::CastContextHint::None;
    // For Trunc, the context is the only user, which must be a StoreInst.
    if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
      if (I->hasOneUse())
        if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
          CCH = ComputeCCH(Store);
    }
    // For Z/Sext, the context is the operand, which must be a LoadInst.
    else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
             Opcode == Instruction::FPExt) {
      if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
        CCH = ComputeCCH(Load);
    }

    // We optimize the truncation of induction variables having constant
    // integer steps. The cost of these truncations is the same as the scalar
    // operation.
    if (isOptimizableIVTruncate(I, VF)) {
      auto *Trunc = cast<TruncInst>(I);
      return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
                                  Trunc->getSrcTy(), CCH, CostKind, Trunc);
    }

    // Detect reduction patterns.
    if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
      return *RedCost;

    Type *SrcScalarTy = I->getOperand(0)->getType();
    Type *SrcVecTy =
        VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
    if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
      //
      // Calculate the modified src and dest types.
      Type *MinVecTy = VectorTy;
      if (Opcode == Instruction::Trunc) {
        SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
        SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      }
    }

    return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
  }
  case Instruction::Call: {
    if (RecurrenceDescriptor::isFMulAddIntrinsic(I))
      if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
        return *RedCost;
    bool NeedToScalarize;
    CallInst *CI = cast<CallInst>(I);
    InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
    if (getVectorIntrinsicIDForCall(CI, TLI)) {
      InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
      return std::min(CallCost, IntrinsicCost);
    }
    return CallCost;
  }
  case Instruction::ExtractValue:
    return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
  case Instruction::Alloca:
    // We cannot easily widen an alloca to a scalable alloca, as
    // the result would need to be a vector of pointers.
    if (VF.isScalable())
      return InstructionCost::getInvalid();
    LLVM_FALLTHROUGH;
  default:
    // This opcode is unknown. Assume that it is the same as 'mul'.
    return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
  } // end of switch.
}

char LoopVectorize::ID = 0;

static const char lv_name[] = "Loop Vectorization";

INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)

namespace llvm {

Pass *createLoopVectorizePass() { return new LoopVectorize(); }

Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
                              bool VectorizeOnlyWhenForced) {
  return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
}

} // end namespace llvm

bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
  // Check if the pointer operand of a load or store instruction is
  // consecutive.
  if (auto *Ptr = getLoadStorePointerOperand(Inst))
    return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr);
  return false;
}

void LoopVectorizationCostModel::collectValuesToIgnore() {
  // Ignore ephemeral values.
  CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);

  // Ignore type-promoting instructions we identified during reduction
  // detection.
  for (auto &Reduction : Legal->getReductionVars()) {
    const RecurrenceDescriptor &RedDes = Reduction.second;
    const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }
  // Ignore type-casting instructions we identified during induction
  // detection.
  for (auto &Induction : Legal->getInductionVars()) {
    const InductionDescriptor &IndDes = Induction.second;
    const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }
}

void LoopVectorizationCostModel::collectInLoopReductions() {
  for (auto &Reduction : Legal->getReductionVars()) {
    PHINode *Phi = Reduction.first;
    const RecurrenceDescriptor &RdxDesc = Reduction.second;

    // We don't collect reductions that are type promoted (yet).
    if (RdxDesc.getRecurrenceType() != Phi->getType())
      continue;

    // If the target would prefer this reduction to happen "in-loop", then we
    // want to record it as such.
    unsigned Opcode = RdxDesc.getOpcode();
    if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
        !TTI.preferInLoopReduction(Opcode, Phi->getType(),
                                   TargetTransformInfo::ReductionFlags()))
      continue;

    // Check that we can correctly put the reductions into the loop, by
    // finding the chain of operations that leads from the phi to the loop
    // exit value.
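    // E.g. for `%sum.next = add i32 %sum, %val` the chain is just {%sum.next};
    // a failed match (empty chain) leaves the reduction out-of-loop.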
    SmallVector<Instruction *, 4> ReductionOperations =
        RdxDesc.getReductionOpChain(Phi, TheLoop);
    bool InLoop = !ReductionOperations.empty();
    if (InLoop) {
      InLoopReductionChains[Phi] = ReductionOperations;
      // Add the elements to InLoopReductionImmediateChains for cost modelling.
      Instruction *LastChain = Phi;
      for (auto *I : ReductionOperations) {
        InLoopReductionImmediateChains[I] = LastChain;
        LastChain = I;
      }
    }
    LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
                      << " reduction for phi: " << *Phi << "\n");
  }
}

// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
// doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
                                 LoopVectorizationCostModel &CM) {
  unsigned WidestType;
  std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
  return WidestVectorRegBits / WidestType;
}

VectorizationFactor
LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
  assert(!UserVF.isScalable() && "scalable vectors not yet supported");
  ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
  if (!OrigLoop->isInnermost()) {
    // If the user doesn't provide a vectorization factor, determine a
    // reasonable one.
    if (UserVF.isZero()) {
      VF = ElementCount::getFixed(determineVPlanVF(
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize(),
          CM));
      LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");

      // Make sure we have a VF > 1 for stress testing.
      if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
        LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
                          << "overriding computed VF.\n");
        VF = ElementCount::getFixed(4);
      }
    }
    assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
    assert(isPowerOf2_32(VF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
                      << "VF " << VF << " to build VPlans.\n");
    buildVPlans(VF, VF);

    // For VPlan build stress testing, we bail out after VPlan construction.
    if (VPlanBuildStressTest)
      return VectorizationFactor::Disabled();

    return {VF, 0 /*Cost*/};
  }

  LLVM_DEBUG(
      dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
                "VPlan-native path.\n");
  return VectorizationFactor::Disabled();
}

Optional<VectorizationFactor>
LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");
  FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
  if (!MaxFactors) // Cases that should not be vectorized nor interleaved.
    return None;

  // Invalidate interleave groups if all blocks of the loop will be predicated.
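  // (Tail folding by masking predicates every block, and without target
  // support for masked interleaved accesses the groups could not be lowered.)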
  if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
      !useMaskedInterleavedAccesses(*TTI)) {
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate all interleaved groups due to fold-tail by masking "
           "which requires masked-interleaved support.\n");
    if (CM.InterleaveInfo.invalidateGroups())
      // Invalidating interleave groups also requires invalidating all decisions
      // based on them, which includes widening decisions and uniform and scalar
      // values.
      CM.invalidateCostModelingDecisions();
  }

  ElementCount MaxUserVF =
      UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
  bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
  if (!UserVF.isZero() && UserVFIsLegal) {
    assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    if (CM.selectUserVectorizationFactor(UserVF)) {
      LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
      CM.collectInLoopReductions();
      buildVPlansWithVPRecipes(UserVF, UserVF);
      LLVM_DEBUG(printPlans(dbgs()));
      return {{UserVF, 0}};
    } else
      reportVectorizationInfo("UserVF ignored because of invalid costs.",
                              "InvalidCost", ORE, OrigLoop);
  }

  // Populate the set of Vectorization Factor Candidates.
  ElementCountSet VFCandidates;
  for (auto VF = ElementCount::getFixed(1);
       ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
    VFCandidates.insert(VF);
  for (auto VF = ElementCount::getScalable(1);
       ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
    VFCandidates.insert(VF);

  for (const auto &VF : VFCandidates) {
    // Collect Uniform and Scalar instructions after vectorization with VF.
    CM.collectUniformsAndScalars(VF);

    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    if (VF.isVector())
      CM.collectInstsToScalarize(VF);
  }

  CM.collectInLoopReductions();
  buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
  buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);

  LLVM_DEBUG(printPlans(dbgs()));
  if (!MaxFactors.hasVector())
    return VectorizationFactor::Disabled();

  // Select the optimal vectorization factor.
  auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);

  // Check if it is profitable to vectorize with runtime checks.
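  // For example, a loop whose aliasing can only be resolved with more pointer
  // overlap checks than RuntimeMemoryCheckThreshold allows is rejected below
  // unless reordering was explicitly permitted.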
  unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
  if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
    bool PragmaThresholdReached =
        NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
    bool ThresholdReached =
        NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
    if ((ThresholdReached && !Hints.allowReordering()) ||
        PragmaThresholdReached) {
      ORE->emit([&]() {
        return OptimizationRemarkAnalysisAliasing(
                   DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
                   OrigLoop->getHeader())
               << "loop not vectorized: cannot prove it is safe to reorder "
                  "memory operations";
      });
      LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
      Hints.emitRemarkWithHints();
      return VectorizationFactor::Disabled();
    }
  }
  return SelectedVF;
}

VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
  assert(count_if(VPlans,
                  [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
             1 &&
         "Best VF does not have a single VPlan.");

  for (const VPlanPtr &Plan : VPlans) {
    if (Plan->hasVF(VF))
      return *Plan.get();
  }
  llvm_unreachable("No plan found!");
}

void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
                                           VPlan &BestVPlan,
                                           InnerLoopVectorizer &ILV,
                                           DominatorTree *DT) {
  LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF
                    << ", UF=" << BestUF << '\n');

  // Perform the actual loop transformation.

  // 1. Create a new empty loop. Unlink the old loop and connect the new one.
  VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
  State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
  State.CanonicalIV = ILV.Induction;
  ILV.collectPoisonGeneratingRecipes(State);

  ILV.printDebugTracesAtStart();

  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
  //
  //===------------------------------------------------===//

  // 2. Copy and widen instructions from the old loop into the new loop.
  BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr), State);
  BestVPlan.execute(&State);

  // 3. Fix the vectorized code: take care of header phis, live-outs,
  // predication, updating analyses.
  ILV.fixVectorizedLoop(State);

  ILV.printDebugTracesAtEnd();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
  for (const auto &Plan : VPlans)
    if (PrintVPlansInDotFormat)
      Plan->printDOT(O);
    else
      Plan->print(O);
}
#endif

void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
    SmallPtrSetImpl<Instruction *> &DeadInstructions) {

  // We create new control-flow for the vectorized loop, so an original exit
  // condition will be dead after vectorization if it is only used by the
  // terminator.
  SmallVector<BasicBlock *> ExitingBlocks;
  OrigLoop->getExitingBlocks(ExitingBlocks);
  for (auto *BB : ExitingBlocks) {
    auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
    if (!Cmp || !Cmp->hasOneUse())
      continue;

    // TODO: we should introduce a getUniqueExitingBlocks on Loop.
    if (!DeadInstructions.insert(Cmp).second)
      continue;

    // The operands of the icmp are often a dead trunc, used by IndUpdate.
    // TODO: can recurse through operands in general.
    for (Value *Op : Cmp->operands()) {
      if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
    }
  }

  // We create new "steps" for induction variable updates to which the original
  // induction variables map. An original update instruction will be dead if
  // all its users except the induction variable are dead.
  auto *Latch = OrigLoop->getLoopLatch();
  for (auto &Induction : Legal->getInductionVars()) {
    PHINode *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
    if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
      continue;

    if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          return U == Ind || DeadInstructions.count(cast<Instruction>(U));
        }))
      DeadInstructions.insert(IndUpdate);
  }
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

static void AddRuntimeUnrollDisableMetaData(Loop *L) {
  SmallVector<Metadata *, 4> MDs;
  // Reserve first location for self reference to the LoopID metadata node.
  MDs.push_back(nullptr);
  bool IsUnrollMetadata = false;
  MDNode *LoopID = L->getLoopID();
  if (LoopID) {
    // First find existing loop unrolling disable metadata.
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
      if (MD) {
        const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata =
            S && S->getString().startswith("llvm.loop.unroll.disable");
      }
      MDs.push_back(LoopID->getOperand(i));
    }
  }

  if (!IsUnrollMetadata) {
    // Add runtime unroll disable metadata.
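    // The resulting loop ID takes the form:
    //   !0 = distinct !{!0, ..., !1}
    //   !1 = !{!"llvm.loop.unroll.runtime.disable"}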
    LLVMContext &Context = L->getHeader()->getContext();
    SmallVector<Metadata *, 1> DisableOperands;
    DisableOperands.push_back(
        MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
    MDNode *DisableNode = MDNode::get(Context, DisableOperands);
    MDs.push_back(DisableNode);
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);
    L->setLoopID(NewLoopID);
  }
}

//===--------------------------------------------------------------------===//
// EpilogueVectorizerMainLoop
//===--------------------------------------------------------------------===//

/// This function is partially responsible for generating the control flow
/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
  MDNode *OrigLoopID = OrigLoop->getLoopID();
  Loop *Lp = createVectorLoopSkeleton("");

  // Generate the code to check the minimum iteration count of the vector
  // epilogue (see below).
  EPI.EpilogueIterationCountCheck =
      emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
  EPI.EpilogueIterationCountCheck->setName("iter.check");

  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
  EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);

  // Generate the iteration count check for the main loop, *after* the check
  // for the epilogue loop, so that the path-length is shorter for the case
  // that goes directly through the vector epilogue. The longer path-length
  // for the main loop is compensated for by the gain from vectorizing the
  // larger trip count. Note: the branch will get updated later on when we
  // vectorize the epilogue.
  EPI.MainLoopIterationCountCheck =
      emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);

  // Generate the induction variable.
  OldInduction = Legal->getPrimaryInduction();
  Type *IdxTy = Legal->getWidestInductionType();
  Value *StartIdx = ConstantInt::get(IdxTy, 0);

  IRBuilder<> B(&*Lp->getLoopPreheader()->getFirstInsertionPt());
  Value *Step = getRuntimeVF(B, IdxTy, VF * UF);
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  EPI.VectorTripCount = CountRoundDown;
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // Skip induction resume value creation here because they will be created in
  // the second pass. If we created them here, they wouldn't be used anyway,
  // because the VPlan in the second pass still contains the inductions from
  // the original loop.

  return completeLoopSkeleton(Lp, OrigLoopID);
}

void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
  LLVM_DEBUG({
    dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
           << "Main Loop VF:" << EPI.MainLoopVF
           << ", Main Loop UF:" << EPI.MainLoopUF
           << ", Epilogue Loop VF:" << EPI.EpilogueVF
           << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  });
}

void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
  DEBUG_WITH_TYPE(VerboseDebug, {
    dbgs() << "intermediate fn:\n"
           << *OrigLoop->getHeader()->getParent() << "\n";
  });
}

BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
    Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
  assert(L && "Expected valid Loop.");
  assert(Bypass && "Expected valid bypass basic block.");
  ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF;
  unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
  Value *Count = getOrCreateTripCount(L);
  // Reuse the existing vector loop preheader for the TC checks.
  // Note that a new preheader block is generated for the vector loop.
  BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
  IRBuilder<> Builder(TCCheckBlock->getTerminator());

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the main vector loop.
  auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF)
               ? ICmpInst::ICMP_ULE
               : ICmpInst::ICMP_ULT;

  Value *CheckMinIters = Builder.CreateICmp(
      P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor),
      "min.iters.check");

  if (!ForEpilogue)
    TCCheckBlock->setName("vector.main.loop.iter.check");

  // Create a new preheader for the vector loop.
  LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
                                   DT, LI, nullptr, "vector.ph");

  if (ForEpilogue) {
    assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
                                 DT->getNode(Bypass)->getIDom()) &&
           "TC check is expected to dominate Bypass");

    // Update dominator for Bypass & LoopExit.
    DT->changeImmediateDominator(Bypass, TCCheckBlock);
    if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
      // For loops with multiple exits, there's no edge from the middle block
      // to exit blocks (as the epilogue must run) and thus no need to update
      // the immediate dominator of the exit blocks.
      DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);

    LoopBypassBlocks.push_back(TCCheckBlock);

    // Save the trip count so we don't have to regenerate it in the
    // vec.epilog.iter.check. This is safe to do because the trip count
    // generated here dominates the vector epilog iter check.
    EPI.TripCount = Count;
  }

  ReplaceInstWithInst(
      TCCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));

  return TCCheckBlock;
}

//===--------------------------------------------------------------------===//
// EpilogueVectorizerEpilogueLoop
//===--------------------------------------------------------------------===//

/// This function is partially responsible for generating the control flow
/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
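/// Roughly: the preheader of the main vector loop is split so that the
/// remaining-iteration check (vec.epilog.iter.check) branches either to the
/// epilogue vector preheader (vec.epilog.ph) or to the scalar preheader.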
8217 BasicBlock * 8218 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 8219 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8220 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 8221 8222 // Now, compare the remaining count and if there aren't enough iterations to 8223 // execute the vectorized epilogue skip to the scalar part. 8224 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 8225 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 8226 LoopVectorPreHeader = 8227 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 8228 LI, nullptr, "vec.epilog.ph"); 8229 emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, 8230 VecEpilogueIterationCountCheck); 8231 8232 // Adjust the control flow taking the state info from the main loop 8233 // vectorization into account. 8234 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 8235 "expected this to be saved from the previous pass."); 8236 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 8237 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 8238 8239 DT->changeImmediateDominator(LoopVectorPreHeader, 8240 EPI.MainLoopIterationCountCheck); 8241 8242 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 8243 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8244 8245 if (EPI.SCEVSafetyCheck) 8246 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 8247 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8248 if (EPI.MemSafetyCheck) 8249 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 8250 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8251 8252 DT->changeImmediateDominator( 8253 VecEpilogueIterationCountCheck, 8254 VecEpilogueIterationCountCheck->getSinglePredecessor()); 8255 8256 DT->changeImmediateDominator(LoopScalarPreHeader, 8257 EPI.EpilogueIterationCountCheck); 8258 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8259 // If there is an epilogue which must run, there's no edge from the 8260 // middle block to exit blocks and thus no need to update the immediate 8261 // dominator of the exit blocks. 8262 DT->changeImmediateDominator(LoopExitBlock, 8263 EPI.EpilogueIterationCountCheck); 8264 8265 // Keep track of bypass blocks, as they feed start values to the induction 8266 // phis in the scalar loop preheader. 8267 if (EPI.SCEVSafetyCheck) 8268 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 8269 if (EPI.MemSafetyCheck) 8270 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 8271 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 8272 8273 // Generate a resume induction for the vector epilogue and put it in the 8274 // vector epilogue preheader 8275 Type *IdxTy = Legal->getWidestInductionType(); 8276 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val", 8277 LoopVectorPreHeader->getFirstNonPHI()); 8278 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck); 8279 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0), 8280 EPI.MainLoopIterationCountCheck); 8281 8282 // Generate the induction variable. 8283 OldInduction = Legal->getPrimaryInduction(); 8284 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8285 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); 8286 Value *StartIdx = EPResumeVal; 8287 Induction = 8288 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 8289 getDebugLocFromInstOrOperands(OldInduction)); 8290 8291 // Generate induction resume values. 
These variables save the new starting
// indexes for the scalar loop. They are used to test if there are any tail
// iterations left once the vector loop has completed.
// Note that when the vectorized epilogue is skipped due to the iteration count
// check, the resume value for the induction variable comes from the trip
// count of the main vector loop, hence passing the AdditionalBypass argument.
  createInductionResumeValues(Lp, CountRoundDown,
                              {VecEpilogueIterationCountCheck,
                               EPI.VectorTripCount} /* AdditionalBypass */);

  AddRuntimeUnrollDisableMetaData(Lp);
  return completeLoopSkeleton(Lp, OrigLoopID);
}

BasicBlock *
EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
    Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {

  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  assert(
      (!isa<Instruction>(EPI.TripCount) ||
       DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
      "saved trip count does not dominate insertion point.");
  Value *TC = EPI.TripCount;
  IRBuilder<> Builder(Insert->getTerminator());
  Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the vector epilogue loop.
  auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ? ICmpInst::ICMP_ULE
                                                        : ICmpInst::ICMP_ULT;

  Value *CheckMinIters =
      Builder.CreateICmp(P, Count,
                         createStepForVF(Builder, Count->getType(),
                                         EPI.EpilogueVF, EPI.EpilogueUF),
                         "min.epilog.iters.check");

  ReplaceInstWithInst(
      Insert->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));

  LoopBypassBlocks.push_back(Insert);
  return Insert;
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
  LLVM_DEBUG({
    dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
           << "Epilogue Loop VF:" << EPI.EpilogueVF
           << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  });
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
  DEBUG_WITH_TYPE(VerboseDebug, {
    dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
  });
}

bool LoopVectorizationPlanner::getDecisionAndClampRange(
    const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
  assert(!Range.isEmpty() && "Trying to test an empty VF range.");
  bool PredicateAtRangeStart = Predicate(Range.Start);

  for (ElementCount TmpVF = Range.Start * 2;
       ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
    if (Predicate(TmpVF) != PredicateAtRangeStart) {
      Range.End = TmpVF;
      break;
    }

  return PredicateAtRangeStart;
}

/// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
/// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
/// of VF's starting at a given VF and extending it as much as possible. Each
/// vectorization decision can potentially shorten this sub-range during
/// buildVPlan().
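/// For example (an illustrative trace, not prescribed behavior): with
/// MinVF = 4 and MaxVF = 16, the first call to buildVPlan() receives the
/// sub-range [4, 17); if some decision differs at VF = 16, the sub-range is
/// clamped to [4, 16) and a second VPlan is then built for [16, 17).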
8373 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8374 ElementCount MaxVF) { 8375 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8376 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8377 VFRange SubRange = {VF, MaxVFPlusOne}; 8378 VPlans.push_back(buildVPlan(SubRange)); 8379 VF = SubRange.End; 8380 } 8381 } 8382 8383 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8384 VPlanPtr &Plan) { 8385 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8386 8387 // Look for cached value. 8388 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8389 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8390 if (ECEntryIt != EdgeMaskCache.end()) 8391 return ECEntryIt->second; 8392 8393 VPValue *SrcMask = createBlockInMask(Src, Plan); 8394 8395 // The terminator has to be a branch inst! 8396 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8397 assert(BI && "Unexpected terminator found"); 8398 8399 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8400 return EdgeMaskCache[Edge] = SrcMask; 8401 8402 // If source is an exiting block, we know the exit edge is dynamically dead 8403 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8404 // adding uses of an otherwise potentially dead instruction. 8405 if (OrigLoop->isLoopExiting(Src)) 8406 return EdgeMaskCache[Edge] = SrcMask; 8407 8408 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8409 assert(EdgeMask && "No Edge Mask found for condition"); 8410 8411 if (BI->getSuccessor(0) != Dst) 8412 EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc()); 8413 8414 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 8415 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8416 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8417 // The select version does not introduce new UB if SrcMask is false and 8418 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8419 VPValue *False = Plan->getOrAddVPValue( 8420 ConstantInt::getFalse(BI->getCondition()->getType())); 8421 EdgeMask = 8422 Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc()); 8423 } 8424 8425 return EdgeMaskCache[Edge] = EdgeMask; 8426 } 8427 8428 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8429 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8430 8431 // Look for cached value. 8432 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8433 if (BCEntryIt != BlockMaskCache.end()) 8434 return BCEntryIt->second; 8435 8436 // All-one mask is modelled as no-mask following the convention for masked 8437 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8438 VPValue *BlockMask = nullptr; 8439 8440 if (OrigLoop->getHeader() == BB) { 8441 if (!CM.blockNeedsPredicationForAnyReason(BB)) 8442 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8443 8444 // Introduce the early-exit compare IV <= BTC to form header block mask. 8445 // This is used instead of IV < TC because TC may wrap, unlike BTC. 8446 // Start by constructing the desired canonical IV in the header block. 
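    // Illustrative sketch (simplified names, VF=4, i64 IV; not the exact
    // output): the header mask built below ends up as either
    //   %mask = icmp ule <4 x i64> %iv.vec, %btc.splat
    // or, when the target prefers an active-lane-mask,
    //   %mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %iv, i64 %tc)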
8447 VPValue *IV = nullptr; 8448 if (Legal->getPrimaryInduction()) 8449 IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction()); 8450 else { 8451 VPBasicBlock *HeaderVPBB = Plan->getEntry()->getEntryBasicBlock(); 8452 auto *IVRecipe = new VPWidenCanonicalIVRecipe(); 8453 HeaderVPBB->insert(IVRecipe, HeaderVPBB->getFirstNonPhi()); 8454 IV = IVRecipe; 8455 } 8456 8457 // Create the block in mask as the first non-phi instruction in the block. 8458 VPBuilder::InsertPointGuard Guard(Builder); 8459 auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi(); 8460 Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint); 8461 8462 assert(CM.foldTailByMasking() && "must fold the tail"); 8463 8464 if (CM.TTI.emitGetActiveLaneMask()) { 8465 VPValue *TC = Plan->getOrCreateTripCount(); 8466 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC}); 8467 } else { 8468 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8469 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8470 } 8471 return BlockMaskCache[BB] = BlockMask; 8472 } 8473 8474 // This is the block mask. We OR all incoming edges. 8475 for (auto *Predecessor : predecessors(BB)) { 8476 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8477 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8478 return BlockMaskCache[BB] = EdgeMask; 8479 8480 if (!BlockMask) { // BlockMask has its initialized nullptr value. 8481 BlockMask = EdgeMask; 8482 continue; 8483 } 8484 8485 BlockMask = Builder.createOr(BlockMask, EdgeMask, {}); 8486 } 8487 8488 return BlockMaskCache[BB] = BlockMask; 8489 } 8490 8491 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8492 ArrayRef<VPValue *> Operands, 8493 VFRange &Range, 8494 VPlanPtr &Plan) { 8495 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8496 "Must be called with either a load or store"); 8497 8498 auto willWiden = [&](ElementCount VF) -> bool { 8499 if (VF.isScalar()) 8500 return false; 8501 LoopVectorizationCostModel::InstWidening Decision = 8502 CM.getWideningDecision(I, VF); 8503 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8504 "CM decision should be taken at this point."); 8505 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8506 return true; 8507 if (CM.isScalarAfterVectorization(I, VF) || 8508 CM.isProfitableToScalarize(I, VF)) 8509 return false; 8510 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8511 }; 8512 8513 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8514 return nullptr; 8515 8516 VPValue *Mask = nullptr; 8517 if (Legal->isMaskRequired(I)) 8518 Mask = createBlockInMask(I->getParent(), Plan); 8519 8520 // Determine if the pointer operand of the access is either consecutive or 8521 // reverse consecutive. 
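  // E.g. (illustrative): for a loop counter i, a load of A[i] is consecutive,
  // a load of A[N - i] is reverse-consecutive, and a load of A[2 * i] is
  // neither, so it would be handled as a gather or scalarized instead.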
8522 LoopVectorizationCostModel::InstWidening Decision = 8523 CM.getWideningDecision(I, Range.Start); 8524 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse; 8525 bool Consecutive = 8526 Reverse || Decision == LoopVectorizationCostModel::CM_Widen; 8527 8528 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8529 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask, 8530 Consecutive, Reverse); 8531 8532 StoreInst *Store = cast<StoreInst>(I); 8533 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8534 Mask, Consecutive, Reverse); 8535 } 8536 8537 VPWidenIntOrFpInductionRecipe * 8538 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, 8539 ArrayRef<VPValue *> Operands) const { 8540 // Check if this is an integer or fp induction. If so, build the recipe that 8541 // produces its scalar and vector values. 8542 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi)) { 8543 assert(II->getStartValue() == 8544 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8545 return new VPWidenIntOrFpInductionRecipe(Phi, Operands[0], *II); 8546 } 8547 8548 return nullptr; 8549 } 8550 8551 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8552 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, 8553 VPlan &Plan) const { 8554 // Optimize the special case where the source is a constant integer 8555 // induction variable. Notice that we can only optimize the 'trunc' case 8556 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8557 // (c) other casts depend on pointer size. 8558 8559 // Determine whether \p K is a truncation based on an induction variable that 8560 // can be optimized. 8561 auto isOptimizableIVTruncate = 8562 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8563 return [=](ElementCount VF) -> bool { 8564 return CM.isOptimizableIVTruncate(K, VF); 8565 }; 8566 }; 8567 8568 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8569 isOptimizableIVTruncate(I), Range)) { 8570 8571 auto *Phi = cast<PHINode>(I->getOperand(0)); 8572 const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi); 8573 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8574 return new VPWidenIntOrFpInductionRecipe(Phi, Start, II, I); 8575 } 8576 return nullptr; 8577 } 8578 8579 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8580 ArrayRef<VPValue *> Operands, 8581 VPlanPtr &Plan) { 8582 // If all incoming values are equal, the incoming VPValue can be used directly 8583 // instead of creating a new VPBlendRecipe. 8584 VPValue *FirstIncoming = Operands[0]; 8585 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8586 return FirstIncoming == Inc; 8587 })) { 8588 return Operands[0]; 8589 } 8590 8591 // We know that all PHIs in non-header blocks are converted into selects, so 8592 // we don't have to worry about the insertion order and we can just use the 8593 // builder. At this point we generate the predication tree. There may be 8594 // duplications since this is a simple recursive scan, but future 8595 // optimizations will clean it up. 
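  // An illustrative sketch (simplified names, not the exact output): a phi
  //   %p = phi [ %a, %bb1 ], [ %b, %bb2 ]
  // becomes a VPBlendRecipe over (%a, mask-of-edge-bb1, %b, mask-of-edge-bb2),
  // which later generates roughly
  //   %p = select <VF x i1> %edge.mask.bb2, %b.vec, %a.vec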
  SmallVector<VPValue *, 2> OperandsWithMask;
  unsigned NumIncoming = Phi->getNumIncomingValues();

  for (unsigned In = 0; In < NumIncoming; In++) {
    VPValue *EdgeMask =
        createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
    assert((EdgeMask || NumIncoming == 1) &&
           "Multiple predecessors with one having a full mask");
    OperandsWithMask.push_back(Operands[In]);
    if (EdgeMask)
      OperandsWithMask.push_back(EdgeMask);
  }
  return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
}

VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
                                                   ArrayRef<VPValue *> Operands,
                                                   VFRange &Range) const {

  bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
      [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); },
      Range);

  if (IsPredicated)
    return nullptr;

  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
             ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
             ID == Intrinsic::pseudoprobe ||
             ID == Intrinsic::experimental_noalias_scope_decl))
    return nullptr;

  auto willWiden = [&](ElementCount VF) -> bool {
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The following case may be scalarized depending on the VF.
    // NeedToScalarize records whether the vectorized form of the call would
    // have to be scalarized; the cost comparison below decides whether the
    // vector intrinsic or a vector library call is used for the widened
    // version of the instruction.
    bool NeedToScalarize = false;
    InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
    InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
    bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
    return UseVectorIntrinsic || !NeedToScalarize;
  };

  if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
    return nullptr;

  ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
  return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
}

bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
  assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
         !isa<StoreInst>(I) && "Instruction should have been handled earlier");
  // Instruction should be widened, unless it is scalar after vectorization,
  // scalarization is profitable, or it is predicated.
8654 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8655 return CM.isScalarAfterVectorization(I, VF) || 8656 CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I); 8657 }; 8658 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8659 Range); 8660 } 8661 8662 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8663 ArrayRef<VPValue *> Operands) const { 8664 auto IsVectorizableOpcode = [](unsigned Opcode) { 8665 switch (Opcode) { 8666 case Instruction::Add: 8667 case Instruction::And: 8668 case Instruction::AShr: 8669 case Instruction::BitCast: 8670 case Instruction::FAdd: 8671 case Instruction::FCmp: 8672 case Instruction::FDiv: 8673 case Instruction::FMul: 8674 case Instruction::FNeg: 8675 case Instruction::FPExt: 8676 case Instruction::FPToSI: 8677 case Instruction::FPToUI: 8678 case Instruction::FPTrunc: 8679 case Instruction::FRem: 8680 case Instruction::FSub: 8681 case Instruction::ICmp: 8682 case Instruction::IntToPtr: 8683 case Instruction::LShr: 8684 case Instruction::Mul: 8685 case Instruction::Or: 8686 case Instruction::PtrToInt: 8687 case Instruction::SDiv: 8688 case Instruction::Select: 8689 case Instruction::SExt: 8690 case Instruction::Shl: 8691 case Instruction::SIToFP: 8692 case Instruction::SRem: 8693 case Instruction::Sub: 8694 case Instruction::Trunc: 8695 case Instruction::UDiv: 8696 case Instruction::UIToFP: 8697 case Instruction::URem: 8698 case Instruction::Xor: 8699 case Instruction::ZExt: 8700 return true; 8701 } 8702 return false; 8703 }; 8704 8705 if (!IsVectorizableOpcode(I->getOpcode())) 8706 return nullptr; 8707 8708 // Success: widen this instruction. 8709 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); 8710 } 8711 8712 void VPRecipeBuilder::fixHeaderPhis() { 8713 BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); 8714 for (VPHeaderPHIRecipe *R : PhisToFix) { 8715 auto *PN = cast<PHINode>(R->getUnderlyingValue()); 8716 VPRecipeBase *IncR = 8717 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); 8718 R->addOperand(IncR->getVPSingleValue()); 8719 } 8720 } 8721 8722 VPBasicBlock *VPRecipeBuilder::handleReplication( 8723 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8724 VPlanPtr &Plan) { 8725 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8726 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8727 Range); 8728 8729 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8730 [&](ElementCount VF) { return CM.isPredicatedInst(I, IsUniform); }, 8731 Range); 8732 8733 // Even if the instruction is not marked as uniform, there are certain 8734 // intrinsic calls that can be effectively treated as such, so we check for 8735 // them here. Conservatively, we only do this for scalable vectors, since 8736 // for fixed-width VFs we can always fall back on full scalarization. 8737 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) { 8738 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) { 8739 case Intrinsic::assume: 8740 case Intrinsic::lifetime_start: 8741 case Intrinsic::lifetime_end: 8742 // For scalable vectors if one of the operands is variant then we still 8743 // want to mark as uniform, which will generate one instruction for just 8744 // the first lane of the vector. We can't scalarize the call in the same 8745 // way as for fixed-width vectors because we don't know how many lanes 8746 // there are. 8747 // 8748 // The reasons for doing it this way for scalable vectors are: 8749 // 1. 
For the assume intrinsic, generating the instruction for the first
  //    lane is still better than not generating any at all. For
  //    example, the input may be a splat across all lanes.
  // 2. For the lifetime start/end intrinsics the pointer operand only
  //    does anything useful when the input comes from a stack object,
  //    which suggests it should always be uniform. For non-stack objects
  //    the effect is to poison the object, which still allows us to
  //    remove the call.
      IsUniform = true;
      break;
    default:
      break;
    }
  }

  auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
                                       IsUniform, IsPredicated);
  setRecipe(I, Recipe);
  Plan->addVPValue(I, Recipe);

  // Find if I uses a predicated instruction. If so, it will use its scalar
  // value. Avoid hoisting the insert-element which packs the scalar value into
  // a vector value, as that happens iff all users use the vector value.
  for (VPValue *Op : Recipe->operands()) {
    auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
    if (!PredR)
      continue;
    auto *RepR =
        cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
    assert(RepR->isPredicated() &&
           "expected Replicate recipe to be predicated");
    RepR->setAlsoPack(false);
  }

  // Finalize the recipe for Instr, first if it is not predicated.
  if (!IsPredicated) {
    LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
    VPBB->appendRecipe(Recipe);
    return VPBB;
  }
  LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");

  VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
  assert(SingleSucc && "VPBB must have a single successor when handling "
                       "predicated replication.");
  VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
  // Record predicated instructions for above packing optimizations.
  VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
  VPBlockUtils::insertBlockAfter(Region, VPBB);
  auto *RegSucc = new VPBasicBlock();
  VPBlockUtils::insertBlockAfter(RegSucc, Region);
  VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
  return RegSucc;
}

VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
                                                      VPRecipeBase *PredRecipe,
                                                      VPlanPtr &Plan) {
  // Instructions marked for predication are replicated and placed under an
  // if-then construct to prevent side-effects.

  // Generate recipes to compute the block mask for this region.
  VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);

  // Build the triangular if-then region.
  std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
  assert(Instr->getParent() && "Predicated instruction not in any basic block");
  auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
  auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
  auto *PHIRecipe = Instr->getType()->isVoidTy()
                        ?
nullptr 8820 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8821 if (PHIRecipe) { 8822 Plan->removeVPValueFor(Instr); 8823 Plan->addVPValue(Instr, PHIRecipe); 8824 } 8825 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8826 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8827 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8828 8829 // Note: first set Entry as region entry and then connect successors starting 8830 // from it in order, to propagate the "parent" of each VPBasicBlock. 8831 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 8832 VPBlockUtils::connectBlocks(Pred, Exit); 8833 8834 return Region; 8835 } 8836 8837 VPRecipeOrVPValueTy 8838 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8839 ArrayRef<VPValue *> Operands, 8840 VFRange &Range, VPlanPtr &Plan) { 8841 // First, check for specific widening recipes that deal with calls, memory 8842 // operations, inductions and Phi nodes. 8843 if (auto *CI = dyn_cast<CallInst>(Instr)) 8844 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 8845 8846 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 8847 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 8848 8849 VPRecipeBase *Recipe; 8850 if (auto Phi = dyn_cast<PHINode>(Instr)) { 8851 if (Phi->getParent() != OrigLoop->getHeader()) 8852 return tryToBlend(Phi, Operands, Plan); 8853 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands))) 8854 return toVPRecipeResult(Recipe); 8855 8856 VPHeaderPHIRecipe *PhiRecipe = nullptr; 8857 if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) { 8858 VPValue *StartV = Operands[0]; 8859 if (Legal->isReductionVariable(Phi)) { 8860 const RecurrenceDescriptor &RdxDesc = 8861 Legal->getReductionVars().find(Phi)->second; 8862 assert(RdxDesc.getRecurrenceStartValue() == 8863 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8864 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 8865 CM.isInLoopReduction(Phi), 8866 CM.useOrderedReductions(RdxDesc)); 8867 } else { 8868 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 8869 } 8870 8871 // Record the incoming value from the backedge, so we can add the incoming 8872 // value from the backedge after all recipes have been created. 8873 recordRecipeOf(cast<Instruction>( 8874 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 8875 PhisToFix.push_back(PhiRecipe); 8876 } else { 8877 // TODO: record backedge value for remaining pointer induction phis. 
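      // E.g. (an illustrative case, not the only one): a pointer induction
      //   %p = phi i8* [ %base, %preheader ], [ %p.next, %latch ]
      // reaches this branch and is handled by the VPWidenPHIRecipe below.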
      assert(Phi->getType()->isPointerTy() &&
             "only pointer phis should be handled here");
      assert(Legal->getInductionVars().count(Phi) &&
             "Not an induction variable");
      InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
      VPValue *Start = Plan->getOrAddVPValue(II.getStartValue());
      PhiRecipe = new VPWidenPHIRecipe(Phi, Start);
    }

    return toVPRecipeResult(PhiRecipe);
  }

  if (isa<TruncInst>(Instr) &&
      (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
                                               Range, *Plan)))
    return toVPRecipeResult(Recipe);

  if (!shouldWiden(Instr, Range))
    return nullptr;

  if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
    return toVPRecipeResult(new VPWidenGEPRecipe(
        GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));

  if (auto *SI = dyn_cast<SelectInst>(Instr)) {
    bool InvariantCond =
        PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
    return toVPRecipeResult(new VPWidenSelectRecipe(
        *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
  }

  return toVPRecipeResult(tryToWiden(Instr, Operands));
}

void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
                                                        ElementCount MaxVF) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");

  // Collect instructions from the original loop that will become trivially
  // dead in the vectorized loop. We don't need to vectorize these
  // instructions. For example, original induction update instructions can
  // become dead because we separately emit induction "steps" when generating
  // code for the new loop. Similarly, we create a new latch condition when
  // setting up the structure of the new loop, so the old one can become dead.
  SmallPtrSet<Instruction *, 4> DeadInstructions;
  collectTriviallyDeadInstructions(DeadInstructions);

  // Add assume instructions we need to drop to DeadInstructions, to prevent
  // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  // control flow is preserved, we should keep them.
  auto &ConditionalAssumes = Legal->getConditionalAssumes();
  DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());

  MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
  // Dead instructions do not need sinking. Remove them from SinkAfter.
  for (Instruction *I : DeadInstructions)
    SinkAfter.erase(I);

  // Cannot sink instructions after dead instructions (there won't be any
  // recipes for them). Instead, find the first non-dead previous instruction.
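  // For example (names illustrative only): if SinkAfter maps %s -> %d and %d
  // is dead, the target is walked back from %d to the closest preceding live
  // instruction, after which the recipe for %s will actually be sunk.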
  for (auto &P : Legal->getSinkAfter()) {
    Instruction *SinkTarget = P.second;
    Instruction *FirstInst = &*SinkTarget->getParent()->begin();
    (void)FirstInst;
    while (DeadInstructions.contains(SinkTarget)) {
      assert(
          SinkTarget != FirstInst &&
          "Must find a live instruction (at least the one feeding the "
          "first-order recurrence PHI) before reaching the beginning of the "
          "block");
      SinkTarget = SinkTarget->getPrevNode();
      assert(SinkTarget != P.first &&
             "sink source equals target, no sinking required");
    }
    P.second = SinkTarget;
  }

  auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
    VFRange SubRange = {VF, MaxVFPlusOne};
    VPlans.push_back(
        buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
    VF = SubRange.End;
  }
}

VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
    VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
    const MapVector<Instruction *, Instruction *> &SinkAfter) {

  SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;

  VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);

  // ---------------------------------------------------------------------------
  // Pre-construction: record ingredients whose recipes we'll need to further
  // process after constructing the initial VPlan.
  // ---------------------------------------------------------------------------

  // Mark instructions we'll need to sink later and their targets as
  // ingredients whose recipe we'll need to record.
  for (auto &Entry : SinkAfter) {
    RecipeBuilder.recordRecipeOf(Entry.first);
    RecipeBuilder.recordRecipeOf(Entry.second);
  }
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    RecurKind Kind =
        Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    RecipeBuilder.recordRecipeOf(Phi);
    for (auto &R : ReductionOperations) {
      RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
      // need to record the ICmp recipe, so it can be removed later.
      assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
             "Only min/max recurrences allowed for inloop reductions");
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
        RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
    }
  }

  // For each interleave group which is relevant for this (possibly trimmed)
  // Range, add it to the set of groups to be later applied to the VPlan and add
  // placeholders for its members' Recipes which we'll be replacing with a
  // single VPInterleaveRecipe.
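  // E.g. (illustrative): the loads of A[2*i] and A[2*i+1] in one iteration
  // form an interleave group with factor 2; their individual widened-load
  // recipes become placeholders that a single VPInterleaveRecipe (one wide
  // load plus shuffles) later replaces.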
  for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
    auto applyIG = [IG, this](ElementCount VF) -> bool {
      return (VF.isVector() && // Query is illegal for VF == 1
              CM.getWideningDecision(IG->getInsertPos(), VF) ==
                  LoopVectorizationCostModel::CM_Interleave);
    };
    if (!getDecisionAndClampRange(applyIG, Range))
      continue;
    InterleaveGroups.insert(IG);
    for (unsigned i = 0; i < IG->getFactor(); i++)
      if (Instruction *Member = IG->getMember(i))
        RecipeBuilder.recordRecipeOf(Member);
  }

  // ---------------------------------------------------------------------------
  // Build initial VPlan: Scan the body of the loop in a topological order to
  // visit each basic block after having visited its predecessor basic blocks.
  // ---------------------------------------------------------------------------

  // Create initial VPlan skeleton, with separate header and latch blocks.
  VPBasicBlock *HeaderVPBB = new VPBasicBlock();
  VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
  VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
  auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
  auto Plan = std::make_unique<VPlan>(TopRegion);

  // Scan the body of the loop in a topological order to visit each basic block
  // after having visited its predecessor basic blocks.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  VPBasicBlock *VPBB = HeaderVPBB;
  SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients and fill a new VPBasicBlock.
    unsigned VPBBsForBB = 0;
    VPBB->setName(BB->getName());
    Builder.setInsertPoint(VPBB);

    // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Instruction *Instr = &I;

      // First filter out irrelevant instructions, to ensure no recipes are
      // built for them.
      if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
        continue;

      SmallVector<VPValue *, 4> Operands;
      auto *Phi = dyn_cast<PHINode>(Instr);
      if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
        Operands.push_back(Plan->getOrAddVPValue(
            Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
      } else {
        auto OpRange = Plan->mapToVPValues(Instr->operands());
        Operands = {OpRange.begin(), OpRange.end()};
      }
      if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
              Instr, Operands, Range, Plan)) {
        // If Instr can be simplified to an existing VPValue, use it.
        if (RecipeOrValue.is<VPValue *>()) {
          auto *VPV = RecipeOrValue.get<VPValue *>();
          Plan->addVPValue(Instr, VPV);
          // If the re-used value is a recipe, register the recipe for the
          // instruction, in case the recipe for Instr needs to be recorded.
          if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
            RecipeBuilder.setRecipe(Instr, R);
          continue;
        }
        // Otherwise, add the new recipe.
9077 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); 9078 for (auto *Def : Recipe->definedValues()) { 9079 auto *UV = Def->getUnderlyingValue(); 9080 Plan->addVPValue(UV, Def); 9081 } 9082 9083 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && 9084 HeaderVPBB->getFirstNonPhi() != VPBB->end()) { 9085 // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section 9086 // of the header block. That can happen for truncates of induction 9087 // variables. Those recipes are moved to the phi section of the header 9088 // block after applying SinkAfter, which relies on the original 9089 // position of the trunc. 9090 assert(isa<TruncInst>(Instr)); 9091 InductionsToMove.push_back( 9092 cast<VPWidenIntOrFpInductionRecipe>(Recipe)); 9093 } 9094 RecipeBuilder.setRecipe(Instr, Recipe); 9095 VPBB->appendRecipe(Recipe); 9096 continue; 9097 } 9098 9099 // Otherwise, if all widening options failed, Instruction is to be 9100 // replicated. This may create a successor for VPBB. 9101 VPBasicBlock *NextVPBB = 9102 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 9103 if (NextVPBB != VPBB) { 9104 VPBB = NextVPBB; 9105 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 9106 : ""); 9107 } 9108 } 9109 9110 VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB); 9111 VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor()); 9112 } 9113 9114 // Fold the last, empty block into its predecessor. 9115 VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB); 9116 assert(VPBB && "expected to fold last (empty) block"); 9117 // After here, VPBB should not be used. 9118 VPBB = nullptr; 9119 9120 assert(isa<VPRegionBlock>(Plan->getEntry()) && 9121 !Plan->getEntry()->getEntryBasicBlock()->empty() && 9122 "entry block must be set to a VPRegionBlock having a non-empty entry " 9123 "VPBasicBlock"); 9124 RecipeBuilder.fixHeaderPhis(); 9125 9126 // --------------------------------------------------------------------------- 9127 // Transform initial VPlan: Apply previously taken decisions, in order, to 9128 // bring the VPlan to its final state. 9129 // --------------------------------------------------------------------------- 9130 9131 // Apply Sink-After legal constraints. 9132 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 9133 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 9134 if (Region && Region->isReplicator()) { 9135 assert(Region->getNumSuccessors() == 1 && 9136 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 9137 assert(R->getParent()->size() == 1 && 9138 "A recipe in an original replicator region must be the only " 9139 "recipe in its block"); 9140 return Region; 9141 } 9142 return nullptr; 9143 }; 9144 for (auto &Entry : SinkAfter) { 9145 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 9146 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 9147 9148 auto *TargetRegion = GetReplicateRegion(Target); 9149 auto *SinkRegion = GetReplicateRegion(Sink); 9150 if (!SinkRegion) { 9151 // If the sink source is not a replicate region, sink the recipe directly. 9152 if (TargetRegion) { 9153 // The target is in a replication region, make sure to move Sink to 9154 // the block after it, not into the replication region itself. 
9155 VPBasicBlock *NextBlock = 9156 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 9157 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 9158 } else 9159 Sink->moveAfter(Target); 9160 continue; 9161 } 9162 9163 // The sink source is in a replicate region. Unhook the region from the CFG. 9164 auto *SinkPred = SinkRegion->getSinglePredecessor(); 9165 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 9166 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 9167 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 9168 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 9169 9170 if (TargetRegion) { 9171 // The target recipe is also in a replicate region, move the sink region 9172 // after the target region. 9173 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 9174 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 9175 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 9176 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 9177 } else { 9178 // The sink source is in a replicate region, we need to move the whole 9179 // replicate region, which should only contain a single recipe in the 9180 // main block. 9181 auto *SplitBlock = 9182 Target->getParent()->splitAt(std::next(Target->getIterator())); 9183 9184 auto *SplitPred = SplitBlock->getSinglePredecessor(); 9185 9186 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 9187 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 9188 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 9189 } 9190 } 9191 9192 VPlanTransforms::removeRedundantInductionCasts(*Plan); 9193 9194 // Now that sink-after is done, move induction recipes for optimized truncates 9195 // to the phi section of the header block. 9196 for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove) 9197 Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi()); 9198 9199 // Adjust the recipes for any inloop reductions. 9200 adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan, 9201 RecipeBuilder, Range.Start); 9202 9203 // Introduce a recipe to combine the incoming and previous values of a 9204 // first-order recurrence. 9205 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9206 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 9207 if (!RecurPhi) 9208 continue; 9209 9210 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 9211 VPBasicBlock *InsertBlock = PrevRecipe->getParent(); 9212 auto *Region = GetReplicateRegion(PrevRecipe); 9213 if (Region) 9214 InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor()); 9215 if (Region || PrevRecipe->isPhi()) 9216 Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi()); 9217 else 9218 Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator())); 9219 9220 auto *RecurSplice = cast<VPInstruction>( 9221 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 9222 {RecurPhi, RecurPhi->getBackedgeValue()})); 9223 9224 RecurPhi->replaceAllUsesWith(RecurSplice); 9225 // Set the first operand of RecurSplice to RecurPhi again, after replacing 9226 // all users. 9227 RecurSplice->setOperand(0, RecurPhi); 9228 } 9229 9230 // Interleave memory: for each Interleave Group we marked earlier as relevant 9231 // for this VPlan, replace the Recipes widening its memory instructions with a 9232 // single VPInterleaveRecipe at its insertion point. 
9233 for (auto IG : InterleaveGroups) { 9234 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 9235 RecipeBuilder.getRecipe(IG->getInsertPos())); 9236 SmallVector<VPValue *, 4> StoredValues; 9237 for (unsigned i = 0; i < IG->getFactor(); ++i) 9238 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 9239 auto *StoreR = 9240 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 9241 StoredValues.push_back(StoreR->getStoredValue()); 9242 } 9243 9244 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 9245 Recipe->getMask()); 9246 VPIG->insertBefore(Recipe); 9247 unsigned J = 0; 9248 for (unsigned i = 0; i < IG->getFactor(); ++i) 9249 if (Instruction *Member = IG->getMember(i)) { 9250 if (!Member->getType()->isVoidTy()) { 9251 VPValue *OriginalV = Plan->getVPValue(Member); 9252 Plan->removeVPValueFor(Member); 9253 Plan->addVPValue(Member, VPIG->getVPValue(J)); 9254 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 9255 J++; 9256 } 9257 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9258 } 9259 } 9260 9261 // From this point onwards, VPlan-to-VPlan transformations may change the plan 9262 // in ways that accessing values using original IR values is incorrect. 9263 Plan->disableValue2VPValue(); 9264 9265 VPlanTransforms::sinkScalarOperands(*Plan); 9266 VPlanTransforms::mergeReplicateRegions(*Plan); 9267 9268 std::string PlanName; 9269 raw_string_ostream RSO(PlanName); 9270 ElementCount VF = Range.Start; 9271 Plan->addVF(VF); 9272 RSO << "Initial VPlan for VF={" << VF; 9273 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 9274 Plan->addVF(VF); 9275 RSO << "," << VF; 9276 } 9277 RSO << "},UF>=1"; 9278 RSO.flush(); 9279 Plan->setName(PlanName); 9280 9281 // Fold Exit block into its predecessor if possible. 9282 // TODO: Fold block earlier once all VPlan transforms properly maintain a 9283 // VPBasicBlock as exit. 9284 VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit()); 9285 9286 assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid"); 9287 return Plan; 9288 } 9289 9290 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 9291 // Outer loop handling: They may require CFG and instruction level 9292 // transformations before even evaluating whether vectorization is profitable. 9293 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 9294 // the vectorization pipeline. 9295 assert(!OrigLoop->isInnermost()); 9296 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 9297 9298 // Create new empty VPlan 9299 auto Plan = std::make_unique<VPlan>(); 9300 9301 // Build hierarchical CFG 9302 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 9303 HCFGBuilder.buildHierarchicalCFG(); 9304 9305 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End); 9306 VF *= 2) 9307 Plan->addVF(VF); 9308 9309 if (EnableVPlanPredication) { 9310 VPlanPredicator VPP(*Plan); 9311 VPP.predicate(); 9312 9313 // Avoid running transformation to recipes until masked code generation in 9314 // VPlan-native path is in place. 9315 return Plan; 9316 } 9317 9318 SmallPtrSet<Instruction *, 1> DeadInstructions; 9319 VPlanTransforms::VPInstructionsToVPRecipes( 9320 OrigLoop, Plan, 9321 [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); }, 9322 DeadInstructions, *PSE.getSE()); 9323 return Plan; 9324 } 9325 9326 // Adjust the recipes for reductions. 
For in-loop reductions, the chain of
// instructions leading from the loop exit instr to the phi needs to be
// converted to reductions, with one operand being vector and the other being
// the scalar reduction chain. For other reductions, a select is introduced
// between the phi and live-out recipes when folding the tail.
void LoopVectorizationPlanner::adjustRecipesForReductions(
    VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
    ElementCount MinVF) {
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    const RecurrenceDescriptor &RdxDesc =
        Legal->getReductionVars().find(Phi)->second;
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
      continue;

    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For minmax the chain will be the select instructions.
    Instruction *Chain = Phi;
    for (Instruction *R : ReductionOperations) {
      VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
      RecurKind Kind = RdxDesc.getRecurrenceKind();

      VPValue *ChainOp = Plan->getVPValue(Chain);
      unsigned FirstOpId;
      assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
             "Only min/max recurrences allowed for inloop reductions");
      // Recognize a call to the llvm.fmuladd intrinsic.
      bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
      assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
             "Expected instruction to be a call to the llvm.fmuladd intrinsic");
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
        assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
               "Expected to replace a VPWidenSelectSC");
        FirstOpId = 1;
      } else {
        assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
                (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
               "Expected to replace a VPWidenSC");
        FirstOpId = 0;
      }
      unsigned VecOpId =
          R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
      VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));

      auto *CondOp = CM.foldTailByMasking()
                         ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
                         : nullptr;

      if (IsFMulAdd) {
        // If the instruction is a call to the llvm.fmuladd intrinsic then we
        // need to create an fmul recipe to use as the vector operand for the
        // fadd reduction.
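        // Sketch (illustrative): a chain step
        //   %sum.next = call float @llvm.fmuladd.f32(float %a, float %b, float %sum)
        // is split below into
        //   %t = fmul float %a, %b        ; the new FMul recipe
        //   %sum.next = fadd-reduction(%sum, %t)
        // so the reduction recipe only has to handle an fadd-style chain.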
        VPInstruction *FMulRecipe = new VPInstruction(
            Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))});
        FMulRecipe->setFastMathFlags(R->getFastMathFlags());
        WidenRecipe->getParent()->insert(FMulRecipe,
                                         WidenRecipe->getIterator());
        VecOp = FMulRecipe;
      }
      VPReductionRecipe *RedRecipe =
          new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
      WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
      Plan->removeVPValueFor(R);
      Plan->addVPValue(R, RedRecipe);
      WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
      WidenRecipe->eraseFromParent();

      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
        VPRecipeBase *CompareRecipe =
            RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
        assert(isa<VPWidenRecipe>(CompareRecipe) &&
               "Expected to replace a VPWidenSC");
        assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
               "Expected no remaining users");
        CompareRecipe->eraseFromParent();
      }
      Chain = R;
    }
  }

  // If tail is folded by masking, introduce selects between the phi
  // and the live-out instruction of each reduction, at the end of the latch.
  if (CM.foldTailByMasking()) {
    for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
      VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
      if (!PhiR || PhiR->isInLoop())
        continue;
      Builder.setInsertPoint(LatchVPBB);
      VPValue *Cond =
          RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
      VPValue *Red = PhiR->getBackedgeValue();
      Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
    }
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
                               VPSlotTracker &SlotTracker) const {
  O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
  IG->getInsertPos()->printAsOperand(O, false);
  O << ", ";
  getAddr()->printAsOperand(O, SlotTracker);
  VPValue *Mask = getMask();
  if (Mask) {
    O << ", ";
    Mask->printAsOperand(O, SlotTracker);
  }

  unsigned OpIdx = 0;
  for (unsigned i = 0; i < IG->getFactor(); ++i) {
    if (!IG->getMember(i))
      continue;
    if (getNumStoreOperands() > 0) {
      O << "\n" << Indent << "  store ";
      getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
      O << " to index " << i;
    } else {
      O << "\n" << Indent << "  ";
      getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
      O << " = load from index " << i;
    }
    ++OpIdx;
  }
}
#endif

void VPWidenCallRecipe::execute(VPTransformState &State) {
  State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
                                  *this, State);
}

void VPWidenSelectRecipe::execute(VPTransformState &State) {
  auto &I = *cast<SelectInst>(getUnderlyingInstr());
  State.ILV->setDebugLocFromInst(&I);

  // The condition can be loop invariant but still defined inside the
  // loop. This means that we can't just use the original 'cond' value.
  // We have to take the 'vectorized' value and pick the first lane.
  // Instcombine will make this a no-op.
  auto *InvarCond =
      InvariantCond ?
          State.get(getOperand(0), VPIteration(0, 0)) : nullptr;

  for (unsigned Part = 0; Part < State.UF; ++Part) {
    Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part);
    Value *Op0 = State.get(getOperand(1), Part);
    Value *Op1 = State.get(getOperand(2), Part);
    Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
    State.set(this, Sel, Part);
    State.ILV->addMetadata(Sel, &I);
  }
}

void VPWidenRecipe::execute(VPTransformState &State) {
  auto &I = *cast<Instruction>(getUnderlyingValue());
  auto &Builder = State.Builder;
  switch (I.getOpcode()) {
  case Instruction::Call:
  case Instruction::Br:
  case Instruction::PHI:
  case Instruction::GetElementPtr:
  case Instruction::Select:
    llvm_unreachable("This instruction is handled by a different recipe.");
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::FNeg:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Just widen unops and binops.
    State.ILV->setDebugLocFromInst(&I);

    for (unsigned Part = 0; Part < State.UF; ++Part) {
      SmallVector<Value *, 2> Ops;
      for (VPValue *VPOp : operands())
        Ops.push_back(State.get(VPOp, Part));

      Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);

      if (auto *VecOp = dyn_cast<Instruction>(V)) {
        VecOp->copyIRFlags(&I);

        // If the instruction is vectorized and was in a basic block that
        // needed predication, we can't propagate poison-generating flags
        // (nuw/nsw, exact, etc.). The control flow has been linearized and
        // the instruction is no longer guarded by the predicate, so the flag
        // properties may no longer hold.
        if (State.MayGeneratePoisonRecipes.contains(this))
          VecOp->dropPoisonGeneratingFlags();
      }

      // Use this vector value for all users of the original instruction.
      State.set(this, V, Part);
      State.ILV->addMetadata(V, &I);
    }

    break;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Widen compares. Generate vector compares.
    bool FCmp = (I.getOpcode() == Instruction::FCmp);
    auto *Cmp = cast<CmpInst>(&I);
    State.ILV->setDebugLocFromInst(Cmp);
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      Value *A = State.get(getOperand(0), Part);
      Value *B = State.get(getOperand(1), Part);
      Value *C = nullptr;
      if (FCmp) {
        // Propagate fast math flags.
9554 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9555 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 9556 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 9557 } else { 9558 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 9559 } 9560 State.set(this, C, Part); 9561 State.ILV->addMetadata(C, &I); 9562 } 9563 9564 break; 9565 } 9566 9567 case Instruction::ZExt: 9568 case Instruction::SExt: 9569 case Instruction::FPToUI: 9570 case Instruction::FPToSI: 9571 case Instruction::FPExt: 9572 case Instruction::PtrToInt: 9573 case Instruction::IntToPtr: 9574 case Instruction::SIToFP: 9575 case Instruction::UIToFP: 9576 case Instruction::Trunc: 9577 case Instruction::FPTrunc: 9578 case Instruction::BitCast: { 9579 auto *CI = cast<CastInst>(&I); 9580 State.ILV->setDebugLocFromInst(CI); 9581 9582 /// Vectorize casts. 9583 Type *DestTy = (State.VF.isScalar()) 9584 ? CI->getType() 9585 : VectorType::get(CI->getType(), State.VF); 9586 9587 for (unsigned Part = 0; Part < State.UF; ++Part) { 9588 Value *A = State.get(getOperand(0), Part); 9589 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 9590 State.set(this, Cast, Part); 9591 State.ILV->addMetadata(Cast, &I); 9592 } 9593 break; 9594 } 9595 default: 9596 // This instruction is not vectorized by simple widening. 9597 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 9598 llvm_unreachable("Unhandled instruction!"); 9599 } // end of switch. 9600 } 9601 9602 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9603 auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr()); 9604 // Construct a vector GEP by widening the operands of the scalar GEP as 9605 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 9606 // results in a vector of pointers when at least one operand of the GEP 9607 // is vector-typed. Thus, to keep the representation compact, we only use 9608 // vector-typed operands for loop-varying values. 9609 9610 if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 9611 // If we are vectorizing, but the GEP has only loop-invariant operands, 9612 // the GEP we build (by only using vector-typed operands for 9613 // loop-varying values) would be a scalar pointer. Thus, to ensure we 9614 // produce a vector of pointers, we need to either arbitrarily pick an 9615 // operand to broadcast, or broadcast a clone of the original GEP. 9616 // Here, we broadcast a clone of the original. 9617 // 9618 // TODO: If at some point we decide to scalarize instructions having 9619 // loop-invariant operands, this special case will no longer be 9620 // required. We would add the scalarization decision to 9621 // collectLoopScalars() and teach getVectorValue() to broadcast 9622 // the lane-zero scalar value. 9623 auto *Clone = State.Builder.Insert(GEP->clone()); 9624 for (unsigned Part = 0; Part < State.UF; ++Part) { 9625 Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone); 9626 State.set(this, EntryPart, Part); 9627 State.ILV->addMetadata(EntryPart, GEP); 9628 } 9629 } else { 9630 // If the GEP has at least one loop-varying operand, we are sure to 9631 // produce a vector of pointers. But if we are only unrolling, we want 9632 // to produce a scalar GEP for each unroll part. Thus, the GEP we 9633 // produce with the code below will be scalar (if VF == 1) or vector 9634 // (otherwise). Note that for the unroll-only case, we still maintain 9635 // values in the vector mapping with initVector, as we do for other 9636 // instructions. 
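    // E.g. (illustrative, VF=4): with a loop-varying index vector %idx.vec,
    // the code below emits roughly
    //   %gep = getelementptr inbounds float, float* %base, <4 x i64> %idx.vec
    // producing a <4 x float*> vector of pointers.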
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      // The pointer operand of the new GEP. If it's loop-invariant, we
      // won't broadcast it.
      auto *Ptr = IsPtrLoopInvariant
                      ? State.get(getOperand(0), VPIteration(0, 0))
                      : State.get(getOperand(0), Part);

      // Collect all the indices for the new GEP. If any index is
      // loop-invariant, we won't broadcast it.
      SmallVector<Value *, 4> Indices;
      for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
        VPValue *Operand = getOperand(I);
        if (IsIndexLoopInvariant[I - 1])
          Indices.push_back(State.get(Operand, VPIteration(0, 0)));
        else
          Indices.push_back(State.get(Operand, Part));
      }

      // If the GEP instruction is vectorized and was in a basic block that
      // needed predication, we can't propagate the poison-generating
      // 'inbounds' flag. The control flow has been linearized and the GEP is
      // no longer guarded by the predicate, which could cause the 'inbounds'
      // property to no longer hold.
      bool IsInBounds =
          GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0;

      // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
      // but it should be a vector, otherwise.
      auto *NewGEP = IsInBounds
                         ? State.Builder.CreateInBoundsGEP(
                               GEP->getSourceElementType(), Ptr, Indices)
                         : State.Builder.CreateGEP(GEP->getSourceElementType(),
                                                   Ptr, Indices);
      assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
             "NewGEP is not a pointer vector");
      State.set(this, NewGEP, Part);
      State.ILV->addMetadata(NewGEP, GEP);
    }
  }
}

void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Int or FP induction being replicated.");
  State.ILV->widenIntOrFpInduction(IV, getInductionDescriptor(),
                                   getStartValue()->getLiveInIRValue(),
                                   getTruncInst(), getVPValue(0), State);
}

void VPWidenPHIRecipe::execute(VPTransformState &State) {
  State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
                                 State);
}

void VPBlendRecipe::execute(VPTransformState &State) {
  State.ILV->setDebugLocFromInst(Phi, &State.Builder);
  // We know that all PHIs in non-header blocks are converted into
  // selects, so we don't have to worry about the insertion order and we
  // can just use the builder.
  // At this point we generate the predication tree. There may be
  // duplications since this is a simple recursive scan, but future
  // optimizations will clean it up.

  unsigned NumIncoming = getNumIncomingValues();

  // Generate a sequence of selects of the form:
  // SELECT(Mask3, In3,
  //        SELECT(Mask2, In2,
  //               SELECT(Mask1, In1,
  //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi,
  // and which are essentially undef, are taken from In0.
  InnerLoopVectorizer::VectorParts Entry(State.UF);
  for (unsigned In = 0; In < NumIncoming; ++In) {
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      // We might have single edge PHIs (blocks) - use an identity
      // 'select' for the first PHI operand.
      Value *In0 = State.get(getIncomingValue(In), Part);
      if (In == 0)
        Entry[Part] = In0; // Initialize with the first incoming value.
      else {
        // Select between the current value and the previous incoming edge
        // based on the incoming mask.
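        // E.g. with two incoming values this loop emits a single select per
        // part: Entry[Part] = select(Mask1, In1, In0).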
        Value *Cond = State.get(getMask(In), Part);
        Entry[Part] =
            State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
      }
    }
  }
  for (unsigned Part = 0; Part < State.UF; ++Part)
    State.set(this, Entry[Part], Part);
}

void VPInterleaveRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Interleave group being replicated.");
  State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
                                      getStoredValues(), getMask());
}

void VPReductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Reduction being replicated.");
  Value *PrevInChain = State.get(getChainOp(), 0);
  RecurKind Kind = RdxDesc->getRecurrenceKind();
  bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
  // Propagate the fast-math flags carried by the underlying instruction.
  IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
  State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    Value *NewVecOp = State.get(getVecOp(), Part);
    if (VPValue *Cond = getCondOp()) {
      Value *NewCond = State.get(Cond, Part);
      VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
      Value *Iden = RdxDesc->getRecurrenceIdentity(
          Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
      Value *IdenVec =
          State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
      Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
      NewVecOp = Select;
    }
    Value *NewRed;
    Value *NextInChain;
    if (IsOrdered) {
      if (State.VF.isVector())
        NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
                                        PrevInChain);
      else
        NewRed = State.Builder.CreateBinOp(
            (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
            NewVecOp);
      PrevInChain = NewRed;
    } else {
      PrevInChain = State.get(getChainOp(), Part);
      NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
    }
    if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
      NextInChain = createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
                                   NewRed, PrevInChain);
    } else if (IsOrdered)
      NextInChain = NewRed;
    else
      NextInChain = State.Builder.CreateBinOp(
          (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
          PrevInChain);
    State.set(this, NextInChain, Part);
  }
}

void VPReplicateRecipe::execute(VPTransformState &State) {
  if (State.Instance) { // Generate a single instance.
    assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
    State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
                                    IsPredicated, State);
    // Insert the scalar instance, packing it into a vector.
    if (AlsoPack && State.VF.isVector()) {
      // If we're constructing lane 0, initialize to start from poison.
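      // Subsequent lanes then overwrite their element of this poison vector
      // via packScalarIntoVectorValue below.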
      if (State.Instance->Lane.isFirstLane()) {
        assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
        Value *Poison = PoisonValue::get(
            VectorType::get(getUnderlyingValue()->getType(), State.VF));
        State.set(this, Poison, State.Instance->Part);
      }
      State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
    }
    return;
  }

  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
  unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
  assert((!State.VF.isScalable() || IsUniform) &&
         "Can't scalarize a scalable vector");
  for (unsigned Part = 0; Part < State.UF; ++Part)
    for (unsigned Lane = 0; Lane < EndLane; ++Lane)
      State.ILV->scalarizeInstruction(getUnderlyingInstr(), this,
                                      VPIteration(Part, Lane), IsPredicated,
                                      State);
}

void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Branch on Mask works only on single instance.");

  unsigned Part = State.Instance->Part;
  unsigned Lane = State.Instance->Lane.getKnownLane();

  Value *ConditionBit = nullptr;
  VPValue *BlockInMask = getMask();
  if (BlockInMask) {
    ConditionBit = State.get(BlockInMask, Part);
    if (ConditionBit->getType()->isVectorTy())
      ConditionBit = State.Builder.CreateExtractElement(
          ConditionBit, State.Builder.getInt32(Lane));
  } else // Block in mask is all-one.
    ConditionBit = State.Builder.getTrue();

  // Replace the temporary unreachable terminator with a new conditional
  // branch, whose two destinations will be set later when they are created.
  auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
  assert(isa<UnreachableInst>(CurrentTerminator) &&
         "Expected to replace unreachable terminator with conditional branch.");
  auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
  CondBr->setSuccessor(0, nullptr);
  ReplaceInstWithInst(CurrentTerminator, CondBr);
}

void VPPredInstPHIRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Predicated instruction PHI works per instance.");
  Instruction *ScalarPredInst =
      cast<Instruction>(State.get(getOperand(0), *State.Instance));
  BasicBlock *PredicatedBB = ScalarPredInst->getParent();
  BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
  assert(PredicatingBB && "Predicated block has no single predecessor.");
  assert(isa<VPReplicateRecipe>(getOperand(0)) &&
         "operand must be VPReplicateRecipe");

  // By current pack/unpack logic we need to generate only a single phi node:
  // if a vector value for the predicated instruction exists at this point it
  // means the instruction has vector users only, and a phi for the vector
  // value is needed. In this case the recipe of the predicated instruction is
  // marked to also do that packing, thereby "hoisting" the insert-element
  // sequence. Otherwise, a phi node for the scalar value is needed.
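  // In the vector case, the phi node generated below looks roughly like:
  //   %vphi = phi <VF x Ty> [ %unmodified.vec, %predicating.bb ],
  //                         [ %vec.with.inserted.elt, %predicated.bb ]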
  unsigned Part = State.Instance->Part;
  if (State.hasVectorValue(getOperand(0), Part)) {
    Value *VectorValue = State.get(getOperand(0), Part);
    InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
    PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
    VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
    VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
    if (State.hasVectorValue(this, Part))
      State.reset(this, VPhi, Part);
    else
      State.set(this, VPhi, Part);
    // NOTE: Currently we need to update the value of the operand, so the next
    // predicated iteration inserts its generated value in the correct vector.
    State.reset(getOperand(0), VPhi, Part);
  } else {
    Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
    PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
    Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
                     PredicatingBB);
    Phi->addIncoming(ScalarPredInst, PredicatedBB);
    if (State.hasScalarValue(this, *State.Instance))
      State.reset(this, Phi, *State.Instance);
    else
      State.set(this, Phi, *State.Instance);
    // NOTE: Currently we need to update the value of the operand, so the next
    // predicated iteration inserts its generated value in the correct vector.
    State.reset(getOperand(0), Phi, *State.Instance);
  }
}

void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
  VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;

  // Attempt to issue a wide load.
  LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
  StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);

  assert((LI || SI) && "Invalid Load/Store instruction");
  assert((!SI || StoredValue) && "No stored value provided for widened store");
  assert((!LI || !StoredValue) && "Stored value provided for widened load");

  Type *ScalarDataTy = getLoadStoreType(&Ingredient);

  auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
  const Align Alignment = getLoadStoreAlignment(&Ingredient);
  bool CreateGatherScatter = !Consecutive;

  auto &Builder = State.Builder;
  InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
  bool isMaskRequired = getMask();
  if (isMaskRequired)
    for (unsigned Part = 0; Part < State.UF; ++Part)
      BlockInMaskParts[Part] = State.get(getMask(), Part);

  const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
    // Calculate the pointer for the specific unroll-part.
    GetElementPtrInst *PartPtr = nullptr;

    bool InBounds = false;
    if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
      InBounds = gep->isInBounds();
    if (Reverse) {
      // If the address is consecutive but reversed, then the
      // wide store needs to start at the last vector element.
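      // E.g. for fixed-width VF = 4 and Part == 1, the two GEPs emitted
      // below offset the pointer by -1 * 4 = -4 and then by 1 - 4 = -3
      // elements, so the wide access covers Ptr[-7] .. Ptr[-4]; the loaded
      // or stored value (and the mask, if any) is reversed to match.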
      // RunTimeVF = VScale * VF.getKnownMinValue()
      // For fixed-width vectors, VScale == 1, so RunTimeVF =
      // VF.getKnownMinValue().
      Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
      // NumElt = -Part * RunTimeVF
      Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
      // LastLane = 1 - RunTimeVF
      Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
      PartPtr =
          cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
      PartPtr->setIsInBounds(InBounds);
      PartPtr = cast<GetElementPtrInst>(
          Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
      PartPtr->setIsInBounds(InBounds);
      if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
        BlockInMaskParts[Part] =
            Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse");
    } else {
      Value *Increment =
          createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part);
      PartPtr = cast<GetElementPtrInst>(
          Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
      PartPtr->setIsInBounds(InBounds);
    }

    unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
    return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
  };

  // Handle Stores:
  if (SI) {
    State.ILV->setDebugLocFromInst(SI);

    for (unsigned Part = 0; Part < State.UF; ++Part) {
      Instruction *NewSI = nullptr;
      Value *StoredVal = State.get(StoredValue, Part);
      if (CreateGatherScatter) {
        Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
        Value *VectorGep = State.get(getAddr(), Part);
        NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
                                            MaskPart);
      } else {
        if (Reverse) {
          // If we store to reverse consecutive memory locations, then we need
          // to reverse the order of elements in the stored value.
          StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
          // We don't want to update the value in the map as it might be used
          // in another expression. So don't call resetVectorValue(StoredVal).
        }
        auto *VecPtr =
            CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
        if (isMaskRequired)
          NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
                                            BlockInMaskParts[Part]);
        else
          NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
      }
      State.ILV->addMetadata(NewSI, SI);
    }
    return;
  }

  // Handle loads.
  assert(LI && "Must have a load instruction");
  State.ILV->setDebugLocFromInst(LI);
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    Value *NewLI;
    if (CreateGatherScatter) {
      Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
      Value *VectorGep = State.get(getAddr(), Part);
      NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
                                         nullptr, "wide.masked.gather");
      State.ILV->addMetadata(NewLI, LI);
    } else {
      auto *VecPtr =
          CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
      if (isMaskRequired)
        NewLI = Builder.CreateMaskedLoad(
            DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
            PoisonValue::get(DataTy), "wide.masked.load");
      else
        NewLI =
            Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");

      // Add metadata to the load, but setVectorValue to the reverse shuffle.
      State.ILV->addMetadata(NewLI, LI);
      if (Reverse)
        NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
    }

    State.set(getVPSingleValue(), NewLI, Part);
  }
}

// Determine how to lower the scalar epilogue, which depends on 1) optimising
// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
// predication, and 4) a TTI hook that analyses whether the loop is suitable
// for predication.
static ScalarEpilogueLowering getScalarEpilogueLowering(
    Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
    AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
    LoopVectorizationLegality &LVL) {
  // 1) OptSize takes precedence over all other options, i.e. if this is set,
  // don't look at hints or options, and don't request a scalar epilogue.
  // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
  // LoopAccessInfo (due to code dependency and not being able to reliably get
  // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
  // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
  // versioning when the vectorization is forced, unlike hasOptSize. So revert
  // back to the old way and vectorize with versioning when forced. See D81345.)
  if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
                                                      PGSOQueryType::IRPass) &&
                          Hints.getForce() != LoopVectorizeHints::FK_Enabled))
    return CM_ScalarEpilogueNotAllowedOptSize;

  // 2) If set, obey the directives.
  if (PreferPredicateOverEpilogue.getNumOccurrences()) {
    switch (PreferPredicateOverEpilogue) {
    case PreferPredicateTy::ScalarEpilogue:
      return CM_ScalarEpilogueAllowed;
    case PreferPredicateTy::PredicateElseScalarEpilogue:
      return CM_ScalarEpilogueNotNeededUsePredicate;
    case PreferPredicateTy::PredicateOrDontVectorize:
      return CM_ScalarEpilogueNotAllowedUsePredicate;
    }
  }

  // 3) If set, obey the hints.
  switch (Hints.getPredicate()) {
  case LoopVectorizeHints::FK_Enabled:
    return CM_ScalarEpilogueNotNeededUsePredicate;
  case LoopVectorizeHints::FK_Disabled:
    return CM_ScalarEpilogueAllowed;
  }

  // 4) If the TTI hook indicates this is profitable, request predication.
  if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, LVL.getLAI()))
    return CM_ScalarEpilogueNotNeededUsePredicate;

  return CM_ScalarEpilogueAllowed;
}

Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If Values have been set for this Def return the one relevant for \p Part.
  if (hasVectorValue(Def, Part))
    return Data.PerPartOutput[Def][Part];

  if (!hasScalarValue(Def, {Part, 0})) {
    Value *IRV = Def->getLiveInIRValue();
    Value *B = ILV->getBroadcastInstrs(IRV);
    set(Def, B, Part);
    return B;
  }

  Value *ScalarValue = get(Def, {Part, 0});
  // If we aren't vectorizing, we can just copy the scalar map values over
  // to the vector map.
  if (VF.isScalar()) {
    set(Def, ScalarValue, Part);
    return ScalarValue;
  }

  auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
  bool IsUniform = RepR && RepR->isUniform();

  unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
  // Check if there is a scalar value for the selected lane.
  if (!hasScalarValue(Def, {Part, LastLane})) {
    // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
    assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
           "unexpected recipe found to be invariant");
    IsUniform = true;
    LastLane = 0;
  }

  auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
  // Set the insert point after the last scalarized instruction or after the
  // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
  // will directly follow the scalar definitions.
  auto OldIP = Builder.saveIP();
  auto NewIP =
      isa<PHINode>(LastInst)
          ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
          : std::next(BasicBlock::iterator(LastInst));
  Builder.SetInsertPoint(&*NewIP);

  // However, if we are vectorizing, we need to construct the vector values.
  // If the value is known to be uniform after vectorization, we can just
  // broadcast the scalar value corresponding to lane zero for each unroll
  // iteration. Otherwise, we construct the vector values using
  // insertelement instructions. Since the resulting vectors are stored in
  // State, we will only generate the insertelements once.
  Value *VectorValue = nullptr;
  if (IsUniform) {
    VectorValue = ILV->getBroadcastInstrs(ScalarValue);
    set(Def, VectorValue, Part);
  } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
    for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
      ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
    VectorValue = get(Def, Part);
  }
  Builder.restoreIP(OldIP);
  return VectorValue;
}

// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying the
// input LLVM IR.
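// (This path is only reached when the EnableVPlanNativePath flag is set; the
// assertion below enforces that.)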
static bool processLoopInVPlanNativePath(
    Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
    LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
    TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
    OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
    ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
    LoopVectorizationRequirements &Requirements) {

  if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
    LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
    return false;
  }
  assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
  Function *F = L->getHeader()->getParent();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());

  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);

  LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  // Use the planner for outer loop vectorization.
  // TODO: CM is not used at this point inside the planner. Turn CM into an
  // optional argument if we don't need it in the future.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
                               Requirements, ORE);

  // Get user vectorization factor.
  ElementCount UserVF = Hints.getWidth();

  CM.collectElementTypesForWidening();

  // Plan how to best vectorize, return the best VF and its cost.
  const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);

  // If we are stress testing VPlan builds, do not attempt to generate vector
  // code. Masked vector code generation support will follow soon.
  // Also, do not attempt to vectorize if no vector code will be produced.
  if (VPlanBuildStressTest || EnableVPlanPredication ||
      VectorizationFactor::Disabled() == VF)
    return false;

  VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);

  {
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
                             F->getParent()->getDataLayout());
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
                           &CM, BFI, PSI, Checks);
    LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
                      << L->getHeader()->getParent()->getName() << "\"\n");
    LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();
  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated with floating point, there
// will be a performance penalty from the conversion overhead and the change
// in the vector width.
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
  SmallVector<Instruction *, 4> Worklist;
  for (BasicBlock *BB : L->getBlocks()) {
    for (Instruction &Inst : *BB) {
      if (auto *S = dyn_cast<StoreInst>(&Inst)) {
        if (S->getValueOperand()->getType()->isFloatTy())
          Worklist.push_back(S);
      }
    }
  }

  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
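  // For example, in C source like
  //   float *A; ... A[i] = (float)((double)A[i] * 2.0);
  // the walk from the float store reaches the fpext feeding the double
  // multiply and emits the remark below.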
  SmallPtrSet<const Instruction *, 4> Visited;
  SmallPtrSet<const Instruction *, 4> EmittedRemark;
  while (!Worklist.empty()) {
    auto *I = Worklist.pop_back_val();
    if (!L->contains(I))
      continue;
    if (!Visited.insert(I).second)
      continue;

    // Emit a remark if the floating point store required a floating
    // point conversion.
    // TODO: More work could be done to identify the root cause such as a
    // constant or a function return type and point the user to it.
    if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
                                          I->getDebugLoc(), L->getHeader())
               << "floating point conversion changes vector width. "
               << "Mixed floating point precision requires an up/down "
               << "cast that will negatively impact performance.";
      });

    for (Use &Op : I->operands())
      if (auto *OpI = dyn_cast<Instruction>(Op))
        Worklist.push_back(OpI);
  }
}

LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
    : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
                               !EnableLoopInterleaving),
      VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
                              !EnableLoopVectorization) {}

bool LoopVectorizePass::processLoop(Loop *L) {
  assert((EnableVPlanNativePath || L->isInnermost()) &&
         "VPlan-native path is not enabled. Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
                    << L->getHeader()->getParent()->getName() << "\" from "
                    << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);

  LLVM_DEBUG(
      dbgs() << "LV: Loop hints:"
             << " force="
             << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                     ? "disabled"
                     : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                            ? "enabled"
                            : "?"))
             << " width=" << Hints.getWidth()
             << " interleave=" << Hints.getInterleave() << "\n");

  // Function containing the loop.
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.

  if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements;
  LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
                                &Requirements, &Hints, DB, AC, BFI, PSI);
  if (!LVL.canVectorize(EnableVPlanNativePath)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check the function attributes and profiles to find out if this function
  // should be optimized for size.
  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);

  // Entrance to the VPlan-native vectorization path. Outer loops are processed
  // here. They may require CFG and instruction level transformations before
  // even evaluating whether vectorization is profitable. Since we cannot modify
  // the incoming IR, we need to build VPlan upfront in the vectorization
  // pipeline.
  if (!L->isInnermost())
    return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
                                        ORE, BFI, PSI, Hints, Requirements);

  assert(L->isInnermost() && "Inner loop expected.");

  // Check the loop for a trip count threshold: vectorize loops with a tiny trip
  // count by optimizing for size, to minimize overheads.
  auto ExpectedTC = getSmallBestKnownTC(*SE, L);
  if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem to be correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    reportVectorizationFailure(
        "Can't vectorize when the NoImplicitFloat attribute is used",
        "loop not vectorized due to NoImplicitFloat attribute",
        "NoImplicitFloat", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    reportVectorizationFailure(
        "Potentially unsafe FP op prevents vectorization",
        "loop not vectorized due to unsafe FP support.",
        "UnsafeFP", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  bool AllowOrderedReductions;
  // If the flag is set, use that instead and override the TTI behaviour.
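  // (An "ordered" reduction preserves the strict, sequential FP semantics of
  // the scalar loop, e.g. a float sum lowered as an in-order fadd reduction
  // chained through a scalar accumulator rather than a reassociated tree; see
  // VPReductionRecipe::execute above.)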
  if (ForceOrderedReductions.getNumOccurrences() > 0)
    AllowOrderedReductions = ForceOrderedReductions;
  else
    AllowOrderedReductions = TTI->enableOrderedReductions();
  if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
    ORE->emit([&]() {
      auto *ExactFPMathInst = Requirements.getExactFPInst();
      return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
                                                 ExactFPMathInst->getDebugLoc(),
                                                 ExactFPMathInst->getParent())
             << "loop not vectorized: cannot prove it is safe to reorder "
                "floating-point operations";
    });
    LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
                         "reorder floating-point operations\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved)
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();
  CM.collectElementTypesForWidening();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
                               Requirements, ORE);

  // Get user vectorization factor and interleave count.
  ElementCount UserVF = Hints.getWidth();
  unsigned UserIC = Hints.getInterleave();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (VF.Width.isScalar()) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being explicitly
    // requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();
  {
    // Optimistically generate runtime checks. Drop them if they turn out not
    // to be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
                             F->getParent()->getDataLayout());
    if (!VF.Width.isScalar() || IC > 1)
      Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());

    using namespace ore;
    if (!VectorizeLoop) {
      assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided that it is not worthwhile to vectorize the loop, then
      // interleave it.
      InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                                 &CM, BFI, PSI, Checks);

      VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
      LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);

      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                  L->getHeader())
               << "interleaved loop (interleaved count: "
               << NV("InterleaveCount", IC) << ")";
      });
    } else {
      // If we decided that it is *worthwhile* to vectorize the loop, then
      // do it.

      // Consider vectorizing the epilogue too if it's profitable.
      VectorizationFactor EpilogueVF =
          CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
      if (EpilogueVF.Width.isVector()) {

        // The first pass vectorizes the main loop and creates a scalar
        // epilogue to be vectorized by executing the plan (potentially with a
        // different factor) again shortly afterwards.
        EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
        EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
                                           EPI, &LVL, &CM, BFI, PSI, Checks);

        VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
        LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
                        DT);
        ++LoopsVectorized;

        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
        formLCSSARecursively(*L, *DT, LI, SE);

        // Second pass vectorizes the epilogue and adjusts the control flow
        // edges from the first pass.
        EPI.MainLoopVF = EPI.EpilogueVF;
        EPI.MainLoopUF = EPI.EpilogueUF;
        EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
                                                 ORE, EPI, &LVL, &CM, BFI, PSI,
                                                 Checks);

        VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
        LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
                        DT);
        ++LoopsEpilogueVectorized;

        if (!MainILV.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      } else {
        InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                               &LVL, &CM, BFI, PSI, Checks);

        VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
        LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
        ++LoopsVectorized;

        // Add metadata to disable runtime unrolling a scalar loop when there
        // are no runtime checks about strides and memory. A scalar loop that
        // is rarely used is not worth unrolling.
        if (!LB.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      }
      // Report the vectorization decision.
      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                  L->getHeader())
               << "vectorized loop (vectorization width: "
               << NV("VectorizationFactor", VF.Width)
               << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
      });
    }

    if (ORE->allowExtraAnalysis(LV_NAME))
      checkMixedPrecision(L, ORE);
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops, processing each loop nest in the
  // function.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
                                      TLI, TTI, nullptr, nullptr, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }

  if (Result.MadeCFGChange) {
    // Making CFG changes likely means a loop got vectorized. Indicate that
    // extra simplification passes should be run.
    // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
    // be run if runtime checks have been added.
    AM.getResult<ShouldRunExtraVectorPasses>(F);
    PA.preserve<ShouldRunExtraVectorPasses>();
  } else {
    PA.preserveSet<CFGAnalyses>();
  }
  return PA;
}

void LoopVectorizePass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << "<";
  OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
  OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
  OS << ">";
}