//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
// that predication is preferred, and this lists all options. I.e., the
// vectorizer will try to fold the tail-loop (epilogue) into the vector body
// and predicate the instructions accordingly. If tail-folding fails, there are
// different fallback strategies depending on these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefers tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in the loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));
static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> ForceOrderedReductions(
    "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after-loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
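/// For example, an x86_fp80 value carries 80 bits of data but is allocated 96
/// or 128 bits depending on the target, so an array of x86_fp80 elements does
/// not have the same layout as a vector of x86_fp80.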
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the profile
    // of the original loop header may change as the transformation happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
                unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single first-order recurrence or pointer induction PHINode in
  /// a block. This method handles the induction variable canonicalization. It
  /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive. Uses the VPValue operands from \p Operands instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
                             VPValue *Def, VPValue *CastDef,
                             VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask);

  /// Set the debug location in the builder \p Ptr using the debug location in
  /// \p V. If \p Ptr is None then it uses the class member's Builder.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilder<> *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we are
  /// able to vectorize with strict in-order reductions for the given RdxDesc.
  bool useOrderedReductions(RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// This function adds
  /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variable.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPValue *CastDef, VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPValue *CastDef,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the
  /// latter case \p EntryVal is a TruncInst and we must not record anything for
  /// that IV, but it's error-prone to expect callers of this routine to care
  /// about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(
      const InductionDescriptor &ID, const Instruction *EntryVal,
      Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
      unsigned Part, unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration count
  /// in the scalar epilogue, from where the vectorized loop left off (given by
  /// \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and return
  /// the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided size
  // optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e., the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e., the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
  IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When an FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

/// Return a value for Step multiplied by VF.
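/// For example, with Step = 2 and a fixed VF = 4 this returns the constant 8,
/// while with a scalable VF = <vscale x 4> it returns the runtime value
/// vscale * 8.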
static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
  assert(isa<ConstantInt>(Step) && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(
      Step->getType(),
      cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

namespace llvm {

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
VF and LoopCost 1276 /// are the selected vectorization factor and the cost of the selected VF. 1277 unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost); 1278 1279 /// Memory access instruction may be vectorized in more than one way. 1280 /// Form of instruction after vectorization depends on cost. 1281 /// This function takes cost-based decisions for Load/Store instructions 1282 /// and collects them in a map. This decisions map is used for building 1283 /// the lists of loop-uniform and loop-scalar instructions. 1284 /// The calculated cost is saved with widening decision in order to 1285 /// avoid redundant calculations. 1286 void setCostBasedWideningDecision(ElementCount VF); 1287 1288 /// A struct that represents some properties of the register usage 1289 /// of a loop. 1290 struct RegisterUsage { 1291 /// Holds the number of loop invariant values that are used in the loop. 1292 /// The key is ClassID of target-provided register class. 1293 SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs; 1294 /// Holds the maximum number of concurrent live intervals in the loop. 1295 /// The key is ClassID of target-provided register class. 1296 SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers; 1297 }; 1298 1299 /// \return Returns information about the register usages of the loop for the 1300 /// given vectorization factors. 1301 SmallVector<RegisterUsage, 8> 1302 calculateRegisterUsage(ArrayRef<ElementCount> VFs); 1303 1304 /// Collect values we want to ignore in the cost model. 1305 void collectValuesToIgnore(); 1306 1307 /// Collect all element types in the loop for which widening is needed. 1308 void collectElementTypesForWidening(); 1309 1310 /// Split reductions into those that happen in the loop, and those that happen 1311 /// outside. In loop reductions are collected into InLoopReductionChains. 1312 void collectInLoopReductions(); 1313 1314 /// Returns true if we should use strict in-order reductions for the given 1315 /// RdxDesc. This is true if the -enable-strict-reductions flag is passed, 1316 /// the IsOrdered flag of RdxDesc is set and we do not allow reordering 1317 /// of FP operations. 1318 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) { 1319 return !Hints->allowReordering() && RdxDesc.isOrdered(); 1320 } 1321 1322 /// \returns The smallest bitwidth each instruction can be represented with. 1323 /// The vector equivalents of these instructions should be truncated to this 1324 /// type. 1325 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { 1326 return MinBWs; 1327 } 1328 1329 /// \returns True if it is more profitable to scalarize instruction \p I for 1330 /// vectorization factor \p VF. 1331 bool isProfitableToScalarize(Instruction *I, ElementCount VF) const { 1332 assert(VF.isVector() && 1333 "Profitable to scalarize relevant only for VF > 1."); 1334 1335 // Cost model is not run in the VPlan-native path - return conservative 1336 // result until this changes. 1337 if (EnableVPlanNativePath) 1338 return false; 1339 1340 auto Scalars = InstsToScalarize.find(VF); 1341 assert(Scalars != InstsToScalarize.end() && 1342 "VF not yet analyzed for scalarization profitability"); 1343 return Scalars->second.find(I) != Scalars->second.end(); 1344 } 1345 1346 /// Returns true if \p I is known to be uniform after vectorization. 
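  /// For illustration (example added to this comment, not from the original
  /// source): the pointer operand of a consecutive load is typically uniform,
  /// since every lane of a vector iteration is addressed through the same
  /// widened access, whereas the loaded value itself is not.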
1347 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const { 1348 if (VF.isScalar()) 1349 return true; 1350 1351 // Cost model is not run in the VPlan-native path - return conservative 1352 // result until this changes. 1353 if (EnableVPlanNativePath) 1354 return false; 1355 1356 auto UniformsPerVF = Uniforms.find(VF); 1357 assert(UniformsPerVF != Uniforms.end() && 1358 "VF not yet analyzed for uniformity"); 1359 return UniformsPerVF->second.count(I); 1360 } 1361 1362 /// Returns true if \p I is known to be scalar after vectorization. 1363 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const { 1364 if (VF.isScalar()) 1365 return true; 1366 1367 // Cost model is not run in the VPlan-native path - return conservative 1368 // result until this changes. 1369 if (EnableVPlanNativePath) 1370 return false; 1371 1372 auto ScalarsPerVF = Scalars.find(VF); 1373 assert(ScalarsPerVF != Scalars.end() && 1374 "Scalar values are not calculated for VF"); 1375 return ScalarsPerVF->second.count(I); 1376 } 1377 1378 /// \returns True if instruction \p I can be truncated to a smaller bitwidth 1379 /// for vectorization factor \p VF. 1380 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const { 1381 return VF.isVector() && MinBWs.find(I) != MinBWs.end() && 1382 !isProfitableToScalarize(I, VF) && 1383 !isScalarAfterVectorization(I, VF); 1384 } 1385 1386 /// Decision that was taken during cost calculation for memory instruction. 1387 enum InstWidening { 1388 CM_Unknown, 1389 CM_Widen, // For consecutive accesses with stride +1. 1390 CM_Widen_Reverse, // For consecutive accesses with stride -1. 1391 CM_Interleave, 1392 CM_GatherScatter, 1393 CM_Scalarize 1394 }; 1395 1396 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1397 /// instruction \p I and vector width \p VF. 1398 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, 1399 InstructionCost Cost) { 1400 assert(VF.isVector() && "Expected VF >=2"); 1401 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1402 } 1403 1404 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1405 /// interleaving group \p Grp and vector width \p VF. 1406 void setWideningDecision(const InterleaveGroup<Instruction> *Grp, 1407 ElementCount VF, InstWidening W, 1408 InstructionCost Cost) { 1409 assert(VF.isVector() && "Expected VF >=2"); 1410 /// Broadcast this decicion to all instructions inside the group. 1411 /// But the cost will be assigned to one instruction only. 1412 for (unsigned i = 0; i < Grp->getFactor(); ++i) { 1413 if (auto *I = Grp->getMember(i)) { 1414 if (Grp->getInsertPos() == I) 1415 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1416 else 1417 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); 1418 } 1419 } 1420 } 1421 1422 /// Return the cost model decision for the given instruction \p I and vector 1423 /// width \p VF. Return CM_Unknown if this instruction did not pass 1424 /// through the cost modeling. 1425 InstWidening getWideningDecision(Instruction *I, ElementCount VF) const { 1426 assert(VF.isVector() && "Expected VF to be a vector VF"); 1427 // Cost model is not run in the VPlan-native path - return conservative 1428 // result until this changes. 
1429 if (EnableVPlanNativePath) 1430 return CM_GatherScatter; 1431 1432 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1433 auto Itr = WideningDecisions.find(InstOnVF); 1434 if (Itr == WideningDecisions.end()) 1435 return CM_Unknown; 1436 return Itr->second.first; 1437 } 1438 1439 /// Return the vectorization cost for the given instruction \p I and vector 1440 /// width \p VF. 1441 InstructionCost getWideningCost(Instruction *I, ElementCount VF) { 1442 assert(VF.isVector() && "Expected VF >=2"); 1443 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1444 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1445 "The cost is not calculated"); 1446 return WideningDecisions[InstOnVF].second; 1447 } 1448 1449 /// Return True if instruction \p I is an optimizable truncate whose operand 1450 /// is an induction variable. Such a truncate will be removed by adding a new 1451 /// induction variable with the destination type. 1452 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) { 1453 // If the instruction is not a truncate, return false. 1454 auto *Trunc = dyn_cast<TruncInst>(I); 1455 if (!Trunc) 1456 return false; 1457 1458 // Get the source and destination types of the truncate. 1459 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1460 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1461 1462 // If the truncate is free for the given types, return false. Replacing a 1463 // free truncate with an induction variable would add an induction variable 1464 // update instruction to each iteration of the loop. We exclude from this 1465 // check the primary induction variable since it will need an update 1466 // instruction regardless. 1467 Value *Op = Trunc->getOperand(0); 1468 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1469 return false; 1470 1471 // If the truncated value is not an induction variable, return false. 1472 return Legal->isInductionPhi(Op); 1473 } 1474 1475 /// Collects the instructions to scalarize for each predicated instruction in 1476 /// the loop. 1477 void collectInstsToScalarize(ElementCount VF); 1478 1479 /// Collect Uniform and Scalar values for the given \p VF. 1480 /// The sets depend on CM decision for Load/Store instructions 1481 /// that may be vectorized as interleave, gather-scatter or scalarized. 1482 void collectUniformsAndScalars(ElementCount VF) { 1483 // Do the analysis once. 1484 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) 1485 return; 1486 setCostBasedWideningDecision(VF); 1487 collectLoopUniforms(VF); 1488 collectLoopScalars(VF); 1489 } 1490 1491 /// Returns true if the target machine supports masked store operation 1492 /// for the given \p DataType and kind of access to \p Ptr. 1493 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const { 1494 return Legal->isConsecutivePtr(DataType, Ptr) && 1495 TTI.isLegalMaskedStore(DataType, Alignment); 1496 } 1497 1498 /// Returns true if the target machine supports masked load operation 1499 /// for the given \p DataType and kind of access to \p Ptr. 1500 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const { 1501 return Legal->isConsecutivePtr(DataType, Ptr) && 1502 TTI.isLegalMaskedLoad(DataType, Alignment); 1503 } 1504 1505 /// Returns true if the target machine can represent \p V as a masked gather 1506 /// or scatter operation. 
1507 bool isLegalGatherOrScatter(Value *V) { 1508 bool LI = isa<LoadInst>(V); 1509 bool SI = isa<StoreInst>(V); 1510 if (!LI && !SI) 1511 return false; 1512 auto *Ty = getLoadStoreType(V); 1513 Align Align = getLoadStoreAlignment(V); 1514 return (LI && TTI.isLegalMaskedGather(Ty, Align)) || 1515 (SI && TTI.isLegalMaskedScatter(Ty, Align)); 1516 } 1517 1518 /// Returns true if the target machine supports all of the reduction 1519 /// variables found for the given VF. 1520 bool canVectorizeReductions(ElementCount VF) const { 1521 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 1522 const RecurrenceDescriptor &RdxDesc = Reduction.second; 1523 return TTI.isLegalToVectorizeReduction(RdxDesc, VF); 1524 })); 1525 } 1526 1527 /// Returns true if \p I is an instruction that will be scalarized with 1528 /// predication. Such instructions include conditional stores and 1529 /// instructions that may divide by zero. 1530 /// If a non-zero VF has been calculated, we check if I will be scalarized 1531 /// predication for that VF. 1532 bool isScalarWithPredication(Instruction *I) const; 1533 1534 // Returns true if \p I is an instruction that will be predicated either 1535 // through scalar predication or masked load/store or masked gather/scatter. 1536 // Superset of instructions that return true for isScalarWithPredication. 1537 bool isPredicatedInst(Instruction *I) { 1538 if (!blockNeedsPredication(I->getParent())) 1539 return false; 1540 // Loads and stores that need some form of masked operation are predicated 1541 // instructions. 1542 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1543 return Legal->isMaskRequired(I); 1544 return isScalarWithPredication(I); 1545 } 1546 1547 /// Returns true if \p I is a memory instruction with consecutive memory 1548 /// access that can be widened. 1549 bool 1550 memoryInstructionCanBeWidened(Instruction *I, 1551 ElementCount VF = ElementCount::getFixed(1)); 1552 1553 /// Returns true if \p I is a memory instruction in an interleaved-group 1554 /// of memory accesses that can be vectorized with wide vector loads/stores 1555 /// and shuffles. 1556 bool 1557 interleavedAccessCanBeWidened(Instruction *I, 1558 ElementCount VF = ElementCount::getFixed(1)); 1559 1560 /// Check if \p Instr belongs to any interleaved access group. 1561 bool isAccessInterleaved(Instruction *Instr) { 1562 return InterleaveInfo.isInterleaved(Instr); 1563 } 1564 1565 /// Get the interleaved access group that \p Instr belongs to. 1566 const InterleaveGroup<Instruction> * 1567 getInterleavedAccessGroup(Instruction *Instr) { 1568 return InterleaveInfo.getInterleaveGroup(Instr); 1569 } 1570 1571 /// Returns true if we're required to use a scalar epilogue for at least 1572 /// the final iteration of the original loop. 1573 bool requiresScalarEpilogue(ElementCount VF) const { 1574 if (!isScalarEpilogueAllowed()) 1575 return false; 1576 // If we might exit from anywhere but the latch, must run the exiting 1577 // iteration in scalar form. 1578 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) 1579 return true; 1580 return VF.isVector() && InterleaveInfo.requiresScalarEpilogue(); 1581 } 1582 1583 /// Returns true if a scalar epilogue is not allowed due to optsize or a 1584 /// loop hint annotation. 1585 bool isScalarEpilogueAllowed() const { 1586 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed; 1587 } 1588 1589 /// Returns true if all loop blocks should be masked to fold tail loop. 
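  /// For example (illustrative numbers, not taken from this file): with a trip
  /// count of 10 and VF = 4, folding the tail executes 3 masked vector
  /// iterations (12 lanes, the last 2 disabled by the mask) instead of 2 vector
  /// iterations followed by a 2-iteration scalar epilogue.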
1590 bool foldTailByMasking() const { return FoldTailByMasking; } 1591 1592 bool blockNeedsPredication(BasicBlock *BB) const { 1593 return foldTailByMasking() || Legal->blockNeedsPredication(BB); 1594 } 1595 1596 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi 1597 /// nodes to the chain of instructions representing the reductions. Uses a 1598 /// MapVector to ensure deterministic iteration order. 1599 using ReductionChainMap = 1600 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>; 1601 1602 /// Return the chain of instructions representing an inloop reduction. 1603 const ReductionChainMap &getInLoopReductionChains() const { 1604 return InLoopReductionChains; 1605 } 1606 1607 /// Returns true if the Phi is part of an inloop reduction. 1608 bool isInLoopReduction(PHINode *Phi) const { 1609 return InLoopReductionChains.count(Phi); 1610 } 1611 1612 /// Estimate cost of an intrinsic call instruction CI if it were vectorized 1613 /// with factor VF. Return the cost of the instruction, including 1614 /// scalarization overhead if it's needed. 1615 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const; 1616 1617 /// Estimate cost of a call instruction CI if it were vectorized with factor 1618 /// VF. Return the cost of the instruction, including scalarization overhead 1619 /// if it's needed. The flag NeedToScalarize shows if the call needs to be 1620 /// scalarized - 1621 /// i.e. either vector version isn't available, or is too expensive. 1622 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF, 1623 bool &NeedToScalarize) const; 1624 1625 /// Returns true if the per-lane cost of VectorizationFactor A is lower than 1626 /// that of B. 1627 bool isMoreProfitable(const VectorizationFactor &A, 1628 const VectorizationFactor &B) const; 1629 1630 /// Invalidates decisions already taken by the cost model. 1631 void invalidateCostModelingDecisions() { 1632 WideningDecisions.clear(); 1633 Uniforms.clear(); 1634 Scalars.clear(); 1635 } 1636 1637 private: 1638 unsigned NumPredStores = 0; 1639 1640 /// \return An upper bound for the vectorization factors for both 1641 /// fixed and scalable vectorization, where the minimum-known number of 1642 /// elements is a power-of-2 larger than zero. If scalable vectorization is 1643 /// disabled or unsupported, then the scalable part will be equal to 1644 /// ElementCount::getScalable(0). 1645 FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount, 1646 ElementCount UserVF); 1647 1648 /// \return the maximized element count based on the targets vector 1649 /// registers and the loop trip-count, but limited to a maximum safe VF. 1650 /// This is a helper function of computeFeasibleMaxVF. 1651 /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure 1652 /// issue that occurred on one of the buildbots which cannot be reproduced 1653 /// without having access to the properietary compiler (see comments on 1654 /// D98509). The issue is currently under investigation and this workaround 1655 /// will be removed as soon as possible. 1656 ElementCount getMaximizedVFForTarget(unsigned ConstTripCount, 1657 unsigned SmallestType, 1658 unsigned WidestType, 1659 const ElementCount &MaxSafeVF); 1660 1661 /// \return the maximum legal scalable VF, based on the safe max number 1662 /// of elements. 
1663 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1664
1665 /// The vectorization cost is a combination of the cost itself and a boolean
1666 /// indicating whether any of the contributing operations will actually
1667 /// operate on vector values after type legalization in the backend. If this
1668 /// latter value is false, then all operations will be scalarized (i.e. no
1669 /// vectorization has actually taken place).
1670 using VectorizationCostTy = std::pair<InstructionCost, bool>;
1671
1672 /// Returns the expected execution cost. The unit of the cost does
1673 /// not matter because we use the 'cost' units to compare different
1674 /// vector widths. The cost that is returned is *not* normalized by
1675 /// the factor width. If \p Invalid is not nullptr, this function
1676 /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1677 /// each instruction that has an Invalid cost for the given VF.
1678 using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1679 VectorizationCostTy
1680 expectedCost(ElementCount VF,
1681 SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1682
1683 /// Returns the execution time cost of an instruction for a given vector
1684 /// width. Vector width of one means scalar.
1685 VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1686
1687 /// The cost-computation logic from getInstructionCost which provides
1688 /// the vector type as an output parameter.
1689 InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1690 Type *&VectorTy);
1691
1692 /// Return the cost of instructions in an in-loop reduction pattern, if \p I is
1693 /// part of that pattern.
1694 Optional<InstructionCost>
1695 getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1696 TTI::TargetCostKind CostKind);
1697
1698 /// Calculate vectorization cost of memory instruction \p I.
1699 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1700
1701 /// The cost computation for a scalarized memory instruction.
1702 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1703
1704 /// The cost computation for an interleaving group of memory instructions.
1705 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1706
1707 /// The cost computation for a Gather/Scatter instruction.
1708 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1709
1710 /// The cost computation for widening instruction \p I with a consecutive
1711 /// memory access.
1712 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1713
1714 /// The cost calculation for Load/Store instruction \p I with uniform pointer:
1715 /// Load: scalar load + broadcast.
1716 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1717 /// element)
1718 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1719
1720 /// Estimate the overhead of scalarizing an instruction. This is a
1721 /// convenience wrapper for the type-based getScalarizationOverhead API.
1722 InstructionCost getScalarizationOverhead(Instruction *I,
1723 ElementCount VF) const;
1724
1725 /// Returns whether the instruction is a load or store and will be emitted
1726 /// as a vector operation.
1727 bool isConsecutiveLoadOrStore(Instruction *I);
1728
1729 /// Returns true if an artificially high cost for emulated masked memrefs
1730 /// should be used.
1731 bool useEmulatedMaskMemRefHack(Instruction *I);
1732
1733 /// Map of scalar integer values to the smallest bitwidth they can be legally
1734 /// represented as. The vector equivalents of these values should be truncated
1735 /// to this type.
1736 MapVector<Instruction *, uint64_t> MinBWs;
1737
1738 /// A type representing the costs for instructions if they were to be
1739 /// scalarized rather than vectorized. The entries are Instruction-Cost
1740 /// pairs.
1741 using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1742
1743 /// A set containing all BasicBlocks that are known to be present after
1744 /// vectorization as predicated blocks.
1745 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1746
1747 /// Records whether it is allowed to have the original scalar loop execute at
1748 /// least once. This may be needed as a fallback loop in case runtime
1749 /// aliasing/dependence checks fail, or to handle the tail/remainder
1750 /// iterations when the trip count is unknown or is not a multiple of the VF,
1751 /// or as a peel-loop to handle gaps in interleave-groups.
1752 /// Under optsize and when the trip count is very small we don't allow any
1753 /// iterations to execute in the scalar loop.
1754 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1755
1756 /// All blocks of the loop are to be masked to fold the tail of scalar iterations.
1757 bool FoldTailByMasking = false;
1758
1759 /// A map holding scalar costs for different vectorization factors. The
1760 /// presence of a cost for an instruction in the mapping indicates that the
1761 /// instruction will be scalarized when vectorizing with the associated
1762 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1763 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1764
1765 /// Holds the instructions known to be uniform after vectorization.
1766 /// The data is collected per VF.
1767 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1768
1769 /// Holds the instructions known to be scalar after vectorization.
1770 /// The data is collected per VF.
1771 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1772
1773 /// Holds the instructions (address computations) that are forced to be
1774 /// scalarized.
1775 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1776
1777 /// PHINodes of the reductions that should be expanded in-loop along with
1778 /// their associated chains of reduction operations, in program order from top
1779 /// (PHI) to bottom.
1780 ReductionChainMap InLoopReductionChains;
1781
1782 /// A map of in-loop reduction operations and their immediate chain operand.
1783 /// FIXME: This can be removed once reductions can be costed correctly in
1784 /// vplan. This was added to allow quick lookup of the in-loop operations,
1785 /// without having to loop through InLoopReductionChains.
1786 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1787
1788 /// Returns the expected difference in cost from scalarizing the expression
1789 /// feeding a predicated instruction \p PredInst. The instructions to
1790 /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1791 /// non-negative return value implies the expression will be scalarized.
1792 /// Currently, only single-use chains are considered for scalarization.
1793 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1794 ElementCount VF); 1795 1796 /// Collect the instructions that are uniform after vectorization. An 1797 /// instruction is uniform if we represent it with a single scalar value in 1798 /// the vectorized loop corresponding to each vector iteration. Examples of 1799 /// uniform instructions include pointer operands of consecutive or 1800 /// interleaved memory accesses. Note that although uniformity implies an 1801 /// instruction will be scalar, the reverse is not true. In general, a 1802 /// scalarized instruction will be represented by VF scalar values in the 1803 /// vectorized loop, each corresponding to an iteration of the original 1804 /// scalar loop. 1805 void collectLoopUniforms(ElementCount VF); 1806 1807 /// Collect the instructions that are scalar after vectorization. An 1808 /// instruction is scalar if it is known to be uniform or will be scalarized 1809 /// during vectorization. Non-uniform scalarized instructions will be 1810 /// represented by VF values in the vectorized loop, each corresponding to an 1811 /// iteration of the original scalar loop. 1812 void collectLoopScalars(ElementCount VF); 1813 1814 /// Keeps cost model vectorization decision and cost for instructions. 1815 /// Right now it is used for memory instructions only. 1816 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1817 std::pair<InstWidening, InstructionCost>>; 1818 1819 DecisionList WideningDecisions; 1820 1821 /// Returns true if \p V is expected to be vectorized and it needs to be 1822 /// extracted. 1823 bool needsExtract(Value *V, ElementCount VF) const { 1824 Instruction *I = dyn_cast<Instruction>(V); 1825 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1826 TheLoop->isLoopInvariant(I)) 1827 return false; 1828 1829 // Assume we can vectorize V (and hence we need extraction) if the 1830 // scalars are not computed yet. This can happen, because it is called 1831 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1832 // the scalars are collected. That should be a safe assumption in most 1833 // cases, because we check if the operands have vectorizable types 1834 // beforehand in LoopVectorizationLegality. 1835 return Scalars.find(VF) == Scalars.end() || 1836 !isScalarAfterVectorization(I, VF); 1837 }; 1838 1839 /// Returns a range containing only operands needing to be extracted. 1840 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1841 ElementCount VF) const { 1842 return SmallVector<Value *, 4>(make_filter_range( 1843 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1844 } 1845 1846 /// Determines if we have the infrastructure to vectorize loop \p L and its 1847 /// epilogue, assuming the main loop is vectorized by \p VF. 1848 bool isCandidateForEpilogueVectorization(const Loop &L, 1849 const ElementCount VF) const; 1850 1851 /// Returns true if epilogue vectorization is considered profitable, and 1852 /// false otherwise. 1853 /// \p VF is the vectorization factor chosen for the original loop. 1854 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1855 1856 public: 1857 /// The loop that we evaluate. 1858 Loop *TheLoop; 1859 1860 /// Predicated scalar evolution analysis. 1861 PredicatedScalarEvolution &PSE; 1862 1863 /// Loop Info analysis. 1864 LoopInfo *LI; 1865 1866 /// Vectorization legality. 1867 LoopVectorizationLegality *Legal; 1868 1869 /// Vector target information. 
1870 const TargetTransformInfo &TTI; 1871 1872 /// Target Library Info. 1873 const TargetLibraryInfo *TLI; 1874 1875 /// Demanded bits analysis. 1876 DemandedBits *DB; 1877 1878 /// Assumption cache. 1879 AssumptionCache *AC; 1880 1881 /// Interface to emit optimization remarks. 1882 OptimizationRemarkEmitter *ORE; 1883 1884 const Function *TheFunction; 1885 1886 /// Loop Vectorize Hint. 1887 const LoopVectorizeHints *Hints; 1888 1889 /// The interleave access information contains groups of interleaved accesses 1890 /// with the same stride and close to each other. 1891 InterleavedAccessInfo &InterleaveInfo; 1892 1893 /// Values to ignore in the cost model. 1894 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1895 1896 /// Values to ignore in the cost model when VF > 1. 1897 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1898 1899 /// All element types found in the loop. 1900 SmallPtrSet<Type *, 16> ElementTypesInLoop; 1901 1902 /// Profitable vector factors. 1903 SmallVector<VectorizationFactor, 8> ProfitableVFs; 1904 }; 1905 } // end namespace llvm 1906 1907 /// Helper struct to manage generating runtime checks for vectorization. 1908 /// 1909 /// The runtime checks are created up-front in temporary blocks to allow better 1910 /// estimating the cost and un-linked from the existing IR. After deciding to 1911 /// vectorize, the checks are moved back. If deciding not to vectorize, the 1912 /// temporary blocks are completely removed. 1913 class GeneratedRTChecks { 1914 /// Basic block which contains the generated SCEV checks, if any. 1915 BasicBlock *SCEVCheckBlock = nullptr; 1916 1917 /// The value representing the result of the generated SCEV checks. If it is 1918 /// nullptr, either no SCEV checks have been generated or they have been used. 1919 Value *SCEVCheckCond = nullptr; 1920 1921 /// Basic block which contains the generated memory runtime checks, if any. 1922 BasicBlock *MemCheckBlock = nullptr; 1923 1924 /// The value representing the result of the generated memory runtime checks. 1925 /// If it is nullptr, either no memory runtime checks have been generated or 1926 /// they have been used. 1927 Instruction *MemRuntimeCheckCond = nullptr; 1928 1929 DominatorTree *DT; 1930 LoopInfo *LI; 1931 1932 SCEVExpander SCEVExp; 1933 SCEVExpander MemCheckExp; 1934 1935 public: 1936 GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI, 1937 const DataLayout &DL) 1938 : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"), 1939 MemCheckExp(SE, DL, "scev.check") {} 1940 1941 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can 1942 /// accurately estimate the cost of the runtime checks. The blocks are 1943 /// un-linked from the IR and is added back during vector code generation. If 1944 /// there is no vector code generation, the check blocks are removed 1945 /// completely. 1946 void Create(Loop *L, const LoopAccessInfo &LAI, 1947 const SCEVUnionPredicate &UnionPred) { 1948 1949 BasicBlock *LoopHeader = L->getHeader(); 1950 BasicBlock *Preheader = L->getLoopPreheader(); 1951 1952 // Use SplitBlock to create blocks for SCEV & memory runtime checks to 1953 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those 1954 // may be used by SCEVExpander. The blocks will be un-linked from their 1955 // predecessors and removed from LI & DT at the end of the function. 
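    // For illustration, a rough sketch of the temporary layout built below when
    // both kinds of checks are required (assumed shape, not part of the
    // original comment):
    //
    //   preheader -> vector.scevcheck -> vector.memcheck -> <original successor>
    //
    // Both check blocks are unhooked again at the end of this function and only
    // spliced back in by emitSCEVChecks/emitMemRuntimeChecks once we decide to
    // vectorize.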
1956 if (!UnionPred.isAlwaysTrue()) { 1957 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI, 1958 nullptr, "vector.scevcheck"); 1959 1960 SCEVCheckCond = SCEVExp.expandCodeForPredicate( 1961 &UnionPred, SCEVCheckBlock->getTerminator()); 1962 } 1963 1964 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking(); 1965 if (RtPtrChecking.Need) { 1966 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader; 1967 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr, 1968 "vector.memcheck"); 1969 1970 std::tie(std::ignore, MemRuntimeCheckCond) = 1971 addRuntimeChecks(MemCheckBlock->getTerminator(), L, 1972 RtPtrChecking.getChecks(), MemCheckExp); 1973 assert(MemRuntimeCheckCond && 1974 "no RT checks generated although RtPtrChecking " 1975 "claimed checks are required"); 1976 } 1977 1978 if (!MemCheckBlock && !SCEVCheckBlock) 1979 return; 1980 1981 // Unhook the temporary block with the checks, update various places 1982 // accordingly. 1983 if (SCEVCheckBlock) 1984 SCEVCheckBlock->replaceAllUsesWith(Preheader); 1985 if (MemCheckBlock) 1986 MemCheckBlock->replaceAllUsesWith(Preheader); 1987 1988 if (SCEVCheckBlock) { 1989 SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 1990 new UnreachableInst(Preheader->getContext(), SCEVCheckBlock); 1991 Preheader->getTerminator()->eraseFromParent(); 1992 } 1993 if (MemCheckBlock) { 1994 MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 1995 new UnreachableInst(Preheader->getContext(), MemCheckBlock); 1996 Preheader->getTerminator()->eraseFromParent(); 1997 } 1998 1999 DT->changeImmediateDominator(LoopHeader, Preheader); 2000 if (MemCheckBlock) { 2001 DT->eraseNode(MemCheckBlock); 2002 LI->removeBlock(MemCheckBlock); 2003 } 2004 if (SCEVCheckBlock) { 2005 DT->eraseNode(SCEVCheckBlock); 2006 LI->removeBlock(SCEVCheckBlock); 2007 } 2008 } 2009 2010 /// Remove the created SCEV & memory runtime check blocks & instructions, if 2011 /// unused. 2012 ~GeneratedRTChecks() { 2013 SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT); 2014 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT); 2015 if (!SCEVCheckCond) 2016 SCEVCleaner.markResultUsed(); 2017 2018 if (!MemRuntimeCheckCond) 2019 MemCheckCleaner.markResultUsed(); 2020 2021 if (MemRuntimeCheckCond) { 2022 auto &SE = *MemCheckExp.getSE(); 2023 // Memory runtime check generation creates compares that use expanded 2024 // values. Remove them before running the SCEVExpanderCleaners. 2025 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { 2026 if (MemCheckExp.isInsertedInstruction(&I)) 2027 continue; 2028 SE.forgetValue(&I); 2029 SE.eraseValueFromMap(&I); 2030 I.eraseFromParent(); 2031 } 2032 } 2033 MemCheckCleaner.cleanup(); 2034 SCEVCleaner.cleanup(); 2035 2036 if (SCEVCheckCond) 2037 SCEVCheckBlock->eraseFromParent(); 2038 if (MemRuntimeCheckCond) 2039 MemCheckBlock->eraseFromParent(); 2040 } 2041 2042 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and 2043 /// adjusts the branches to branch to the vector preheader or \p Bypass, 2044 /// depending on the generated condition. 
2045 BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass, 2046 BasicBlock *LoopVectorPreHeader, 2047 BasicBlock *LoopExitBlock) { 2048 if (!SCEVCheckCond) 2049 return nullptr; 2050 if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond)) 2051 if (C->isZero()) 2052 return nullptr; 2053 2054 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2055 2056 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); 2057 // Create new preheader for vector loop. 2058 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2059 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI); 2060 2061 SCEVCheckBlock->getTerminator()->eraseFromParent(); 2062 SCEVCheckBlock->moveBefore(LoopVectorPreHeader); 2063 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2064 SCEVCheckBlock); 2065 2066 DT->addNewBlock(SCEVCheckBlock, Pred); 2067 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock); 2068 2069 ReplaceInstWithInst( 2070 SCEVCheckBlock->getTerminator(), 2071 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond)); 2072 // Mark the check as used, to prevent it from being removed during cleanup. 2073 SCEVCheckCond = nullptr; 2074 return SCEVCheckBlock; 2075 } 2076 2077 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts 2078 /// the branches to branch to the vector preheader or \p Bypass, depending on 2079 /// the generated condition. 2080 BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass, 2081 BasicBlock *LoopVectorPreHeader) { 2082 // Check if we generated code that checks in runtime if arrays overlap. 2083 if (!MemRuntimeCheckCond) 2084 return nullptr; 2085 2086 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2087 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2088 MemCheckBlock); 2089 2090 DT->addNewBlock(MemCheckBlock, Pred); 2091 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock); 2092 MemCheckBlock->moveBefore(LoopVectorPreHeader); 2093 2094 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2095 PL->addBasicBlockToLoop(MemCheckBlock, *LI); 2096 2097 ReplaceInstWithInst( 2098 MemCheckBlock->getTerminator(), 2099 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond)); 2100 MemCheckBlock->getTerminator()->setDebugLoc( 2101 Pred->getTerminator()->getDebugLoc()); 2102 2103 // Mark the check as used, to prevent it from being removed during cleanup. 2104 MemRuntimeCheckCond = nullptr; 2105 return MemCheckBlock; 2106 } 2107 }; 2108 2109 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 2110 // vectorization. The loop needs to be annotated with #pragma omp simd 2111 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 2112 // vector length information is not provided, vectorization is not considered 2113 // explicit. Interleave hints are not allowed either. These limitations will be 2114 // relaxed in the future. 2115 // Please, note that we are currently forced to abuse the pragma 'clang 2116 // vectorize' semantics. This pragma provides *auto-vectorization hints* 2117 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 2118 // provides *explicit vectorization hints* (LV can bypass legal checks and 2119 // assume that vectorization is legal). However, both hints are implemented 2120 // using the same metadata (llvm.loop.vectorize, processed by 2121 // LoopVectorizeHints). This will be fixed in the future when the native IR 2122 // representation for pragma 'omp simd' is introduced. 
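// For illustration (hypothetical user code, not part of this file), an outer
// loop that this predicate would accept could be annotated as:
//
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (int i = 0; i < N; ++i)      // explicitly vectorized outer loop
//     for (int j = 0; j < M; ++j)
//       A[i][j] += B[i][j];
//
// A '#pragma omp simd simdlen(4)' annotation is currently mapped onto the
// same llvm.loop.vectorize metadata, as explained above.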
2123 static bool isExplicitVecOuterLoop(Loop *OuterLp, 2124 OptimizationRemarkEmitter *ORE) { 2125 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 2126 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 2127 2128 // Only outer loops with an explicit vectorization hint are supported. 2129 // Unannotated outer loops are ignored. 2130 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 2131 return false; 2132 2133 Function *Fn = OuterLp->getHeader()->getParent(); 2134 if (!Hints.allowVectorization(Fn, OuterLp, 2135 true /*VectorizeOnlyWhenForced*/)) { 2136 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 2137 return false; 2138 } 2139 2140 if (Hints.getInterleave() > 1) { 2141 // TODO: Interleave support is future work. 2142 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 2143 "outer loops.\n"); 2144 Hints.emitRemarkWithHints(); 2145 return false; 2146 } 2147 2148 return true; 2149 } 2150 2151 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 2152 OptimizationRemarkEmitter *ORE, 2153 SmallVectorImpl<Loop *> &V) { 2154 // Collect inner loops and outer loops without irreducible control flow. For 2155 // now, only collect outer loops that have explicit vectorization hints. If we 2156 // are stress testing the VPlan H-CFG construction, we collect the outermost 2157 // loop of every loop nest. 2158 if (L.isInnermost() || VPlanBuildStressTest || 2159 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 2160 LoopBlocksRPO RPOT(&L); 2161 RPOT.perform(LI); 2162 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 2163 V.push_back(&L); 2164 // TODO: Collect inner loops inside marked outer loops in case 2165 // vectorization fails for the outer loop. Do not invoke 2166 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 2167 // already known to be reducible. We can use an inherited attribute for 2168 // that. 2169 return; 2170 } 2171 } 2172 for (Loop *InnerL : L) 2173 collectSupportedLoops(*InnerL, LI, ORE, V); 2174 } 2175 2176 namespace { 2177 2178 /// The LoopVectorize Pass. 2179 struct LoopVectorize : public FunctionPass { 2180 /// Pass identification, replacement for typeid 2181 static char ID; 2182 2183 LoopVectorizePass Impl; 2184 2185 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 2186 bool VectorizeOnlyWhenForced = false) 2187 : FunctionPass(ID), 2188 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 2189 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2190 } 2191 2192 bool runOnFunction(Function &F) override { 2193 if (skipFunction(F)) 2194 return false; 2195 2196 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2197 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2198 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2199 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2200 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2201 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2202 auto *TLI = TLIP ? 
&TLIP->getTLI(F) : nullptr; 2203 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2204 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2205 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2206 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2207 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2208 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 2209 2210 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2211 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2212 2213 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2214 GetLAA, *ORE, PSI).MadeAnyChange; 2215 } 2216 2217 void getAnalysisUsage(AnalysisUsage &AU) const override { 2218 AU.addRequired<AssumptionCacheTracker>(); 2219 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2220 AU.addRequired<DominatorTreeWrapperPass>(); 2221 AU.addRequired<LoopInfoWrapperPass>(); 2222 AU.addRequired<ScalarEvolutionWrapperPass>(); 2223 AU.addRequired<TargetTransformInfoWrapperPass>(); 2224 AU.addRequired<AAResultsWrapperPass>(); 2225 AU.addRequired<LoopAccessLegacyAnalysis>(); 2226 AU.addRequired<DemandedBitsWrapperPass>(); 2227 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2228 AU.addRequired<InjectTLIMappingsLegacy>(); 2229 2230 // We currently do not preserve loopinfo/dominator analyses with outer loop 2231 // vectorization. Until this is addressed, mark these analyses as preserved 2232 // only for non-VPlan-native path. 2233 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 2234 if (!EnableVPlanNativePath) { 2235 AU.addPreserved<LoopInfoWrapperPass>(); 2236 AU.addPreserved<DominatorTreeWrapperPass>(); 2237 } 2238 2239 AU.addPreserved<BasicAAWrapperPass>(); 2240 AU.addPreserved<GlobalsAAWrapperPass>(); 2241 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 2242 } 2243 }; 2244 2245 } // end anonymous namespace 2246 2247 //===----------------------------------------------------------------------===// 2248 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2249 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2250 //===----------------------------------------------------------------------===// 2251 2252 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2253 // We need to place the broadcast of invariant variables outside the loop, 2254 // but only if it's proven safe to do so. Else, broadcast will be inside 2255 // vector loop body. 2256 Instruction *Instr = dyn_cast<Instruction>(V); 2257 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2258 (!Instr || 2259 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2260 // Place the code for broadcasting invariant variables in the new preheader. 2261 IRBuilder<>::InsertPointGuard Guard(Builder); 2262 if (SafeToHoist) 2263 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2264 2265 // Broadcast the scalar into all locations in the vector. 
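  // For example (illustrative IR, assuming a fixed VF of 4 and an i32 scalar),
  // the splat created below expands to roughly:
  //   %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %v, i64 0
  //   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
  //                                    <4 x i32> poison, <4 x i32> zeroinitializer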
2266 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2267 2268 return Shuf; 2269 } 2270 2271 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 2272 const InductionDescriptor &II, Value *Step, Value *Start, 2273 Instruction *EntryVal, VPValue *Def, VPValue *CastDef, 2274 VPTransformState &State) { 2275 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2276 "Expected either an induction phi-node or a truncate of it!"); 2277 2278 // Construct the initial value of the vector IV in the vector loop preheader 2279 auto CurrIP = Builder.saveIP(); 2280 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2281 if (isa<TruncInst>(EntryVal)) { 2282 assert(Start->getType()->isIntegerTy() && 2283 "Truncation requires an integer type"); 2284 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2285 Step = Builder.CreateTrunc(Step, TruncType); 2286 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2287 } 2288 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2289 Value *SteppedStart = 2290 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 2291 2292 // We create vector phi nodes for both integer and floating-point induction 2293 // variables. Here, we determine the kind of arithmetic we will perform. 2294 Instruction::BinaryOps AddOp; 2295 Instruction::BinaryOps MulOp; 2296 if (Step->getType()->isIntegerTy()) { 2297 AddOp = Instruction::Add; 2298 MulOp = Instruction::Mul; 2299 } else { 2300 AddOp = II.getInductionOpcode(); 2301 MulOp = Instruction::FMul; 2302 } 2303 2304 // Multiply the vectorization factor by the step using integer or 2305 // floating-point arithmetic as appropriate. 2306 Type *StepType = Step->getType(); 2307 if (Step->getType()->isFloatingPointTy()) 2308 StepType = IntegerType::get(StepType->getContext(), 2309 StepType->getScalarSizeInBits()); 2310 Value *RuntimeVF = getRuntimeVF(Builder, StepType, VF); 2311 if (Step->getType()->isFloatingPointTy()) 2312 RuntimeVF = Builder.CreateSIToFP(RuntimeVF, Step->getType()); 2313 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 2314 2315 // Create a vector splat to use in the induction update. 2316 // 2317 // FIXME: If the step is non-constant, we create the vector splat with 2318 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2319 // handle a constant vector splat. 2320 Value *SplatVF = isa<Constant>(Mul) 2321 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 2322 : Builder.CreateVectorSplat(VF, Mul); 2323 Builder.restoreIP(CurrIP); 2324 2325 // We may need to add the step a number of times, depending on the unroll 2326 // factor. The last of those goes into the PHI. 2327 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2328 &*LoopVectorBody->getFirstInsertionPt()); 2329 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 2330 Instruction *LastInduction = VecInd; 2331 for (unsigned Part = 0; Part < UF; ++Part) { 2332 State.set(Def, LastInduction, Part); 2333 2334 if (isa<TruncInst>(EntryVal)) 2335 addMetadata(LastInduction, EntryVal); 2336 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef, 2337 State, Part); 2338 2339 LastInduction = cast<Instruction>( 2340 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 2341 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 2342 } 2343 2344 // Move the last step to the end of the latch block. This ensures consistent 2345 // placement of all induction updates. 
2346 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2347 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2348 auto *ICmp = cast<Instruction>(Br->getCondition()); 2349 LastInduction->moveBefore(ICmp); 2350 LastInduction->setName("vec.ind.next"); 2351 2352 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2353 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2354 } 2355 2356 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 2357 return Cost->isScalarAfterVectorization(I, VF) || 2358 Cost->isProfitableToScalarize(I, VF); 2359 } 2360 2361 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2362 if (shouldScalarizeInstruction(IV)) 2363 return true; 2364 auto isScalarInst = [&](User *U) -> bool { 2365 auto *I = cast<Instruction>(U); 2366 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 2367 }; 2368 return llvm::any_of(IV->users(), isScalarInst); 2369 } 2370 2371 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 2372 const InductionDescriptor &ID, const Instruction *EntryVal, 2373 Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State, 2374 unsigned Part, unsigned Lane) { 2375 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2376 "Expected either an induction phi-node or a truncate of it!"); 2377 2378 // This induction variable is not the phi from the original loop but the 2379 // newly-created IV based on the proof that casted Phi is equal to the 2380 // uncasted Phi in the vectorized loop (under a runtime guard possibly). It 2381 // re-uses the same InductionDescriptor that original IV uses but we don't 2382 // have to do any recording in this case - that is done when original IV is 2383 // processed. 2384 if (isa<TruncInst>(EntryVal)) 2385 return; 2386 2387 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 2388 if (Casts.empty()) 2389 return; 2390 // Only the first Cast instruction in the Casts vector is of interest. 2391 // The rest of the Casts (if exist) have no uses outside the 2392 // induction update chain itself. 2393 if (Lane < UINT_MAX) 2394 State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane)); 2395 else 2396 State.set(CastDef, VectorLoopVal, Part); 2397 } 2398 2399 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start, 2400 TruncInst *Trunc, VPValue *Def, 2401 VPValue *CastDef, 2402 VPTransformState &State) { 2403 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 2404 "Primary induction variable must have an integer type"); 2405 2406 auto II = Legal->getInductionVars().find(IV); 2407 assert(II != Legal->getInductionVars().end() && "IV is not an induction"); 2408 2409 auto ID = II->second; 2410 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2411 2412 // The value from the original loop to which we are mapping the new induction 2413 // variable. 2414 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2415 2416 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2417 2418 // Generate code for the induction step. 
Note that induction steps are 2419 // required to be loop-invariant 2420 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 2421 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 2422 "Induction step should be loop invariant"); 2423 if (PSE.getSE()->isSCEVable(IV->getType())) { 2424 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2425 return Exp.expandCodeFor(Step, Step->getType(), 2426 LoopVectorPreHeader->getTerminator()); 2427 } 2428 return cast<SCEVUnknown>(Step)->getValue(); 2429 }; 2430 2431 // The scalar value to broadcast. This is derived from the canonical 2432 // induction variable. If a truncation type is given, truncate the canonical 2433 // induction variable and step. Otherwise, derive these values from the 2434 // induction descriptor. 2435 auto CreateScalarIV = [&](Value *&Step) -> Value * { 2436 Value *ScalarIV = Induction; 2437 if (IV != OldInduction) { 2438 ScalarIV = IV->getType()->isIntegerTy() 2439 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 2440 : Builder.CreateCast(Instruction::SIToFP, Induction, 2441 IV->getType()); 2442 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 2443 ScalarIV->setName("offset.idx"); 2444 } 2445 if (Trunc) { 2446 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2447 assert(Step->getType()->isIntegerTy() && 2448 "Truncation requires an integer step"); 2449 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 2450 Step = Builder.CreateTrunc(Step, TruncType); 2451 } 2452 return ScalarIV; 2453 }; 2454 2455 // Create the vector values from the scalar IV, in the absence of creating a 2456 // vector IV. 2457 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { 2458 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2459 for (unsigned Part = 0; Part < UF; ++Part) { 2460 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2461 Value *EntryPart = 2462 getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step, 2463 ID.getInductionOpcode()); 2464 State.set(Def, EntryPart, Part); 2465 if (Trunc) 2466 addMetadata(EntryPart, Trunc); 2467 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef, 2468 State, Part); 2469 } 2470 }; 2471 2472 // Fast-math-flags propagate from the original induction instruction. 2473 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 2474 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) 2475 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); 2476 2477 // Now do the actual transformations, and start with creating the step value. 2478 Value *Step = CreateStepValue(ID.getStep()); 2479 if (VF.isZero() || VF.isScalar()) { 2480 Value *ScalarIV = CreateScalarIV(Step); 2481 CreateSplatIV(ScalarIV, Step); 2482 return; 2483 } 2484 2485 // Determine if we want a scalar version of the induction variable. This is 2486 // true if the induction variable itself is not widened, or if it has at 2487 // least one user in the loop that is not widened. 2488 auto NeedsScalarIV = needsScalarInduction(EntryVal); 2489 if (!NeedsScalarIV) { 2490 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2491 State); 2492 return; 2493 } 2494 2495 // Try to create a new independent vector induction variable. If we can't 2496 // create the phi node, we will splat the scalar induction variable in each 2497 // loop iteration. 
2498 if (!shouldScalarizeInstruction(EntryVal)) { 2499 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2500 State); 2501 Value *ScalarIV = CreateScalarIV(Step); 2502 // Create scalar steps that can be used by instructions we will later 2503 // scalarize. Note that the addition of the scalar steps will not increase 2504 // the number of instructions in the loop in the common case prior to 2505 // InstCombine. We will be trading one vector extract for each scalar step. 2506 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2507 return; 2508 } 2509 2510 // All IV users are scalar instructions, so only emit a scalar IV, not a 2511 // vectorised IV. Except when we tail-fold, then the splat IV feeds the 2512 // predicate used by the masked loads/stores. 2513 Value *ScalarIV = CreateScalarIV(Step); 2514 if (!Cost->isScalarEpilogueAllowed()) 2515 CreateSplatIV(ScalarIV, Step); 2516 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2517 } 2518 2519 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2520 Instruction::BinaryOps BinOp) { 2521 // Create and check the types. 2522 auto *ValVTy = cast<VectorType>(Val->getType()); 2523 ElementCount VLen = ValVTy->getElementCount(); 2524 2525 Type *STy = Val->getType()->getScalarType(); 2526 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2527 "Induction Step must be an integer or FP"); 2528 assert(Step->getType() == STy && "Step has wrong type"); 2529 2530 SmallVector<Constant *, 8> Indices; 2531 2532 // Create a vector of consecutive numbers from zero to VF. 2533 VectorType *InitVecValVTy = ValVTy; 2534 Type *InitVecValSTy = STy; 2535 if (STy->isFloatingPointTy()) { 2536 InitVecValSTy = 2537 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits()); 2538 InitVecValVTy = VectorType::get(InitVecValSTy, VLen); 2539 } 2540 Value *InitVec = Builder.CreateStepVector(InitVecValVTy); 2541 2542 // Add on StartIdx 2543 Value *StartIdxSplat = Builder.CreateVectorSplat( 2544 VLen, ConstantInt::get(InitVecValSTy, StartIdx)); 2545 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat); 2546 2547 if (STy->isIntegerTy()) { 2548 Step = Builder.CreateVectorSplat(VLen, Step); 2549 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2550 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2551 // which can be found from the original scalar operations. 2552 Step = Builder.CreateMul(InitVec, Step); 2553 return Builder.CreateAdd(Val, Step, "induction"); 2554 } 2555 2556 // Floating point induction. 2557 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2558 "Binary Opcode should be specified for FP induction"); 2559 InitVec = Builder.CreateUIToFP(InitVec, ValVTy); 2560 Step = Builder.CreateVectorSplat(VLen, Step); 2561 Value *MulOp = Builder.CreateFMul(InitVec, Step); 2562 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2563 } 2564 2565 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2566 Instruction *EntryVal, 2567 const InductionDescriptor &ID, 2568 VPValue *Def, VPValue *CastDef, 2569 VPTransformState &State) { 2570 // We shouldn't have to build scalar steps if we aren't vectorizing. 2571 assert(VF.isVector() && "VF should be greater than one"); 2572 // Get the value type and ensure it and the step have the same integer type. 
2573 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2574 assert(ScalarIVTy == Step->getType() && 2575 "Val and Step should have the same type"); 2576 2577 // We build scalar steps for both integer and floating-point induction 2578 // variables. Here, we determine the kind of arithmetic we will perform. 2579 Instruction::BinaryOps AddOp; 2580 Instruction::BinaryOps MulOp; 2581 if (ScalarIVTy->isIntegerTy()) { 2582 AddOp = Instruction::Add; 2583 MulOp = Instruction::Mul; 2584 } else { 2585 AddOp = ID.getInductionOpcode(); 2586 MulOp = Instruction::FMul; 2587 } 2588 2589 // Determine the number of scalars we need to generate for each unroll 2590 // iteration. If EntryVal is uniform, we only need to generate the first 2591 // lane. Otherwise, we generate all VF values. 2592 bool IsUniform = 2593 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF); 2594 unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue(); 2595 // Compute the scalar steps and save the results in State. 2596 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2597 ScalarIVTy->getScalarSizeInBits()); 2598 Type *VecIVTy = nullptr; 2599 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; 2600 if (!IsUniform && VF.isScalable()) { 2601 VecIVTy = VectorType::get(ScalarIVTy, VF); 2602 UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF)); 2603 SplatStep = Builder.CreateVectorSplat(VF, Step); 2604 SplatIV = Builder.CreateVectorSplat(VF, ScalarIV); 2605 } 2606 2607 for (unsigned Part = 0; Part < UF; ++Part) { 2608 Value *StartIdx0 = 2609 createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF); 2610 2611 if (!IsUniform && VF.isScalable()) { 2612 auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0); 2613 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); 2614 if (ScalarIVTy->isFloatingPointTy()) 2615 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); 2616 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); 2617 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); 2618 State.set(Def, Add, Part); 2619 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2620 Part); 2621 // It's useful to record the lane values too for the known minimum number 2622 // of elements so we do those below. This improves the code quality when 2623 // trying to extract the first element, for example. 2624 } 2625 2626 if (ScalarIVTy->isFloatingPointTy()) 2627 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); 2628 2629 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2630 Value *StartIdx = Builder.CreateBinOp( 2631 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); 2632 // The step returned by `createStepForVF` is a runtime-evaluated value 2633 // when VF is scalable. Otherwise, it should be folded into a Constant. 
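      // For example (illustrative): with Part = 1, StartIdx0 is the constant 4
      // for a fixed VF of 4, but a (vscale * 4) computation materialized at
      // runtime for VF = vscale x 4.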
2634 assert((VF.isScalable() || isa<Constant>(StartIdx)) && 2635 "Expected StartIdx to be folded to a constant when VF is not " 2636 "scalable"); 2637 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2638 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2639 State.set(Def, Add, VPIteration(Part, Lane)); 2640 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2641 Part, Lane); 2642 } 2643 } 2644 } 2645 2646 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2647 const VPIteration &Instance, 2648 VPTransformState &State) { 2649 Value *ScalarInst = State.get(Def, Instance); 2650 Value *VectorValue = State.get(Def, Instance.Part); 2651 VectorValue = Builder.CreateInsertElement( 2652 VectorValue, ScalarInst, 2653 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2654 State.set(Def, VectorValue, Instance.Part); 2655 } 2656 2657 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2658 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2659 return Builder.CreateVectorReverse(Vec, "reverse"); 2660 } 2661 2662 // Return whether we allow using masked interleave-groups (for dealing with 2663 // strided loads/stores that reside in predicated blocks, or for dealing 2664 // with gaps). 2665 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2666 // If an override option has been passed in for interleaved accesses, use it. 2667 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2668 return EnableMaskedInterleavedMemAccesses; 2669 2670 return TTI.enableMaskedInterleavedAccessVectorization(); 2671 } 2672 2673 // Try to vectorize the interleave group that \p Instr belongs to. 2674 // 2675 // E.g. Translate following interleaved load group (factor = 3): 2676 // for (i = 0; i < N; i+=3) { 2677 // R = Pic[i]; // Member of index 0 2678 // G = Pic[i+1]; // Member of index 1 2679 // B = Pic[i+2]; // Member of index 2 2680 // ... // do something to R, G, B 2681 // } 2682 // To: 2683 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2684 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2685 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2686 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2687 // 2688 // Or translate following interleaved store group (factor = 3): 2689 // for (i = 0; i < N; i+=3) { 2690 // ... do something to R, G, B 2691 // Pic[i] = R; // Member of index 0 2692 // Pic[i+1] = G; // Member of index 1 2693 // Pic[i+2] = B; // Member of index 2 2694 // } 2695 // To: 2696 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2697 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2698 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2699 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2700 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2701 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2702 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2703 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2704 VPValue *BlockInMask) { 2705 Instruction *Instr = Group->getInsertPos(); 2706 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2707 2708 // Prepare for the vector type of the interleaved load/store. 
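  // For example, with a fixed VF of 4, an interleave factor of 3 and i32
  // elements, the wide vector type below is <12 x i32>, matching the R,G,B
  // example above (4 tuples of 3 elements each).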
2709 Type *ScalarTy = getLoadStoreType(Instr); 2710 unsigned InterleaveFactor = Group->getFactor(); 2711 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2712 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2713 2714 // Prepare for the new pointers. 2715 SmallVector<Value *, 2> AddrParts; 2716 unsigned Index = Group->getIndex(Instr); 2717 2718 // TODO: extend the masked interleaved-group support to reversed access. 2719 assert((!BlockInMask || !Group->isReverse()) && 2720 "Reversed masked interleave-group not supported."); 2721 2722 // If the group is reverse, adjust the index to refer to the last vector lane 2723 // instead of the first. We adjust the index from the first vector lane, 2724 // rather than directly getting the pointer for lane VF - 1, because the 2725 // pointer operand of the interleaved access is supposed to be uniform. For 2726 // uniform instructions, we're only required to generate a value for the 2727 // first vector lane in each unroll iteration. 2728 if (Group->isReverse()) 2729 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2730 2731 for (unsigned Part = 0; Part < UF; Part++) { 2732 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2733 setDebugLocFromInst(AddrPart); 2734 2735 // Note that the current instruction could be any member of the group; we 2736 // need to adjust the address to that of the member at index 0. 2737 // 2738 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2739 // b = A[i]; // Member of index 0 2740 // The current pointer points to A[i+1]; adjust it to A[i]. 2741 // 2742 // E.g. A[i+1] = a; // Member of index 1 2743 // A[i] = b; // Member of index 0 2744 // A[i+2] = c; // Member of index 2 (Current instruction) 2745 // The current pointer points to A[i+2]; adjust it to A[i]. 2746 2747 bool InBounds = false; 2748 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2749 InBounds = gep->isInBounds(); 2750 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2751 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2752 2753 // Cast to the vector pointer type. 2754 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2755 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2756 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2757 } 2758 2759 setDebugLocFromInst(Instr); 2760 Value *PoisonVec = PoisonValue::get(VecTy); 2761 2762 Value *MaskForGaps = nullptr; 2763 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2764 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2765 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2766 } 2767 2768 // Vectorize the interleaved load group. 2769 if (isa<LoadInst>(Instr)) { 2770 // For each unroll part, create a wide load for the group. 2771 SmallVector<Value *, 2> NewLoads; 2772 for (unsigned Part = 0; Part < UF; Part++) { 2773 Instruction *NewLoad; 2774 if (BlockInMask || MaskForGaps) { 2775 assert(useMaskedInterleavedAccesses(*TTI) && 2776 "masked interleaved groups are not allowed."); 2777 Value *GroupMask = MaskForGaps; 2778 if (BlockInMask) { 2779 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2780 Value *ShuffledMask = Builder.CreateShuffleVector( 2781 BlockInMaskPart, 2782 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2783 "interleaved.mask"); 2784 GroupMask = MaskForGaps 2785 ?
Builder.CreateBinOp(Instruction::And, ShuffledMask, 2786 MaskForGaps) 2787 : ShuffledMask; 2788 } 2789 NewLoad = 2790 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), 2791 GroupMask, PoisonVec, "wide.masked.vec"); 2792 } 2793 else 2794 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2795 Group->getAlign(), "wide.vec"); 2796 Group->addMetadata(NewLoad); 2797 NewLoads.push_back(NewLoad); 2798 } 2799 2800 // For each member in the group, shuffle out the appropriate data from the 2801 // wide loads. 2802 unsigned J = 0; 2803 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2804 Instruction *Member = Group->getMember(I); 2805 2806 // Skip the gaps in the group. 2807 if (!Member) 2808 continue; 2809 2810 auto StrideMask = 2811 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2812 for (unsigned Part = 0; Part < UF; Part++) { 2813 Value *StridedVec = Builder.CreateShuffleVector( 2814 NewLoads[Part], StrideMask, "strided.vec"); 2815 2816 // If this member has different type, cast the result type. 2817 if (Member->getType() != ScalarTy) { 2818 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2819 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2820 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2821 } 2822 2823 if (Group->isReverse()) 2824 StridedVec = reverseVector(StridedVec); 2825 2826 State.set(VPDefs[J], StridedVec, Part); 2827 } 2828 ++J; 2829 } 2830 return; 2831 } 2832 2833 // The sub vector type for current instruction. 2834 auto *SubVT = VectorType::get(ScalarTy, VF); 2835 2836 // Vectorize the interleaved store group. 2837 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2838 assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) && 2839 "masked interleaved groups are not allowed."); 2840 assert((!MaskForGaps || !VF.isScalable()) && 2841 "masking gaps for scalable vectors is not yet supported."); 2842 for (unsigned Part = 0; Part < UF; Part++) { 2843 // Collect the stored vector from each member. 2844 SmallVector<Value *, 4> StoredVecs; 2845 for (unsigned i = 0; i < InterleaveFactor; i++) { 2846 assert((Group->getMember(i) || MaskForGaps) && 2847 "Fail to get a member from an interleaved store group"); 2848 Instruction *Member = Group->getMember(i); 2849 2850 // Skip the gaps in the group. 2851 if (!Member) { 2852 Value *Undef = PoisonValue::get(SubVT); 2853 StoredVecs.push_back(Undef); 2854 continue; 2855 } 2856 2857 Value *StoredVec = State.get(StoredValues[i], Part); 2858 2859 if (Group->isReverse()) 2860 StoredVec = reverseVector(StoredVec); 2861 2862 // If this member has different type, cast it to a unified type. 2863 2864 if (StoredVec->getType() != SubVT) 2865 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2866 2867 StoredVecs.push_back(StoredVec); 2868 } 2869 2870 // Concatenate all vectors into a wide vector. 2871 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2872 2873 // Interleave the elements in the wide vector. 
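    // For example, with a fixed VF of 4 and an interleave factor of 3, the
    // interleave mask is <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>, producing
    // R0,G0,B0,R1,G1,B1,... from the concatenated member vectors, as in the
    // store example above.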
2874 Value *IVec = Builder.CreateShuffleVector( 2875 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2876 "interleaved.vec"); 2877 2878 Instruction *NewStoreInstr; 2879 if (BlockInMask || MaskForGaps) { 2880 Value *GroupMask = MaskForGaps; 2881 if (BlockInMask) { 2882 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2883 Value *ShuffledMask = Builder.CreateShuffleVector( 2884 BlockInMaskPart, 2885 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2886 "interleaved.mask"); 2887 GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And, 2888 ShuffledMask, MaskForGaps) 2889 : ShuffledMask; 2890 } 2891 NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part], 2892 Group->getAlign(), GroupMask); 2893 } else 2894 NewStoreInstr = 2895 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2896 2897 Group->addMetadata(NewStoreInstr); 2898 } 2899 } 2900 2901 void InnerLoopVectorizer::vectorizeMemoryInstruction( 2902 Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr, 2903 VPValue *StoredValue, VPValue *BlockInMask) { 2904 // Attempt to issue a wide load. 2905 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2906 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2907 2908 assert((LI || SI) && "Invalid Load/Store instruction"); 2909 assert((!SI || StoredValue) && "No stored value provided for widened store"); 2910 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 2911 2912 LoopVectorizationCostModel::InstWidening Decision = 2913 Cost->getWideningDecision(Instr, VF); 2914 assert((Decision == LoopVectorizationCostModel::CM_Widen || 2915 Decision == LoopVectorizationCostModel::CM_Widen_Reverse || 2916 Decision == LoopVectorizationCostModel::CM_GatherScatter) && 2917 "CM decision is not to widen the memory instruction"); 2918 2919 Type *ScalarDataTy = getLoadStoreType(Instr); 2920 2921 auto *DataTy = VectorType::get(ScalarDataTy, VF); 2922 const Align Alignment = getLoadStoreAlignment(Instr); 2923 2924 // Determine if the pointer operand of the access is either consecutive or 2925 // reverse consecutive. 2926 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2927 bool ConsecutiveStride = 2928 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2929 bool CreateGatherScatter = 2930 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2931 2932 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2933 // gather/scatter. Otherwise Decision should have been to Scalarize. 2934 assert((ConsecutiveStride || CreateGatherScatter) && 2935 "The instruction should be scalarized"); 2936 (void)ConsecutiveStride; 2937 2938 VectorParts BlockInMaskParts(UF); 2939 bool isMaskRequired = BlockInMask; 2940 if (isMaskRequired) 2941 for (unsigned Part = 0; Part < UF; ++Part) 2942 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 2943 2944 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2945 // Calculate the pointer for the specific unroll-part. 2946 GetElementPtrInst *PartPtr = nullptr; 2947 2948 bool InBounds = false; 2949 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2950 InBounds = gep->isInBounds(); 2951 if (Reverse) { 2952 // If the address is consecutive but reversed, then the 2953 // wide store needs to start at the last vector element. 
2954 // RunTimeVF = VScale * VF.getKnownMinValue() 2955 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 2956 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF); 2957 // NumElt = -Part * RunTimeVF 2958 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 2959 // LastLane = 1 - RunTimeVF 2960 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 2961 PartPtr = 2962 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 2963 PartPtr->setIsInBounds(InBounds); 2964 PartPtr = cast<GetElementPtrInst>( 2965 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 2966 PartPtr->setIsInBounds(InBounds); 2967 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2968 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 2969 } else { 2970 Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF); 2971 PartPtr = cast<GetElementPtrInst>( 2972 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 2973 PartPtr->setIsInBounds(InBounds); 2974 } 2975 2976 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2977 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2978 }; 2979 2980 // Handle Stores: 2981 if (SI) { 2982 setDebugLocFromInst(SI); 2983 2984 for (unsigned Part = 0; Part < UF; ++Part) { 2985 Instruction *NewSI = nullptr; 2986 Value *StoredVal = State.get(StoredValue, Part); 2987 if (CreateGatherScatter) { 2988 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2989 Value *VectorGep = State.get(Addr, Part); 2990 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2991 MaskPart); 2992 } else { 2993 if (Reverse) { 2994 // If we store to reverse consecutive memory locations, then we need 2995 // to reverse the order of elements in the stored value. 2996 StoredVal = reverseVector(StoredVal); 2997 // We don't want to update the value in the map as it might be used in 2998 // another expression. So don't call resetVectorValue(StoredVal). 2999 } 3000 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 3001 if (isMaskRequired) 3002 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 3003 BlockInMaskParts[Part]); 3004 else 3005 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 3006 } 3007 addMetadata(NewSI, SI); 3008 } 3009 return; 3010 } 3011 3012 // Handle loads. 3013 assert(LI && "Must have a load instruction"); 3014 setDebugLocFromInst(LI); 3015 for (unsigned Part = 0; Part < UF; ++Part) { 3016 Value *NewLI; 3017 if (CreateGatherScatter) { 3018 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 3019 Value *VectorGep = State.get(Addr, Part); 3020 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, 3021 nullptr, "wide.masked.gather"); 3022 addMetadata(NewLI, LI); 3023 } else { 3024 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 3025 if (isMaskRequired) 3026 NewLI = Builder.CreateMaskedLoad( 3027 DataTy, VecPtr, Alignment, BlockInMaskParts[Part], 3028 PoisonValue::get(DataTy), "wide.masked.load"); 3029 else 3030 NewLI = 3031 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 3032 3033 // Add metadata to the load, but setVectorValue to the reverse shuffle. 
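      // For example (illustrative, fixed VF = 4): a reverse access for part 0
      // loads the four elements ending at the current pointer through the
      // adjusted GEPs above, and the reverseVector call below reorders the
      // lanes so that lane 0 again corresponds to the first scalar iteration.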
3034 addMetadata(NewLI, LI); 3035 if (Reverse) 3036 NewLI = reverseVector(NewLI); 3037 } 3038 3039 State.set(Def, NewLI, Part); 3040 } 3041 } 3042 3043 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def, 3044 VPUser &User, 3045 const VPIteration &Instance, 3046 bool IfPredicateInstr, 3047 VPTransformState &State) { 3048 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 3049 3050 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 3051 // the first lane and part. 3052 if (isa<NoAliasScopeDeclInst>(Instr)) 3053 if (!Instance.isFirstIteration()) 3054 return; 3055 3056 setDebugLocFromInst(Instr); 3057 3058 // Does this instruction return a value ? 3059 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 3060 3061 Instruction *Cloned = Instr->clone(); 3062 if (!IsVoidRetTy) 3063 Cloned->setName(Instr->getName() + ".cloned"); 3064 3065 State.Builder.SetInsertPoint(Builder.GetInsertBlock(), 3066 Builder.GetInsertPoint()); 3067 // Replace the operands of the cloned instructions with their scalar 3068 // equivalents in the new loop. 3069 for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) { 3070 auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op)); 3071 auto InputInstance = Instance; 3072 if (!Operand || !OrigLoop->contains(Operand) || 3073 (Cost->isUniformAfterVectorization(Operand, State.VF))) 3074 InputInstance.Lane = VPLane::getFirstLane(); 3075 auto *NewOp = State.get(User.getOperand(op), InputInstance); 3076 Cloned->setOperand(op, NewOp); 3077 } 3078 addNewMetadata(Cloned, Instr); 3079 3080 // Place the cloned scalar in the new loop. 3081 Builder.Insert(Cloned); 3082 3083 State.set(Def, Cloned, Instance); 3084 3085 // If we just cloned a new assumption, add it the assumption cache. 3086 if (auto *II = dyn_cast<AssumeInst>(Cloned)) 3087 AC->registerAssumption(II); 3088 3089 // End if-block. 3090 if (IfPredicateInstr) 3091 PredicatedInstructions.push_back(Cloned); 3092 } 3093 3094 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 3095 Value *End, Value *Step, 3096 Instruction *DL) { 3097 BasicBlock *Header = L->getHeader(); 3098 BasicBlock *Latch = L->getLoopLatch(); 3099 // As we're just creating this loop, it's possible no latch exists 3100 // yet. If so, use the header as this will be a single block loop. 3101 if (!Latch) 3102 Latch = Header; 3103 3104 IRBuilder<> B(&*Header->getFirstInsertionPt()); 3105 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 3106 setDebugLocFromInst(OldInst, &B); 3107 auto *Induction = B.CreatePHI(Start->getType(), 2, "index"); 3108 3109 B.SetInsertPoint(Latch->getTerminator()); 3110 setDebugLocFromInst(OldInst, &B); 3111 3112 // Create i+1 and fill the PHINode. 3113 // 3114 // If the tail is not folded, we know that End - Start >= Step (either 3115 // statically or through the minimum iteration checks). We also know that both 3116 // Start % Step == 0 and End % Step == 0. We exit the vector loop if %IV + 3117 // %Step == %End. Hence we must exit the loop before %IV + %Step unsigned 3118 // overflows and we can mark the induction increment as NUW. 3119 Value *Next = B.CreateAdd(Induction, Step, "index.next", 3120 /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false); 3121 Induction->addIncoming(Start, L->getLoopPreheader()); 3122 Induction->addIncoming(Next, Latch); 3123 // Create the compare. 
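  // For example (illustrative): with Start = 0, Step = VF * UF = 8 and
  // End = n.vec = 16, the compare below becomes true once index.next reaches
  // 16, so the vector loop exits after two iterations.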
3124 Value *ICmp = B.CreateICmpEQ(Next, End); 3125 B.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header); 3126 3127 // Now we have two terminators. Remove the old one from the block. 3128 Latch->getTerminator()->eraseFromParent(); 3129 3130 return Induction; 3131 } 3132 3133 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 3134 if (TripCount) 3135 return TripCount; 3136 3137 assert(L && "Create Trip Count for null loop."); 3138 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3139 // Find the loop boundaries. 3140 ScalarEvolution *SE = PSE.getSE(); 3141 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 3142 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 3143 "Invalid loop count"); 3144 3145 Type *IdxTy = Legal->getWidestInductionType(); 3146 assert(IdxTy && "No type for induction"); 3147 3148 // The exit count might have the type of i64 while the phi is i32. This can 3149 // happen if we have an induction variable that is sign extended before the 3150 // compare. The only way that we get a backedge taken count is that the 3151 // induction variable was signed and as such will not overflow. In such a case 3152 // truncation is legal. 3153 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 3154 IdxTy->getPrimitiveSizeInBits()) 3155 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3156 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3157 3158 // Get the total trip count from the count by adding 1. 3159 const SCEV *ExitCount = SE->getAddExpr( 3160 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3161 3162 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3163 3164 // Expand the trip count and place the new instructions in the preheader. 3165 // Notice that the pre-header does not change, only the loop body. 3166 SCEVExpander Exp(*SE, DL, "induction"); 3167 3168 // Count holds the overall loop count (N). 3169 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3170 L->getLoopPreheader()->getTerminator()); 3171 3172 if (TripCount->getType()->isPointerTy()) 3173 TripCount = 3174 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3175 L->getLoopPreheader()->getTerminator()); 3176 3177 return TripCount; 3178 } 3179 3180 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3181 if (VectorTripCount) 3182 return VectorTripCount; 3183 3184 Value *TC = getOrCreateTripCount(L); 3185 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3186 3187 Type *Ty = TC->getType(); 3188 // This is where we can make the step a runtime constant. 3189 Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF); 3190 3191 // If the tail is to be folded by masking, round the number of iterations N 3192 // up to a multiple of Step instead of rounding down. This is done by first 3193 // adding Step-1 and then rounding down. Note that it's ok if this addition 3194 // overflows: the vector induction variable will eventually wrap to zero given 3195 // that it starts at zero and its Step is a power of two; the loop will then 3196 // exit, with the last early-exit vector comparison also producing all-true. 
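  // For example (illustrative): with VF * UF = 8 and N = 13, N is rounded up
  // to 20 below; the URem/Sub that follow then give a vector trip count of
  // 16, so the masked vector loop covers all 13 iterations in two passes.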
3197 if (Cost->foldTailByMasking()) { 3198 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 3199 "VF*UF must be a power of 2 when folding tail by masking"); 3200 assert(!VF.isScalable() && 3201 "Tail folding not yet supported for scalable vectors"); 3202 TC = Builder.CreateAdd( 3203 TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up"); 3204 } 3205 3206 // Now we need to generate the expression for the part of the loop that the 3207 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3208 // iterations are not required for correctness, or N - Step, otherwise. Step 3209 // is equal to the vectorization factor (number of SIMD elements) times the 3210 // unroll factor (number of SIMD instructions). 3211 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3212 3213 // There are cases where we *must* run at least one iteration in the remainder 3214 // loop. See the cost model for when this can happen. If the step evenly 3215 // divides the trip count, we set the remainder to be equal to the step. If 3216 // the step does not evenly divide the trip count, no adjustment is necessary 3217 // since there will already be scalar iterations. Note that the minimum 3218 // iterations check ensures that N >= Step. 3219 if (Cost->requiresScalarEpilogue(VF)) { 3220 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3221 R = Builder.CreateSelect(IsZero, Step, R); 3222 } 3223 3224 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3225 3226 return VectorTripCount; 3227 } 3228 3229 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 3230 const DataLayout &DL) { 3231 // Verify that V is a vector type with same number of elements as DstVTy. 3232 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 3233 unsigned VF = DstFVTy->getNumElements(); 3234 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 3235 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 3236 Type *SrcElemTy = SrcVecTy->getElementType(); 3237 Type *DstElemTy = DstFVTy->getElementType(); 3238 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 3239 "Vector elements must have same size"); 3240 3241 // Do a direct cast if element types are castable. 3242 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 3243 return Builder.CreateBitOrPointerCast(V, DstFVTy); 3244 } 3245 // V cannot be directly casted to desired vector type. 3246 // May happen when V is a floating point vector but DstVTy is a vector of 3247 // pointers or vice-versa. Handle this using a two-step bitcast using an 3248 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 3249 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 3250 "Only one type should be a pointer type"); 3251 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 3252 "Only one type should be a floating point type"); 3253 Type *IntTy = 3254 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 3255 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 3256 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 3257 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 3258 } 3259 3260 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3261 BasicBlock *Bypass) { 3262 Value *Count = getOrCreateTripCount(L); 3263 // Reuse existing vector loop preheader for TC checks. 3264 // Note that new preheader block is generated for vector loop. 
3265 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 3266 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 3267 3268 // Generate code to check if the loop's trip count is less than VF * UF, or 3269 // equal to it in case a scalar epilogue is required; this implies that the 3270 // vector trip count is zero. This check also covers the case where adding one 3271 // to the backedge-taken count overflowed leading to an incorrect trip count 3272 // of zero. In this case we will also jump to the scalar loop. 3273 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE 3274 : ICmpInst::ICMP_ULT; 3275 3276 // If tail is to be folded, vector loop takes care of all iterations. 3277 Value *CheckMinIters = Builder.getFalse(); 3278 if (!Cost->foldTailByMasking()) { 3279 Value *Step = 3280 createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF); 3281 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 3282 } 3283 // Create new preheader for vector loop. 3284 LoopVectorPreHeader = 3285 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 3286 "vector.ph"); 3287 3288 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 3289 DT->getNode(Bypass)->getIDom()) && 3290 "TC check is expected to dominate Bypass"); 3291 3292 // Update dominator for Bypass & LoopExit (if needed). 3293 DT->changeImmediateDominator(Bypass, TCCheckBlock); 3294 if (!Cost->requiresScalarEpilogue(VF)) 3295 // If there is an epilogue which must run, there's no edge from the 3296 // middle block to exit blocks and thus no need to update the immediate 3297 // dominator of the exit blocks. 3298 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 3299 3300 ReplaceInstWithInst( 3301 TCCheckBlock->getTerminator(), 3302 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3303 LoopBypassBlocks.push_back(TCCheckBlock); 3304 } 3305 3306 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 3307 3308 BasicBlock *const SCEVCheckBlock = 3309 RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock); 3310 if (!SCEVCheckBlock) 3311 return nullptr; 3312 3313 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3314 (OptForSizeBasedOnProfile && 3315 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3316 "Cannot SCEV check stride or overflow when optimizing for size"); 3317 3318 3319 // Update dominator only if this is first RT check. 3320 if (LoopBypassBlocks.empty()) { 3321 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3322 if (!Cost->requiresScalarEpilogue(VF)) 3323 // If there is an epilogue which must run, there's no edge from the 3324 // middle block to exit blocks and thus no need to update the immediate 3325 // dominator of the exit blocks. 3326 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3327 } 3328 3329 LoopBypassBlocks.push_back(SCEVCheckBlock); 3330 AddedSafetyChecks = true; 3331 return SCEVCheckBlock; 3332 } 3333 3334 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, 3335 BasicBlock *Bypass) { 3336 // VPlan-native path does not do any analysis for runtime checks currently. 3337 if (EnableVPlanNativePath) 3338 return nullptr; 3339 3340 BasicBlock *const MemCheckBlock = 3341 RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader); 3342 3343 // Check if we generated code that checks in runtime if arrays overlap. We put 3344 // the checks into a separate block to make the more common case of few 3345 // elements faster. 
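  // For example (illustrative): for a loop that writes A[i] and reads B[i],
  // the emitted block typically compares the accessed address ranges, roughly
  // (A + N <= B) || (B + N <= A), and falls back to the scalar loop when the
  // ranges may overlap.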
3346 if (!MemCheckBlock) 3347 return nullptr; 3348 3349 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3350 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3351 "Cannot emit memory checks when optimizing for size, unless forced " 3352 "to vectorize."); 3353 ORE->emit([&]() { 3354 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3355 L->getStartLoc(), L->getHeader()) 3356 << "Code-size may be reduced by not forcing " 3357 "vectorization, or by source-code modifications " 3358 "eliminating the need for runtime checks " 3359 "(e.g., adding 'restrict')."; 3360 }); 3361 } 3362 3363 LoopBypassBlocks.push_back(MemCheckBlock); 3364 3365 AddedSafetyChecks = true; 3366 3367 // We currently don't use LoopVersioning for the actual loop cloning but we 3368 // still use it to add the noalias metadata. 3369 LVer = std::make_unique<LoopVersioning>( 3370 *Legal->getLAI(), 3371 Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, 3372 DT, PSE.getSE()); 3373 LVer->prepareNoAliasMetadata(); 3374 return MemCheckBlock; 3375 } 3376 3377 Value *InnerLoopVectorizer::emitTransformedIndex( 3378 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 3379 const InductionDescriptor &ID) const { 3380 3381 SCEVExpander Exp(*SE, DL, "induction"); 3382 auto Step = ID.getStep(); 3383 auto StartValue = ID.getStartValue(); 3384 assert(Index->getType()->getScalarType() == Step->getType() && 3385 "Index scalar type does not match StepValue type"); 3386 3387 // Note: the IR at this point is broken. We cannot use SE to create any new 3388 // SCEV and then expand it, hoping that SCEV's simplification will give us 3389 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 3390 // lead to various SCEV crashes. So all we can do is to use builder and rely 3391 // on InstCombine for future simplifications. Here we handle some trivial 3392 // cases only. 3393 auto CreateAdd = [&B](Value *X, Value *Y) { 3394 assert(X->getType() == Y->getType() && "Types don't match!"); 3395 if (auto *CX = dyn_cast<ConstantInt>(X)) 3396 if (CX->isZero()) 3397 return Y; 3398 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3399 if (CY->isZero()) 3400 return X; 3401 return B.CreateAdd(X, Y); 3402 }; 3403 3404 // We allow X to be a vector type, in which case Y will potentially be 3405 // splatted into a vector with the same element count. 3406 auto CreateMul = [&B](Value *X, Value *Y) { 3407 assert(X->getType()->getScalarType() == Y->getType() && 3408 "Types don't match!"); 3409 if (auto *CX = dyn_cast<ConstantInt>(X)) 3410 if (CX->isOne()) 3411 return Y; 3412 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3413 if (CY->isOne()) 3414 return X; 3415 VectorType *XVTy = dyn_cast<VectorType>(X->getType()); 3416 if (XVTy && !isa<VectorType>(Y->getType())) 3417 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); 3418 return B.CreateMul(X, Y); 3419 }; 3420 3421 // Get a suitable insert point for SCEV expansion. For blocks in the vector 3422 // loop, choose the end of the vector loop header (=LoopVectorBody), because 3423 // the DomTree is not kept up-to-date for additional blocks generated in the 3424 // vector loop. By using the header as insertion point, we guarantee that the 3425 // expanded instructions dominate all their uses. 
3426 auto GetInsertPoint = [this, &B]() { 3427 BasicBlock *InsertBB = B.GetInsertPoint()->getParent(); 3428 if (InsertBB != LoopVectorBody && 3429 LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB)) 3430 return LoopVectorBody->getTerminator(); 3431 return &*B.GetInsertPoint(); 3432 }; 3433 3434 switch (ID.getKind()) { 3435 case InductionDescriptor::IK_IntInduction: { 3436 assert(!isa<VectorType>(Index->getType()) && 3437 "Vector indices not supported for integer inductions yet"); 3438 assert(Index->getType() == StartValue->getType() && 3439 "Index type does not match StartValue type"); 3440 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 3441 return B.CreateSub(StartValue, Index); 3442 auto *Offset = CreateMul( 3443 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())); 3444 return CreateAdd(StartValue, Offset); 3445 } 3446 case InductionDescriptor::IK_PtrInduction: { 3447 assert(isa<SCEVConstant>(Step) && 3448 "Expected constant step for pointer induction"); 3449 return B.CreateGEP( 3450 ID.getElementType(), StartValue, 3451 CreateMul(Index, 3452 Exp.expandCodeFor(Step, Index->getType()->getScalarType(), 3453 GetInsertPoint()))); 3454 } 3455 case InductionDescriptor::IK_FpInduction: { 3456 assert(!isa<VectorType>(Index->getType()) && 3457 "Vector indices not supported for FP inductions yet"); 3458 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 3459 auto InductionBinOp = ID.getInductionBinOp(); 3460 assert(InductionBinOp && 3461 (InductionBinOp->getOpcode() == Instruction::FAdd || 3462 InductionBinOp->getOpcode() == Instruction::FSub) && 3463 "Original bin op should be defined for FP induction"); 3464 3465 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 3466 Value *MulExp = B.CreateFMul(StepValue, Index); 3467 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 3468 "induction"); 3469 } 3470 case InductionDescriptor::IK_NoInduction: 3471 return nullptr; 3472 } 3473 llvm_unreachable("invalid enum"); 3474 } 3475 3476 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3477 LoopScalarBody = OrigLoop->getHeader(); 3478 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3479 assert(LoopVectorPreHeader && "Invalid loop structure"); 3480 LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr 3481 assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) && 3482 "multiple exit loop without required epilogue?"); 3483 3484 LoopMiddleBlock = 3485 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3486 LI, nullptr, Twine(Prefix) + "middle.block"); 3487 LoopScalarPreHeader = 3488 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3489 nullptr, Twine(Prefix) + "scalar.ph"); 3490 3491 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3492 3493 // Set up the middle block terminator. Two cases: 3494 // 1) If we know that we must execute the scalar epilogue, emit an 3495 // unconditional branch. 3496 // 2) Otherwise, we must have a single unique exit block (due to how we 3497 // implement the multiple exit case). In this case, set up a conditonal 3498 // branch from the middle block to the loop scalar preheader, and the 3499 // exit block. completeLoopSkeleton will update the condition to use an 3500 // iteration check, if required to decide whether to execute the remainder. 3501 BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ? 
3502 BranchInst::Create(LoopScalarPreHeader) : 3503 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, 3504 Builder.getTrue()); 3505 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3506 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3507 3508 // We intentionally don't let SplitBlock to update LoopInfo since 3509 // LoopVectorBody should belong to another loop than LoopVectorPreHeader. 3510 // LoopVectorBody is explicitly added to the correct place few lines later. 3511 LoopVectorBody = 3512 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3513 nullptr, nullptr, Twine(Prefix) + "vector.body"); 3514 3515 // Update dominator for loop exit. 3516 if (!Cost->requiresScalarEpilogue(VF)) 3517 // If there is an epilogue which must run, there's no edge from the 3518 // middle block to exit blocks and thus no need to update the immediate 3519 // dominator of the exit blocks. 3520 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3521 3522 // Create and register the new vector loop. 3523 Loop *Lp = LI->AllocateLoop(); 3524 Loop *ParentLoop = OrigLoop->getParentLoop(); 3525 3526 // Insert the new loop into the loop nest and register the new basic blocks 3527 // before calling any utilities such as SCEV that require valid LoopInfo. 3528 if (ParentLoop) { 3529 ParentLoop->addChildLoop(Lp); 3530 } else { 3531 LI->addTopLevelLoop(Lp); 3532 } 3533 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3534 return Lp; 3535 } 3536 3537 void InnerLoopVectorizer::createInductionResumeValues( 3538 Loop *L, Value *VectorTripCount, 3539 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3540 assert(VectorTripCount && L && "Expected valid arguments"); 3541 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3542 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3543 "Inconsistent information about additional bypass."); 3544 // We are going to resume the execution of the scalar loop. 3545 // Go over all of the induction variables that we found and fix the 3546 // PHIs that are left in the scalar version of the loop. 3547 // The starting values of PHI nodes depend on the counter of the last 3548 // iteration in the vectorized loop. 3549 // If we come from a bypass edge then we need to start from the original 3550 // start value. 3551 for (auto &InductionEntry : Legal->getInductionVars()) { 3552 PHINode *OrigPhi = InductionEntry.first; 3553 InductionDescriptor II = InductionEntry.second; 3554 3555 // Create phi nodes to merge from the backedge-taken check block. 3556 PHINode *BCResumeVal = 3557 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3558 LoopScalarPreHeader->getTerminator()); 3559 // Copy original phi DL over to the new one. 3560 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3561 Value *&EndValue = IVEndValues[OrigPhi]; 3562 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3563 if (OrigPhi == OldInduction) { 3564 // We know what the end value is. 3565 EndValue = VectorTripCount; 3566 } else { 3567 IRBuilder<> B(L->getLoopPreheader()->getTerminator()); 3568 3569 // Fast-math-flags propagate from the original induction instruction. 
3570 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3571 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3572 3573 Type *StepType = II.getStep()->getType(); 3574 Instruction::CastOps CastOp = 3575 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3576 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3577 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3578 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3579 EndValue->setName("ind.end"); 3580 3581 // Compute the end value for the additional bypass (if applicable). 3582 if (AdditionalBypass.first) { 3583 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3584 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3585 StepType, true); 3586 CRD = 3587 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3588 EndValueFromAdditionalBypass = 3589 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3590 EndValueFromAdditionalBypass->setName("ind.end"); 3591 } 3592 } 3593 // The new PHI merges the original incoming value, in case of a bypass, 3594 // or the value at the end of the vectorized loop. 3595 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3596 3597 // Fix the scalar body counter (PHI node). 3598 // The old induction's phi node in the scalar body needs the truncated 3599 // value. 3600 for (BasicBlock *BB : LoopBypassBlocks) 3601 BCResumeVal->addIncoming(II.getStartValue(), BB); 3602 3603 if (AdditionalBypass.first) 3604 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3605 EndValueFromAdditionalBypass); 3606 3607 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3608 } 3609 } 3610 3611 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3612 MDNode *OrigLoopID) { 3613 assert(L && "Expected valid loop."); 3614 3615 // The trip counts should be cached by now. 3616 Value *Count = getOrCreateTripCount(L); 3617 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3618 3619 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3620 3621 // Add a check in the middle block to see if we have completed 3622 // all of the iterations in the first vector loop. Three cases: 3623 // 1) If we require a scalar epilogue, there is no conditional branch as 3624 // we unconditionally branch to the scalar preheader. Do nothing. 3625 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. 3626 // Thus if tail is to be folded, we know we don't need to run the 3627 // remainder and we can use the previous value for the condition (true). 3628 // 3) Otherwise, construct a runtime check. 3629 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { 3630 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3631 Count, VectorTripCount, "cmp.n", 3632 LoopMiddleBlock->getTerminator()); 3633 3634 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3635 // of the corresponding compare because they may have ended up with 3636 // different line numbers and we want to avoid awkward line stepping while 3637 // debugging. Eg. if the compare has got a line number inside the loop. 3638 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3639 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3640 } 3641 3642 // Get ready to start creating new instructions into the vectorized body. 
3643 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3644 "Inconsistent vector loop preheader"); 3645 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3646 3647 Optional<MDNode *> VectorizedLoopID = 3648 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3649 LLVMLoopVectorizeFollowupVectorized}); 3650 if (VectorizedLoopID.hasValue()) { 3651 L->setLoopID(VectorizedLoopID.getValue()); 3652 3653 // Do not setAlreadyVectorized if loop attributes have been defined 3654 // explicitly. 3655 return LoopVectorPreHeader; 3656 } 3657 3658 // Keep all loop hints from the original loop on the vector loop (we'll 3659 // replace the vectorizer-specific hints below). 3660 if (MDNode *LID = OrigLoop->getLoopID()) 3661 L->setLoopID(LID); 3662 3663 LoopVectorizeHints Hints(L, true, *ORE); 3664 Hints.setAlreadyVectorized(); 3665 3666 #ifdef EXPENSIVE_CHECKS 3667 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3668 LI->verify(*DT); 3669 #endif 3670 3671 return LoopVectorPreHeader; 3672 } 3673 3674 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3675 /* 3676 In this function we generate a new loop. The new loop will contain 3677 the vectorized instructions while the old loop will continue to run the 3678 scalar remainder. 3679 3680 [ ] <-- loop iteration number check. 3681 / | 3682 / v 3683 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3684 | / | 3685 | / v 3686 || [ ] <-- vector pre header. 3687 |/ | 3688 | v 3689 | [ ] \ 3690 | [ ]_| <-- vector loop. 3691 | | 3692 | v 3693 \ -[ ] <--- middle-block. 3694 \/ | 3695 /\ v 3696 | ->[ ] <--- new preheader. 3697 | | 3698 (opt) v <-- edge from middle to exit iff epilogue is not required. 3699 | [ ] \ 3700 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). 3701 \ | 3702 \ v 3703 >[ ] <-- exit block(s). 3704 ... 3705 */ 3706 3707 // Get the metadata of the original loop before it gets modified. 3708 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3709 3710 // Workaround! Compute the trip count of the original loop and cache it 3711 // before we start modifying the CFG. This code has a systemic problem 3712 // wherein it tries to run analysis over partially constructed IR; this is 3713 // wrong, and not simply for SCEV. The trip count of the original loop 3714 // simply happens to be prone to hitting this in practice. In theory, we 3715 // can hit the same issue for any SCEV, or ValueTracking query done during 3716 // mutation. See PR49900. 3717 getOrCreateTripCount(OrigLoop); 3718 3719 // Create an empty vector loop, and prepare basic blocks for the runtime 3720 // checks. 3721 Loop *Lp = createVectorLoopSkeleton(""); 3722 3723 // Now, compare the new count to zero. If it is zero skip the vector loop and 3724 // jump to the scalar loop. This check also covers the case where the 3725 // backedge-taken count is uint##_max: adding one to it will overflow leading 3726 // to an incorrect trip count of zero. In this (rare) case we will also jump 3727 // to the scalar loop. 3728 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3729 3730 // Generate the code to check any assumptions that we've made for SCEV 3731 // expressions. 3732 emitSCEVChecks(Lp, LoopScalarPreHeader); 3733 3734 // Generate the code that checks in runtime if arrays overlap. We put the 3735 // checks into a separate block to make the more common case of few elements 3736 // faster. 
3737 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3738 3739 // Some loops have a single integer induction variable, while other loops 3740 // don't. One example is c++ iterators that often have multiple pointer 3741 // induction variables. In the code below we also support a case where we 3742 // don't have a single induction variable. 3743 // 3744 // We try to obtain an induction variable from the original loop as hard 3745 // as possible. However if we don't find one that: 3746 // - is an integer 3747 // - counts from zero, stepping by one 3748 // - is the size of the widest induction variable type 3749 // then we create a new one. 3750 OldInduction = Legal->getPrimaryInduction(); 3751 Type *IdxTy = Legal->getWidestInductionType(); 3752 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3753 // The loop step is equal to the vectorization factor (num of SIMD elements) 3754 // times the unroll factor (num of SIMD instructions). 3755 Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt()); 3756 Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF); 3757 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3758 Induction = 3759 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3760 getDebugLocFromInstOrOperands(OldInduction)); 3761 3762 // Emit phis for the new starting index of the scalar loop. 3763 createInductionResumeValues(Lp, CountRoundDown); 3764 3765 return completeLoopSkeleton(Lp, OrigLoopID); 3766 } 3767 3768 // Fix up external users of the induction variable. At this point, we are 3769 // in LCSSA form, with all external PHIs that use the IV having one input value, 3770 // coming from the remainder loop. We need those PHIs to also have a correct 3771 // value for the IV when arriving directly from the middle block. 3772 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3773 const InductionDescriptor &II, 3774 Value *CountRoundDown, Value *EndValue, 3775 BasicBlock *MiddleBlock) { 3776 // There are two kinds of external IV usages - those that use the value 3777 // computed in the last iteration (the PHI) and those that use the penultimate 3778 // value (the value that feeds into the phi from the loop latch). 3779 // We allow both, but they, obviously, have different values. 3780 3781 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3782 3783 DenseMap<Value *, Value *> MissingVals; 3784 3785 // An external user of the last iteration's value should see the value that 3786 // the remainder loop uses to initialize its own IV. 3787 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3788 for (User *U : PostInc->users()) { 3789 Instruction *UI = cast<Instruction>(U); 3790 if (!OrigLoop->contains(UI)) { 3791 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3792 MissingVals[UI] = EndValue; 3793 } 3794 } 3795 3796 // An external user of the penultimate value need to see EndValue - Step. 3797 // The simplest way to get this is to recompute it from the constituent SCEVs, 3798 // that is Start + (Step * (CRD - 1)). 3799 for (User *U : OrigPhi->users()) { 3800 auto *UI = cast<Instruction>(U); 3801 if (!OrigLoop->contains(UI)) { 3802 const DataLayout &DL = 3803 OrigLoop->getHeader()->getModule()->getDataLayout(); 3804 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3805 3806 IRBuilder<> B(MiddleBlock->getTerminator()); 3807 3808 // Fast-math-flags propagate from the original induction instruction. 
3809 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3810 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3811 3812 Value *CountMinusOne = B.CreateSub( 3813 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3814 Value *CMO = 3815 !II.getStep()->getType()->isIntegerTy() 3816 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3817 II.getStep()->getType()) 3818 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3819 CMO->setName("cast.cmo"); 3820 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); 3821 Escape->setName("ind.escape"); 3822 MissingVals[UI] = Escape; 3823 } 3824 } 3825 3826 for (auto &I : MissingVals) { 3827 PHINode *PHI = cast<PHINode>(I.first); 3828 // One corner case we have to handle is two IVs "chasing" each-other, 3829 // that is %IV2 = phi [...], [ %IV1, %latch ] 3830 // In this case, if IV1 has an external use, we need to avoid adding both 3831 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3832 // don't already have an incoming value for the middle block. 3833 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3834 PHI->addIncoming(I.second, MiddleBlock); 3835 } 3836 } 3837 3838 namespace { 3839 3840 struct CSEDenseMapInfo { 3841 static bool canHandle(const Instruction *I) { 3842 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3843 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3844 } 3845 3846 static inline Instruction *getEmptyKey() { 3847 return DenseMapInfo<Instruction *>::getEmptyKey(); 3848 } 3849 3850 static inline Instruction *getTombstoneKey() { 3851 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3852 } 3853 3854 static unsigned getHashValue(const Instruction *I) { 3855 assert(canHandle(I) && "Unknown instruction!"); 3856 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3857 I->value_op_end())); 3858 } 3859 3860 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3861 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3862 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3863 return LHS == RHS; 3864 return LHS->isIdenticalTo(RHS); 3865 } 3866 }; 3867 3868 } // end anonymous namespace 3869 3870 ///Perform cse of induction variable instructions. 3871 static void cse(BasicBlock *BB) { 3872 // Perform simple cse. 3873 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3874 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3875 Instruction *In = &*I++; 3876 3877 if (!CSEDenseMapInfo::canHandle(In)) 3878 continue; 3879 3880 // Check if we can replace this instruction with any of the 3881 // visited instructions. 3882 if (Instruction *V = CSEMap.lookup(In)) { 3883 In->replaceAllUsesWith(V); 3884 In->eraseFromParent(); 3885 continue; 3886 } 3887 3888 CSEMap[In] = In; 3889 } 3890 } 3891 3892 InstructionCost 3893 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3894 bool &NeedToScalarize) const { 3895 Function *F = CI->getCalledFunction(); 3896 Type *ScalarRetTy = CI->getType(); 3897 SmallVector<Type *, 4> Tys, ScalarTys; 3898 for (auto &ArgOp : CI->arg_operands()) 3899 ScalarTys.push_back(ArgOp->getType()); 3900 3901 // Estimate cost of scalarized vector call. The source operands are assumed 3902 // to be vectors, so we need to extract individual elements from there, 3903 // execute VF scalar calls, and then gather the result into the vector return 3904 // value. 
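  // For example (illustrative costs): with VF = 4, a scalar call cost of 10
  // and a scalarization overhead of 6, the scalarized estimate computed below
  // is 4 * 10 + 6 = 46; it is only returned if no cheaper vectorized variant
  // of the callee is available.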
3905 InstructionCost ScalarCallCost = 3906 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3907 if (VF.isScalar()) 3908 return ScalarCallCost; 3909 3910 // Compute corresponding vector type for return value and arguments. 3911 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3912 for (Type *ScalarTy : ScalarTys) 3913 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3914 3915 // Compute costs of unpacking argument values for the scalar calls and 3916 // packing the return values to a vector. 3917 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3918 3919 InstructionCost Cost = 3920 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3921 3922 // If we can't emit a vector call for this function, then the currently found 3923 // cost is the cost we need to return. 3924 NeedToScalarize = true; 3925 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3926 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3927 3928 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3929 return Cost; 3930 3931 // If the corresponding vector cost is cheaper, return its cost. 3932 InstructionCost VectorCallCost = 3933 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3934 if (VectorCallCost < Cost) { 3935 NeedToScalarize = false; 3936 Cost = VectorCallCost; 3937 } 3938 return Cost; 3939 } 3940 3941 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3942 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3943 return Elt; 3944 return VectorType::get(Elt, VF); 3945 } 3946 3947 InstructionCost 3948 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3949 ElementCount VF) const { 3950 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3951 assert(ID && "Expected intrinsic call!"); 3952 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3953 FastMathFlags FMF; 3954 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3955 FMF = FPMO->getFastMathFlags(); 3956 3957 SmallVector<const Value *> Arguments(CI->args()); 3958 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3959 SmallVector<Type *> ParamTys; 3960 std::transform(FTy->param_begin(), FTy->param_end(), 3961 std::back_inserter(ParamTys), 3962 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3963 3964 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3965 dyn_cast<IntrinsicInst>(CI)); 3966 return TTI.getIntrinsicInstrCost(CostAttrs, 3967 TargetTransformInfo::TCK_RecipThroughput); 3968 } 3969 3970 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3971 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3972 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3973 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3974 } 3975 3976 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3977 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3978 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3979 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3980 } 3981 3982 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3983 // For every instruction `I` in MinBWs, truncate the operands, create a 3984 // truncated version of `I` and reextend its result. InstCombine runs 3985 // later and will remove any ext/trunc pairs. 
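  // For example (illustrative): an add over <4 x i32> whose result is known
  // to need only 8 bits is rewritten as a trunc of both operands to <4 x i8>,
  // an add in <4 x i8>, and a zext of the result back to <4 x i32>; wrap
  // flags are intentionally dropped because the narrower op may wrap.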
3986 SmallPtrSet<Value *, 4> Erased; 3987 for (const auto &KV : Cost->getMinimalBitwidths()) { 3988 // If the value wasn't vectorized, we must maintain the original scalar 3989 // type. The absence of the value from State indicates that it 3990 // wasn't vectorized. 3991 // FIXME: Should not rely on getVPValue at this point. 3992 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3993 if (!State.hasAnyVectorValue(Def)) 3994 continue; 3995 for (unsigned Part = 0; Part < UF; ++Part) { 3996 Value *I = State.get(Def, Part); 3997 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3998 continue; 3999 Type *OriginalTy = I->getType(); 4000 Type *ScalarTruncatedTy = 4001 IntegerType::get(OriginalTy->getContext(), KV.second); 4002 auto *TruncatedTy = VectorType::get( 4003 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); 4004 if (TruncatedTy == OriginalTy) 4005 continue; 4006 4007 IRBuilder<> B(cast<Instruction>(I)); 4008 auto ShrinkOperand = [&](Value *V) -> Value * { 4009 if (auto *ZI = dyn_cast<ZExtInst>(V)) 4010 if (ZI->getSrcTy() == TruncatedTy) 4011 return ZI->getOperand(0); 4012 return B.CreateZExtOrTrunc(V, TruncatedTy); 4013 }; 4014 4015 // The actual instruction modification depends on the instruction type, 4016 // unfortunately. 4017 Value *NewI = nullptr; 4018 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 4019 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 4020 ShrinkOperand(BO->getOperand(1))); 4021 4022 // Any wrapping introduced by shrinking this operation shouldn't be 4023 // considered undefined behavior. So, we can't unconditionally copy 4024 // arithmetic wrapping flags to NewI. 4025 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 4026 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 4027 NewI = 4028 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 4029 ShrinkOperand(CI->getOperand(1))); 4030 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 4031 NewI = B.CreateSelect(SI->getCondition(), 4032 ShrinkOperand(SI->getTrueValue()), 4033 ShrinkOperand(SI->getFalseValue())); 4034 } else if (auto *CI = dyn_cast<CastInst>(I)) { 4035 switch (CI->getOpcode()) { 4036 default: 4037 llvm_unreachable("Unhandled cast!"); 4038 case Instruction::Trunc: 4039 NewI = ShrinkOperand(CI->getOperand(0)); 4040 break; 4041 case Instruction::SExt: 4042 NewI = B.CreateSExtOrTrunc( 4043 CI->getOperand(0), 4044 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4045 break; 4046 case Instruction::ZExt: 4047 NewI = B.CreateZExtOrTrunc( 4048 CI->getOperand(0), 4049 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4050 break; 4051 } 4052 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 4053 auto Elements0 = 4054 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); 4055 auto *O0 = B.CreateZExtOrTrunc( 4056 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 4057 auto Elements1 = 4058 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); 4059 auto *O1 = B.CreateZExtOrTrunc( 4060 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 4061 4062 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 4063 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 4064 // Don't do anything with the operands, just extend the result. 
4065 continue; 4066 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 4067 auto Elements = 4068 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); 4069 auto *O0 = B.CreateZExtOrTrunc( 4070 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4071 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 4072 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 4073 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 4074 auto Elements = 4075 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); 4076 auto *O0 = B.CreateZExtOrTrunc( 4077 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4078 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 4079 } else { 4080 // If we don't know what to do, be conservative and don't do anything. 4081 continue; 4082 } 4083 4084 // Lastly, extend the result. 4085 NewI->takeName(cast<Instruction>(I)); 4086 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 4087 I->replaceAllUsesWith(Res); 4088 cast<Instruction>(I)->eraseFromParent(); 4089 Erased.insert(I); 4090 State.reset(Def, Res, Part); 4091 } 4092 } 4093 4094 // We'll have created a bunch of ZExts that are now parentless. Clean up. 4095 for (const auto &KV : Cost->getMinimalBitwidths()) { 4096 // If the value wasn't vectorized, we must maintain the original scalar 4097 // type. The absence of the value from State indicates that it 4098 // wasn't vectorized. 4099 // FIXME: Should not rely on getVPValue at this point. 4100 VPValue *Def = State.Plan->getVPValue(KV.first, true); 4101 if (!State.hasAnyVectorValue(Def)) 4102 continue; 4103 for (unsigned Part = 0; Part < UF; ++Part) { 4104 Value *I = State.get(Def, Part); 4105 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 4106 if (Inst && Inst->use_empty()) { 4107 Value *NewI = Inst->getOperand(0); 4108 Inst->eraseFromParent(); 4109 State.reset(Def, NewI, Part); 4110 } 4111 } 4112 } 4113 } 4114 4115 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { 4116 // Insert truncates and extends for any truncated instructions as hints to 4117 // InstCombine. 4118 if (VF.isVector()) 4119 truncateToMinimalBitwidths(State); 4120 4121 // Fix widened non-induction PHIs by setting up the PHI operands. 4122 if (OrigPHIsToFix.size()) { 4123 assert(EnableVPlanNativePath && 4124 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 4125 fixNonInductionPHIs(State); 4126 } 4127 4128 // At this point every instruction in the original loop is widened to a 4129 // vector form. Now we need to fix the recurrences in the loop. These PHI 4130 // nodes are currently empty because we did not want to introduce cycles. 4131 // This is the second stage of vectorizing recurrences. 4132 fixCrossIterationPHIs(State); 4133 4134 // Forget the original basic block. 4135 PSE.getSE()->forgetLoop(OrigLoop); 4136 4137 // If we inserted an edge from the middle block to the unique exit block, 4138 // update uses outside the loop (phis) to account for the newly inserted 4139 // edge. 4140 if (!Cost->requiresScalarEpilogue(VF)) { 4141 // Fix-up external users of the induction variables. 4142 for (auto &Entry : Legal->getInductionVars()) 4143 fixupIVUsers(Entry.first, Entry.second, 4144 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 4145 IVEndValues[Entry.first], LoopMiddleBlock); 4146 4147 fixLCSSAPHIs(State); 4148 } 4149 4150 for (Instruction *PI : PredicatedInstructions) 4151 sinkScalarOperands(&*PI); 4152 4153 // Remove redundant induction instructions. 
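// For instance (illustrative), unrolling can leave behind several identical
// getelementptr or extractelement instructions computing the same address or
// lane; cse() keeps the first occurrence in the block and rewrites uses of the
// later duplicates to it.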
4154 cse(LoopVectorBody);
4155
4156 // Set/update profile weights for the vector and remainder loops as original
4157 // loop iterations are now distributed among them. Note that the original loop,
4158 // represented by LoopScalarBody, becomes the remainder loop after vectorization.
4159 //
4160 // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
4161 // end up with a slightly less accurate result, but that should be OK since
4162 // the profile is not inherently precise anyway. Note also that a possible
4163 // bypass of the vector code caused by legality checks is ignored, optimistically
4164 // assigning all the weight to the vector loop.
4165 //
4166 // For scalable vectorization we can't know at compile time how many iterations
4167 // of the loop are handled in one vector iteration, so instead assume a pessimistic
4168 // vscale of '1'.
4169 setProfileInfoAfterUnrolling(
4170 LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4171 LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4172 }
4173
4174 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4175 // In order to support recurrences we need to be able to vectorize Phi nodes.
4176 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4177 // stage #2: We now need to fix the recurrences by adding incoming edges to
4178 // the currently empty PHI nodes. At this point every instruction in the
4179 // original loop is widened to a vector form so we can use them to construct
4180 // the incoming edges.
4181 VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
4182 for (VPRecipeBase &R : Header->phis()) {
4183 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
4184 fixReduction(ReductionPhi, State);
4185 else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
4186 fixFirstOrderRecurrence(FOR, State);
4187 }
4188 }
4189
4190 void InnerLoopVectorizer::fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR,
4191 VPTransformState &State) {
4192 // This is the second phase of vectorizing first-order recurrences. An
4193 // overview of the transformation is described below. Suppose we have the
4194 // following loop.
4195 //
4196 // for (int i = 0; i < n; ++i)
4197 // b[i] = a[i] - a[i - 1];
4198 //
4199 // There is a first-order recurrence on "a". For this loop, the shorthand
4200 // scalar IR looks like:
4201 //
4202 // scalar.ph:
4203 // s_init = a[-1]
4204 // br scalar.body
4205 //
4206 // scalar.body:
4207 // i = phi [0, scalar.ph], [i+1, scalar.body]
4208 // s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4209 // s2 = a[i]
4210 // b[i] = s2 - s1
4211 // br cond, scalar.body, ...
4212 //
4213 // In this example, s1 is a recurrence because its value depends on the
4214 // previous iteration. In the first phase of vectorization, we created a
4215 // vector phi v1 for s1. We now complete the vectorization and produce the
4216 // shorthand vector IR shown below (for VF = 4, UF = 1).
4217 // 4218 // vector.ph: 4219 // v_init = vector(..., ..., ..., a[-1]) 4220 // br vector.body 4221 // 4222 // vector.body 4223 // i = phi [0, vector.ph], [i+4, vector.body] 4224 // v1 = phi [v_init, vector.ph], [v2, vector.body] 4225 // v2 = a[i, i+1, i+2, i+3]; 4226 // v3 = vector(v1(3), v2(0, 1, 2)) 4227 // b[i, i+1, i+2, i+3] = v2 - v3 4228 // br cond, vector.body, middle.block 4229 // 4230 // middle.block: 4231 // x = v2(3) 4232 // br scalar.ph 4233 // 4234 // scalar.ph: 4235 // s_init = phi [x, middle.block], [a[-1], otherwise] 4236 // br scalar.body 4237 // 4238 // After execution completes the vector loop, we extract the next value of 4239 // the recurrence (x) to use as the initial value in the scalar loop. 4240 4241 // Extract the last vector element in the middle block. This will be the 4242 // initial value for the recurrence when jumping to the scalar loop. 4243 VPValue *PreviousDef = PhiR->getBackedgeValue(); 4244 Value *Incoming = State.get(PreviousDef, UF - 1); 4245 auto *ExtractForScalar = Incoming; 4246 auto *IdxTy = Builder.getInt32Ty(); 4247 if (VF.isVector()) { 4248 auto *One = ConstantInt::get(IdxTy, 1); 4249 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4250 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4251 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 4252 ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, 4253 "vector.recur.extract"); 4254 } 4255 // Extract the second last element in the middle block if the 4256 // Phi is used outside the loop. We need to extract the phi itself 4257 // and not the last element (the phi update in the current iteration). This 4258 // will be the value when jumping to the exit block from the LoopMiddleBlock, 4259 // when the scalar loop is not run at all. 4260 Value *ExtractForPhiUsedOutsideLoop = nullptr; 4261 if (VF.isVector()) { 4262 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4263 auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2)); 4264 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 4265 Incoming, Idx, "vector.recur.extract.for.phi"); 4266 } else if (UF > 1) 4267 // When loop is unrolled without vectorizing, initialize 4268 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value 4269 // of `Incoming`. This is analogous to the vectorized case above: extracting 4270 // the second last element when VF > 1. 4271 ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2); 4272 4273 // Fix the initial value of the original recurrence in the scalar loop. 4274 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 4275 PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue()); 4276 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 4277 auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue(); 4278 for (auto *BB : predecessors(LoopScalarPreHeader)) { 4279 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 4280 Start->addIncoming(Incoming, BB); 4281 } 4282 4283 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start); 4284 Phi->setName("scalar.recur"); 4285 4286 // Finally, fix users of the recurrence outside the loop. The users will need 4287 // either the last value of the scalar recurrence or the last value of the 4288 // vector recurrence we extracted in the middle block. Since the loop is in 4289 // LCSSA form, we just need to find all the phi nodes for the original scalar 4290 // recurrence in the exit block, and then add an edge for the middle block. 
4291 // Note that LCSSA does not imply single entry when the original scalar loop
4292 // had multiple exiting edges (as we always run the last iteration in the
4293 // scalar epilogue); in that case, there is no edge from middle to exit and
4294 // thus no phis which need to be updated.
4295 if (!Cost->requiresScalarEpilogue(VF))
4296 for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4297 if (any_of(LCSSAPhi.incoming_values(),
4298 [Phi](Value *V) { return V == Phi; }))
4299 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4300 }
4301
4302 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
4303 VPTransformState &State) {
4304 PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
4305 // Get its reduction variable descriptor.
4306 assert(Legal->isReductionVariable(OrigPhi) &&
4307 "Unable to find the reduction variable");
4308 const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
4309
4310 RecurKind RK = RdxDesc.getRecurrenceKind();
4311 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4312 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4313 setDebugLocFromInst(ReductionStartValue);
4314
4315 VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
4316 // This is the vector-clone of the value that leaves the loop.
4317 Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4318
4319 // Wrap flags are in general invalid after vectorization, clear them.
4320 clearReductionWrapFlags(RdxDesc, State);
4321
4322 // Before each round, move the insertion point right between
4323 // the PHIs and the values we are going to write.
4324 // This allows us to write both PHINodes and the extractelement
4325 // instructions.
4326 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4327
4328 setDebugLocFromInst(LoopExitInst);
4329
4330 Type *PhiTy = OrigPhi->getType();
4331 // If tail is folded by masking, the vector value to leave the loop should be
4332 // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4333 // instead of the former. For an inloop reduction the reduction will already
4334 // be predicated, and does not need to be handled here.
4335 if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
4336 for (unsigned Part = 0; Part < UF; ++Part) {
4337 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4338 Value *Sel = nullptr;
4339 for (User *U : VecLoopExitInst->users()) {
4340 if (isa<SelectInst>(U)) {
4341 assert(!Sel && "Reduction exit feeding two selects");
4342 Sel = U;
4343 } else
4344 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4345 }
4346 assert(Sel && "Reduction exit feeds no select");
4347 State.reset(LoopExitInstDef, Sel, Part);
4348
4349 // If the target can create a predicated operator for the reduction at no
4350 // extra cost in the loop (for example a predicated vadd), it can be
4351 // cheaper for the select to remain in the loop than be sunk out of it,
4352 // and so use the select value for the phi instead of the old
4353 // LoopExitValue.
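// For example (illustrative): on a target with a predicated vector add, a
// 'select(mask, add(vec.phi, x), vec.phi)' kept inside the loop can fold into
// a masked add, so below we rewire the reduction phi to take the select,
// rather than the unpredicated add, as its backedge value.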
4354 if (PreferPredicatedReductionSelect || 4355 TTI->preferPredicatedReductionSelect( 4356 RdxDesc.getOpcode(), PhiTy, 4357 TargetTransformInfo::ReductionFlags())) { 4358 auto *VecRdxPhi = 4359 cast<PHINode>(State.get(PhiR->getVPSingleValue(), Part)); 4360 VecRdxPhi->setIncomingValueForBlock( 4361 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 4362 } 4363 } 4364 } 4365 4366 // If the vector reduction can be performed in a smaller type, we truncate 4367 // then extend the loop exit value to enable InstCombine to evaluate the 4368 // entire expression in the smaller type. 4369 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 4370 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); 4371 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4372 Builder.SetInsertPoint( 4373 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4374 VectorParts RdxParts(UF); 4375 for (unsigned Part = 0; Part < UF; ++Part) { 4376 RdxParts[Part] = State.get(LoopExitInstDef, Part); 4377 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4378 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 4379 : Builder.CreateZExt(Trunc, VecTy); 4380 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 4381 UI != RdxParts[Part]->user_end();) 4382 if (*UI != Trunc) { 4383 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 4384 RdxParts[Part] = Extnd; 4385 } else { 4386 ++UI; 4387 } 4388 } 4389 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4390 for (unsigned Part = 0; Part < UF; ++Part) { 4391 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4392 State.reset(LoopExitInstDef, RdxParts[Part], Part); 4393 } 4394 } 4395 4396 // Reduce all of the unrolled parts into a single vector. 4397 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 4398 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 4399 4400 // The middle block terminator has already been assigned a DebugLoc here (the 4401 // OrigLoop's single latch terminator). We want the whole middle block to 4402 // appear to execute on this line because: (a) it is all compiler generated, 4403 // (b) these instructions are always executed after evaluating the latch 4404 // conditional branch, and (c) other passes may add new predecessors which 4405 // terminate on this line. This is the easiest way to ensure we don't 4406 // accidentally cause an extra step back into the loop while debugging. 4407 setDebugLocFromInst(LoopMiddleBlock->getTerminator()); 4408 if (PhiR->isOrdered()) 4409 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); 4410 else { 4411 // Floating-point operations should have some FMF to enable the reduction. 4412 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 4413 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 4414 for (unsigned Part = 1; Part < UF; ++Part) { 4415 Value *RdxPart = State.get(LoopExitInstDef, Part); 4416 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 4417 ReducedPartRdx = Builder.CreateBinOp( 4418 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 4419 } else { 4420 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4421 } 4422 } 4423 } 4424 4425 // Create the reduction after the loop. Note that inloop reductions create the 4426 // target reduction in the loop using a Reduction recipe. 
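// In shorthand (illustrative only), for an integer add reduction with VF = 4
// and UF = 2, the part-combining loop above and the createTargetReduction call
// below amount to:
//   %bin.rdx = add <4 x i32> %part1, %part0
//   %rdx = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %bin.rdx)
// optionally followed by a sext/zext back to the original phi type when the
// reduction was evaluated in a narrower type.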
4427 if (VF.isVector() && !PhiR->isInLoop()) { 4428 ReducedPartRdx = 4429 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx); 4430 // If the reduction can be performed in a smaller type, we need to extend 4431 // the reduction to the wider type before we branch to the original loop. 4432 if (PhiTy != RdxDesc.getRecurrenceType()) 4433 ReducedPartRdx = RdxDesc.isSigned() 4434 ? Builder.CreateSExt(ReducedPartRdx, PhiTy) 4435 : Builder.CreateZExt(ReducedPartRdx, PhiTy); 4436 } 4437 4438 // Create a phi node that merges control-flow from the backedge-taken check 4439 // block and the middle block. 4440 PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx", 4441 LoopScalarPreHeader->getTerminator()); 4442 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 4443 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 4444 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4445 4446 // Now, we need to fix the users of the reduction variable 4447 // inside and outside of the scalar remainder loop. 4448 4449 // We know that the loop is in LCSSA form. We need to update the PHI nodes 4450 // in the exit blocks. See comment on analogous loop in 4451 // fixFirstOrderRecurrence for a more complete explaination of the logic. 4452 if (!Cost->requiresScalarEpilogue(VF)) 4453 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4454 if (any_of(LCSSAPhi.incoming_values(), 4455 [LoopExitInst](Value *V) { return V == LoopExitInst; })) 4456 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4457 4458 // Fix the scalar loop reduction variable with the incoming reduction sum 4459 // from the vector body and from the backedge value. 4460 int IncomingEdgeBlockIdx = 4461 OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4462 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4463 // Pick the other block. 4464 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 4465 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4466 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4467 } 4468 4469 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc, 4470 VPTransformState &State) { 4471 RecurKind RK = RdxDesc.getRecurrenceKind(); 4472 if (RK != RecurKind::Add && RK != RecurKind::Mul) 4473 return; 4474 4475 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 4476 assert(LoopExitInstr && "null loop exit instruction"); 4477 SmallVector<Instruction *, 8> Worklist; 4478 SmallPtrSet<Instruction *, 8> Visited; 4479 Worklist.push_back(LoopExitInstr); 4480 Visited.insert(LoopExitInstr); 4481 4482 while (!Worklist.empty()) { 4483 Instruction *Cur = Worklist.pop_back_val(); 4484 if (isa<OverflowingBinaryOperator>(Cur)) 4485 for (unsigned Part = 0; Part < UF; ++Part) { 4486 // FIXME: Should not rely on getVPValue at this point. 4487 Value *V = State.get(State.Plan->getVPValue(Cur, true), Part); 4488 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4489 } 4490 4491 for (User *U : Cur->users()) { 4492 Instruction *UI = cast<Instruction>(U); 4493 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4494 Visited.insert(UI).second) 4495 Worklist.push_back(UI); 4496 } 4497 } 4498 } 4499 4500 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) { 4501 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4502 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) 4503 // Some phis were already hand updated by the reduction and recurrence 4504 // code above, leave them alone. 
4505 continue; 4506 4507 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4508 // Non-instruction incoming values will have only one value. 4509 4510 VPLane Lane = VPLane::getFirstLane(); 4511 if (isa<Instruction>(IncomingValue) && 4512 !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue), 4513 VF)) 4514 Lane = VPLane::getLastLaneForVF(VF); 4515 4516 // Can be a loop invariant incoming value or the last scalar value to be 4517 // extracted from the vectorized loop. 4518 // FIXME: Should not rely on getVPValue at this point. 4519 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4520 Value *lastIncomingValue = 4521 OrigLoop->isLoopInvariant(IncomingValue) 4522 ? IncomingValue 4523 : State.get(State.Plan->getVPValue(IncomingValue, true), 4524 VPIteration(UF - 1, Lane)); 4525 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4526 } 4527 } 4528 4529 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4530 // The basic block and loop containing the predicated instruction. 4531 auto *PredBB = PredInst->getParent(); 4532 auto *VectorLoop = LI->getLoopFor(PredBB); 4533 4534 // Initialize a worklist with the operands of the predicated instruction. 4535 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4536 4537 // Holds instructions that we need to analyze again. An instruction may be 4538 // reanalyzed if we don't yet know if we can sink it or not. 4539 SmallVector<Instruction *, 8> InstsToReanalyze; 4540 4541 // Returns true if a given use occurs in the predicated block. Phi nodes use 4542 // their operands in their corresponding predecessor blocks. 4543 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4544 auto *I = cast<Instruction>(U.getUser()); 4545 BasicBlock *BB = I->getParent(); 4546 if (auto *Phi = dyn_cast<PHINode>(I)) 4547 BB = Phi->getIncomingBlock( 4548 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4549 return BB == PredBB; 4550 }; 4551 4552 // Iteratively sink the scalarized operands of the predicated instruction 4553 // into the block we created for it. When an instruction is sunk, it's 4554 // operands are then added to the worklist. The algorithm ends after one pass 4555 // through the worklist doesn't sink a single instruction. 4556 bool Changed; 4557 do { 4558 // Add the instructions that need to be reanalyzed to the worklist, and 4559 // reset the changed indicator. 4560 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4561 InstsToReanalyze.clear(); 4562 Changed = false; 4563 4564 while (!Worklist.empty()) { 4565 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4566 4567 // We can't sink an instruction if it is a phi node, is not in the loop, 4568 // or may have side effects. 4569 if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) || 4570 I->mayHaveSideEffects()) 4571 continue; 4572 4573 // If the instruction is already in PredBB, check if we can sink its 4574 // operands. In that case, VPlan's sinkScalarOperands() succeeded in 4575 // sinking the scalar instruction I, hence it appears in PredBB; but it 4576 // may have failed to sink I's operands (recursively), which we try 4577 // (again) here. 4578 if (I->getParent() == PredBB) { 4579 Worklist.insert(I->op_begin(), I->op_end()); 4580 continue; 4581 } 4582 4583 // It's legal to sink the instruction if all its uses occur in the 4584 // predicated block. Otherwise, there's nothing to do yet, and we may 4585 // need to reanalyze the instruction. 
4586 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4587 InstsToReanalyze.push_back(I); 4588 continue; 4589 } 4590 4591 // Move the instruction to the beginning of the predicated block, and add 4592 // it's operands to the worklist. 4593 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4594 Worklist.insert(I->op_begin(), I->op_end()); 4595 4596 // The sinking may have enabled other instructions to be sunk, so we will 4597 // need to iterate. 4598 Changed = true; 4599 } 4600 } while (Changed); 4601 } 4602 4603 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { 4604 for (PHINode *OrigPhi : OrigPHIsToFix) { 4605 VPWidenPHIRecipe *VPPhi = 4606 cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi)); 4607 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0)); 4608 // Make sure the builder has a valid insert point. 4609 Builder.SetInsertPoint(NewPhi); 4610 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4611 VPValue *Inc = VPPhi->getIncomingValue(i); 4612 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4613 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4614 } 4615 } 4616 } 4617 4618 bool InnerLoopVectorizer::useOrderedReductions(RecurrenceDescriptor &RdxDesc) { 4619 return Cost->useOrderedReductions(RdxDesc); 4620 } 4621 4622 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, 4623 VPUser &Operands, unsigned UF, 4624 ElementCount VF, bool IsPtrLoopInvariant, 4625 SmallBitVector &IsIndexLoopInvariant, 4626 VPTransformState &State) { 4627 // Construct a vector GEP by widening the operands of the scalar GEP as 4628 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 4629 // results in a vector of pointers when at least one operand of the GEP 4630 // is vector-typed. Thus, to keep the representation compact, we only use 4631 // vector-typed operands for loop-varying values. 4632 4633 if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 4634 // If we are vectorizing, but the GEP has only loop-invariant operands, 4635 // the GEP we build (by only using vector-typed operands for 4636 // loop-varying values) would be a scalar pointer. Thus, to ensure we 4637 // produce a vector of pointers, we need to either arbitrarily pick an 4638 // operand to broadcast, or broadcast a clone of the original GEP. 4639 // Here, we broadcast a clone of the original. 4640 // 4641 // TODO: If at some point we decide to scalarize instructions having 4642 // loop-invariant operands, this special case will no longer be 4643 // required. We would add the scalarization decision to 4644 // collectLoopScalars() and teach getVectorValue() to broadcast 4645 // the lane-zero scalar value. 4646 auto *Clone = Builder.Insert(GEP->clone()); 4647 for (unsigned Part = 0; Part < UF; ++Part) { 4648 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 4649 State.set(VPDef, EntryPart, Part); 4650 addMetadata(EntryPart, GEP); 4651 } 4652 } else { 4653 // If the GEP has at least one loop-varying operand, we are sure to 4654 // produce a vector of pointers. But if we are only unrolling, we want 4655 // to produce a scalar GEP for each unroll part. Thus, the GEP we 4656 // produce with the code below will be scalar (if VF == 1) or vector 4657 // (otherwise). Note that for the unroll-only case, we still maintain 4658 // values in the vector mapping with initVector, as we do for other 4659 // instructions. 4660 for (unsigned Part = 0; Part < UF; ++Part) { 4661 // The pointer operand of the new GEP. 
If it's loop-invariant, we 4662 // won't broadcast it. 4663 auto *Ptr = IsPtrLoopInvariant 4664 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 4665 : State.get(Operands.getOperand(0), Part); 4666 4667 // Collect all the indices for the new GEP. If any index is 4668 // loop-invariant, we won't broadcast it. 4669 SmallVector<Value *, 4> Indices; 4670 for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) { 4671 VPValue *Operand = Operands.getOperand(I); 4672 if (IsIndexLoopInvariant[I - 1]) 4673 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 4674 else 4675 Indices.push_back(State.get(Operand, Part)); 4676 } 4677 4678 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 4679 // but it should be a vector, otherwise. 4680 auto *NewGEP = 4681 GEP->isInBounds() 4682 ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, 4683 Indices) 4684 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); 4685 assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) && 4686 "NewGEP is not a pointer vector"); 4687 State.set(VPDef, NewGEP, Part); 4688 addMetadata(NewGEP, GEP); 4689 } 4690 } 4691 } 4692 4693 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4694 VPWidenPHIRecipe *PhiR, 4695 VPTransformState &State) { 4696 PHINode *P = cast<PHINode>(PN); 4697 if (EnableVPlanNativePath) { 4698 // Currently we enter here in the VPlan-native path for non-induction 4699 // PHIs where all control flow is uniform. We simply widen these PHIs. 4700 // Create a vector phi with no operands - the vector phi operands will be 4701 // set at the end of vector code generation. 4702 Type *VecTy = (State.VF.isScalar()) 4703 ? PN->getType() 4704 : VectorType::get(PN->getType(), State.VF); 4705 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4706 State.set(PhiR, VecPhi, 0); 4707 OrigPHIsToFix.push_back(P); 4708 4709 return; 4710 } 4711 4712 assert(PN->getParent() == OrigLoop->getHeader() && 4713 "Non-header phis should have been handled elsewhere"); 4714 4715 // In order to support recurrences we need to be able to vectorize Phi nodes. 4716 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4717 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4718 // this value when we vectorize all of the instructions that use the PHI. 4719 4720 assert(!Legal->isReductionVariable(P) && 4721 "reductions should be handled elsewhere"); 4722 4723 setDebugLocFromInst(P); 4724 4725 // This PHINode must be an induction variable. 4726 // Make sure that we know about it. 4727 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4728 4729 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4730 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4731 4732 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4733 // which can be found from the original scalar operations. 4734 switch (II.getKind()) { 4735 case InductionDescriptor::IK_NoInduction: 4736 llvm_unreachable("Unknown induction"); 4737 case InductionDescriptor::IK_IntInduction: 4738 case InductionDescriptor::IK_FpInduction: 4739 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4740 case InductionDescriptor::IK_PtrInduction: { 4741 // Handle the pointer induction variable case. 
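// In shorthand (illustrative only): if the scalar loop advances the pointer by
// 'Step' elements per iteration, the scalarized path below emits per-lane
// addresses of the form
//   base + (index + Part * VF + Lane) * Step
// via emitTransformedIndex, whereas the widened path builds a pointer phi that
// is stepped by VF * UF * Step elements per vector iteration plus, for each
// part, a GEP with the vector offsets
//   <(Part*VF + 0) * Step, ..., (Part*VF + VF-1) * Step>.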
4742 assert(P->getType()->isPointerTy() && "Unexpected type."); 4743 4744 if (Cost->isScalarAfterVectorization(P, State.VF)) { 4745 // This is the normalized GEP that starts counting at zero. 4746 Value *PtrInd = 4747 Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); 4748 // Determine the number of scalars we need to generate for each unroll 4749 // iteration. If the instruction is uniform, we only need to generate the 4750 // first lane. Otherwise, we generate all VF values. 4751 bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF); 4752 unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue(); 4753 4754 bool NeedsVectorIndex = !IsUniform && VF.isScalable(); 4755 Value *UnitStepVec = nullptr, *PtrIndSplat = nullptr; 4756 if (NeedsVectorIndex) { 4757 Type *VecIVTy = VectorType::get(PtrInd->getType(), VF); 4758 UnitStepVec = Builder.CreateStepVector(VecIVTy); 4759 PtrIndSplat = Builder.CreateVectorSplat(VF, PtrInd); 4760 } 4761 4762 for (unsigned Part = 0; Part < UF; ++Part) { 4763 Value *PartStart = createStepForVF( 4764 Builder, ConstantInt::get(PtrInd->getType(), Part), VF); 4765 4766 if (NeedsVectorIndex) { 4767 // Here we cache the whole vector, which means we can support the 4768 // extraction of any lane. However, in some cases the extractelement 4769 // instruction that is generated for scalar uses of this vector (e.g. 4770 // a load instruction) is not folded away. Therefore we still 4771 // calculate values for the first n lanes to avoid redundant moves 4772 // (when extracting the 0th element) and to produce scalar code (i.e. 4773 // additional add/gep instructions instead of expensive extractelement 4774 // instructions) when extracting higher-order elements. 4775 Value *PartStartSplat = Builder.CreateVectorSplat(VF, PartStart); 4776 Value *Indices = Builder.CreateAdd(PartStartSplat, UnitStepVec); 4777 Value *GlobalIndices = Builder.CreateAdd(PtrIndSplat, Indices); 4778 Value *SclrGep = 4779 emitTransformedIndex(Builder, GlobalIndices, PSE.getSE(), DL, II); 4780 SclrGep->setName("next.gep"); 4781 State.set(PhiR, SclrGep, Part); 4782 } 4783 4784 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4785 Value *Idx = Builder.CreateAdd( 4786 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 4787 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4788 Value *SclrGep = 4789 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 4790 SclrGep->setName("next.gep"); 4791 State.set(PhiR, SclrGep, VPIteration(Part, Lane)); 4792 } 4793 } 4794 return; 4795 } 4796 assert(isa<SCEVConstant>(II.getStep()) && 4797 "Induction step not a SCEV constant!"); 4798 Type *PhiType = II.getStep()->getType(); 4799 4800 // Build a pointer phi 4801 Value *ScalarStartValue = II.getStartValue(); 4802 Type *ScStValueType = ScalarStartValue->getType(); 4803 PHINode *NewPointerPhi = 4804 PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); 4805 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4806 4807 // A pointer induction, performed by using a gep 4808 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4809 Instruction *InductionLoc = LoopLatch->getTerminator(); 4810 const SCEV *ScalarStep = II.getStep(); 4811 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4812 Value *ScalarStepValue = 4813 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4814 Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF); 4815 Value *NumUnrolledElems = 4816 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 4817 Value *InductionGEP 
= GetElementPtrInst::Create( 4818 II.getElementType(), NewPointerPhi, 4819 Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 4820 InductionLoc); 4821 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4822 4823 // Create UF many actual address geps that use the pointer 4824 // phi as base and a vectorized version of the step value 4825 // (<step*0, ..., step*N>) as offset. 4826 for (unsigned Part = 0; Part < State.UF; ++Part) { 4827 Type *VecPhiType = VectorType::get(PhiType, State.VF); 4828 Value *StartOffsetScalar = 4829 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 4830 Value *StartOffset = 4831 Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 4832 // Create a vector of consecutive numbers from zero to VF. 4833 StartOffset = 4834 Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType)); 4835 4836 Value *GEP = Builder.CreateGEP( 4837 II.getElementType(), NewPointerPhi, 4838 Builder.CreateMul( 4839 StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue), 4840 "vector.gep")); 4841 State.set(PhiR, GEP, Part); 4842 } 4843 } 4844 } 4845 } 4846 4847 /// A helper function for checking whether an integer division-related 4848 /// instruction may divide by zero (in which case it must be predicated if 4849 /// executed conditionally in the scalar code). 4850 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4851 /// Non-zero divisors that are non compile-time constants will not be 4852 /// converted into multiplication, so we will still end up scalarizing 4853 /// the division, but can do so w/o predication. 4854 static bool mayDivideByZero(Instruction &I) { 4855 assert((I.getOpcode() == Instruction::UDiv || 4856 I.getOpcode() == Instruction::SDiv || 4857 I.getOpcode() == Instruction::URem || 4858 I.getOpcode() == Instruction::SRem) && 4859 "Unexpected instruction"); 4860 Value *Divisor = I.getOperand(1); 4861 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4862 return !CInt || CInt->isZero(); 4863 } 4864 4865 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def, 4866 VPUser &User, 4867 VPTransformState &State) { 4868 switch (I.getOpcode()) { 4869 case Instruction::Call: 4870 case Instruction::Br: 4871 case Instruction::PHI: 4872 case Instruction::GetElementPtr: 4873 case Instruction::Select: 4874 llvm_unreachable("This instruction is handled by a different recipe."); 4875 case Instruction::UDiv: 4876 case Instruction::SDiv: 4877 case Instruction::SRem: 4878 case Instruction::URem: 4879 case Instruction::Add: 4880 case Instruction::FAdd: 4881 case Instruction::Sub: 4882 case Instruction::FSub: 4883 case Instruction::FNeg: 4884 case Instruction::Mul: 4885 case Instruction::FMul: 4886 case Instruction::FDiv: 4887 case Instruction::FRem: 4888 case Instruction::Shl: 4889 case Instruction::LShr: 4890 case Instruction::AShr: 4891 case Instruction::And: 4892 case Instruction::Or: 4893 case Instruction::Xor: { 4894 // Just widen unops and binops. 4895 setDebugLocFromInst(&I); 4896 4897 for (unsigned Part = 0; Part < UF; ++Part) { 4898 SmallVector<Value *, 2> Ops; 4899 for (VPValue *VPOp : User.operands()) 4900 Ops.push_back(State.get(VPOp, Part)); 4901 4902 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 4903 4904 if (auto *VecOp = dyn_cast<Instruction>(V)) 4905 VecOp->copyIRFlags(&I); 4906 4907 // Use this vector value for all users of the original instruction. 
4908 State.set(Def, V, Part); 4909 addMetadata(V, &I); 4910 } 4911 4912 break; 4913 } 4914 case Instruction::ICmp: 4915 case Instruction::FCmp: { 4916 // Widen compares. Generate vector compares. 4917 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4918 auto *Cmp = cast<CmpInst>(&I); 4919 setDebugLocFromInst(Cmp); 4920 for (unsigned Part = 0; Part < UF; ++Part) { 4921 Value *A = State.get(User.getOperand(0), Part); 4922 Value *B = State.get(User.getOperand(1), Part); 4923 Value *C = nullptr; 4924 if (FCmp) { 4925 // Propagate fast math flags. 4926 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4927 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4928 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4929 } else { 4930 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4931 } 4932 State.set(Def, C, Part); 4933 addMetadata(C, &I); 4934 } 4935 4936 break; 4937 } 4938 4939 case Instruction::ZExt: 4940 case Instruction::SExt: 4941 case Instruction::FPToUI: 4942 case Instruction::FPToSI: 4943 case Instruction::FPExt: 4944 case Instruction::PtrToInt: 4945 case Instruction::IntToPtr: 4946 case Instruction::SIToFP: 4947 case Instruction::UIToFP: 4948 case Instruction::Trunc: 4949 case Instruction::FPTrunc: 4950 case Instruction::BitCast: { 4951 auto *CI = cast<CastInst>(&I); 4952 setDebugLocFromInst(CI); 4953 4954 /// Vectorize casts. 4955 Type *DestTy = 4956 (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF); 4957 4958 for (unsigned Part = 0; Part < UF; ++Part) { 4959 Value *A = State.get(User.getOperand(0), Part); 4960 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4961 State.set(Def, Cast, Part); 4962 addMetadata(Cast, &I); 4963 } 4964 break; 4965 } 4966 default: 4967 // This instruction is not vectorized by simple widening. 4968 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4969 llvm_unreachable("Unhandled instruction!"); 4970 } // end of switch. 4971 } 4972 4973 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4974 VPUser &ArgOperands, 4975 VPTransformState &State) { 4976 assert(!isa<DbgInfoIntrinsic>(I) && 4977 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4978 setDebugLocFromInst(&I); 4979 4980 Module *M = I.getParent()->getParent()->getParent(); 4981 auto *CI = cast<CallInst>(&I); 4982 4983 SmallVector<Type *, 4> Tys; 4984 for (Value *ArgOperand : CI->arg_operands()) 4985 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4986 4987 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4988 4989 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4990 // version of the instruction. 4991 // Is it beneficial to perform intrinsic call compared to lib call? 4992 bool NeedToScalarize = false; 4993 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4994 InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; 4995 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4996 assert((UseVectorIntrinsic || !NeedToScalarize) && 4997 "Instruction should be scalarized elsewhere."); 4998 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4999 "Either the intrinsic cost or vector call cost must be valid"); 5000 5001 for (unsigned Part = 0; Part < UF; ++Part) { 5002 SmallVector<Type *, 2> TysForDecl = {CI->getType()}; 5003 SmallVector<Value *, 4> Args; 5004 for (auto &I : enumerate(ArgOperands.operands())) { 5005 // Some intrinsics have a scalar argument - don't replace it with a 5006 // vector. 
5007 Value *Arg; 5008 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 5009 Arg = State.get(I.value(), Part); 5010 else { 5011 Arg = State.get(I.value(), VPIteration(0, 0)); 5012 if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) 5013 TysForDecl.push_back(Arg->getType()); 5014 } 5015 Args.push_back(Arg); 5016 } 5017 5018 Function *VectorF; 5019 if (UseVectorIntrinsic) { 5020 // Use vector version of the intrinsic. 5021 if (VF.isVector()) 5022 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 5023 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 5024 assert(VectorF && "Can't retrieve vector intrinsic."); 5025 } else { 5026 // Use vector version of the function call. 5027 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 5028 #ifndef NDEBUG 5029 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 5030 "Can't create vector function."); 5031 #endif 5032 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 5033 } 5034 SmallVector<OperandBundleDef, 1> OpBundles; 5035 CI->getOperandBundlesAsDefs(OpBundles); 5036 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 5037 5038 if (isa<FPMathOperator>(V)) 5039 V->copyFastMathFlags(CI); 5040 5041 State.set(Def, V, Part); 5042 addMetadata(V, &I); 5043 } 5044 } 5045 5046 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef, 5047 VPUser &Operands, 5048 bool InvariantCond, 5049 VPTransformState &State) { 5050 setDebugLocFromInst(&I); 5051 5052 // The condition can be loop invariant but still defined inside the 5053 // loop. This means that we can't just use the original 'cond' value. 5054 // We have to take the 'vectorized' value and pick the first lane. 5055 // Instcombine will make this a no-op. 5056 auto *InvarCond = InvariantCond 5057 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 5058 : nullptr; 5059 5060 for (unsigned Part = 0; Part < UF; ++Part) { 5061 Value *Cond = 5062 InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part); 5063 Value *Op0 = State.get(Operands.getOperand(1), Part); 5064 Value *Op1 = State.get(Operands.getOperand(2), Part); 5065 Value *Sel = Builder.CreateSelect(Cond, Op0, Op1); 5066 State.set(VPDef, Sel, Part); 5067 addMetadata(Sel, &I); 5068 } 5069 } 5070 5071 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 5072 // We should not collect Scalars more than once per VF. Right now, this 5073 // function is called from collectUniformsAndScalars(), which already does 5074 // this check. Collecting Scalars for VF=1 does not make any sense. 5075 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 5076 "This function should not be visited twice for the same VF"); 5077 5078 SmallSetVector<Instruction *, 8> Worklist; 5079 5080 // These sets are used to seed the analysis with pointers used by memory 5081 // accesses that will remain scalar. 5082 SmallSetVector<Instruction *, 8> ScalarPtrs; 5083 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 5084 auto *Latch = TheLoop->getLoopLatch(); 5085 5086 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 5087 // The pointer operands of loads and stores will be scalar as long as the 5088 // memory access is not a gather or scatter operation. The value operand of a 5089 // store will remain scalar if the store is scalarized. 
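// For example (illustrative): for a store that will be scalarized, both its
// pointer and its value operand are scalar uses; for a consecutive (widened)
// load or store only the pointer operand is; for a gather/scatter neither is.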
5090 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
5091 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
5092 assert(WideningDecision != CM_Unknown &&
5093 "Widening decision should be ready at this moment");
5094 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
5095 if (Ptr == Store->getValueOperand())
5096 return WideningDecision == CM_Scalarize;
5097 assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
5098 "Ptr is neither a value nor a pointer operand");
5099 return WideningDecision != CM_GatherScatter;
5100 };
5101
5102 // A helper that returns true if the given value is a bitcast or
5103 // getelementptr instruction contained in the loop.
5104 auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
5105 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
5106 isa<GetElementPtrInst>(V)) &&
5107 !TheLoop->isLoopInvariant(V);
5108 };
5109
5110 auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
5111 if (!isa<PHINode>(Ptr) ||
5112 !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
5113 return false;
5114 auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
5115 if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
5116 return false;
5117 return isScalarUse(MemAccess, Ptr);
5118 };
5119
5120 // A helper that evaluates a memory access's use of a pointer. If the
5121 // pointer is actually the pointer induction of a loop, it is inserted
5122 // into Worklist. If the use will be a scalar use, and the
5123 // pointer is only used by memory accesses, we place the pointer in
5124 // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs.
5125 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5126 if (isScalarPtrInduction(MemAccess, Ptr)) {
5127 Worklist.insert(cast<Instruction>(Ptr));
5128 LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
5129 << "\n");
5130
5131 Instruction *Update = cast<Instruction>(
5132 cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
5133 ScalarPtrs.insert(Update);
5134 return;
5135 }
5136 // We only care about bitcast and getelementptr instructions contained in
5137 // the loop.
5138 if (!isLoopVaryingBitCastOrGEP(Ptr))
5139 return;
5140
5141 // If the pointer has already been identified as scalar (e.g., if it was
5142 // also identified as uniform), there's nothing to do.
5143 auto *I = cast<Instruction>(Ptr);
5144 if (Worklist.count(I))
5145 return;
5146
5147 // If the use of the pointer will be a scalar use, and all users of the
5148 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5149 // place the pointer in PossibleNonScalarPtrs.
5150 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
5151 return isa<LoadInst>(U) || isa<StoreInst>(U);
5152 }))
5153 ScalarPtrs.insert(I);
5154 else
5155 PossibleNonScalarPtrs.insert(I);
5156 };
5157
5158 // We seed the scalars analysis with two classes of instructions: (1)
5159 // instructions marked uniform-after-vectorization and (2) bitcast,
5160 // getelementptr and (pointer) phi instructions used by memory accesses
5161 // requiring a scalar use.
5162 //
5163 // (1) Add to the worklist all instructions that have been identified as
5164 // uniform-after-vectorization.
5165 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5166
5167 // (2) Add to the worklist all bitcast and getelementptr instructions used by
5168 // memory accesses requiring a scalar use.
The pointer operands of loads and 5169 // stores will be scalar as long as the memory accesses is not a gather or 5170 // scatter operation. The value operand of a store will remain scalar if the 5171 // store is scalarized. 5172 for (auto *BB : TheLoop->blocks()) 5173 for (auto &I : *BB) { 5174 if (auto *Load = dyn_cast<LoadInst>(&I)) { 5175 evaluatePtrUse(Load, Load->getPointerOperand()); 5176 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 5177 evaluatePtrUse(Store, Store->getPointerOperand()); 5178 evaluatePtrUse(Store, Store->getValueOperand()); 5179 } 5180 } 5181 for (auto *I : ScalarPtrs) 5182 if (!PossibleNonScalarPtrs.count(I)) { 5183 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 5184 Worklist.insert(I); 5185 } 5186 5187 // Insert the forced scalars. 5188 // FIXME: Currently widenPHIInstruction() often creates a dead vector 5189 // induction variable when the PHI user is scalarized. 5190 auto ForcedScalar = ForcedScalars.find(VF); 5191 if (ForcedScalar != ForcedScalars.end()) 5192 for (auto *I : ForcedScalar->second) 5193 Worklist.insert(I); 5194 5195 // Expand the worklist by looking through any bitcasts and getelementptr 5196 // instructions we've already identified as scalar. This is similar to the 5197 // expansion step in collectLoopUniforms(); however, here we're only 5198 // expanding to include additional bitcasts and getelementptr instructions. 5199 unsigned Idx = 0; 5200 while (Idx != Worklist.size()) { 5201 Instruction *Dst = Worklist[Idx++]; 5202 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 5203 continue; 5204 auto *Src = cast<Instruction>(Dst->getOperand(0)); 5205 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 5206 auto *J = cast<Instruction>(U); 5207 return !TheLoop->contains(J) || Worklist.count(J) || 5208 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 5209 isScalarUse(J, Src)); 5210 })) { 5211 Worklist.insert(Src); 5212 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 5213 } 5214 } 5215 5216 // An induction variable will remain scalar if all users of the induction 5217 // variable and induction variable update remain scalar. 5218 for (auto &Induction : Legal->getInductionVars()) { 5219 auto *Ind = Induction.first; 5220 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5221 5222 // If tail-folding is applied, the primary induction variable will be used 5223 // to feed a vector compare. 5224 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 5225 continue; 5226 5227 // Determine if all users of the induction variable are scalar after 5228 // vectorization. 5229 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5230 auto *I = cast<Instruction>(U); 5231 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 5232 }); 5233 if (!ScalarInd) 5234 continue; 5235 5236 // Determine if all users of the induction variable update instruction are 5237 // scalar after vectorization. 5238 auto ScalarIndUpdate = 5239 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5240 auto *I = cast<Instruction>(U); 5241 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 5242 }); 5243 if (!ScalarIndUpdate) 5244 continue; 5245 5246 // The induction variable and its update instruction will remain scalar. 
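// E.g. (illustrative): in 'for (i = 0; i < n; ++i) a[i] = b[i];' where the
// address computations have been scalarized, the in-loop users of 'i' and of
// its update are just each other, the scalar GEPs and the (uniform) exit
// compare, all already in the worklist, so both the phi and its update stay
// scalar. If some user of the induction is widened instead, neither is added
// here.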
5247 Worklist.insert(Ind); 5248 Worklist.insert(IndUpdate); 5249 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 5250 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 5251 << "\n"); 5252 } 5253 5254 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 5255 } 5256 5257 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const { 5258 if (!blockNeedsPredication(I->getParent())) 5259 return false; 5260 switch(I->getOpcode()) { 5261 default: 5262 break; 5263 case Instruction::Load: 5264 case Instruction::Store: { 5265 if (!Legal->isMaskRequired(I)) 5266 return false; 5267 auto *Ptr = getLoadStorePointerOperand(I); 5268 auto *Ty = getLoadStoreType(I); 5269 const Align Alignment = getLoadStoreAlignment(I); 5270 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 5271 TTI.isLegalMaskedGather(Ty, Alignment)) 5272 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 5273 TTI.isLegalMaskedScatter(Ty, Alignment)); 5274 } 5275 case Instruction::UDiv: 5276 case Instruction::SDiv: 5277 case Instruction::SRem: 5278 case Instruction::URem: 5279 return mayDivideByZero(*I); 5280 } 5281 return false; 5282 } 5283 5284 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 5285 Instruction *I, ElementCount VF) { 5286 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 5287 assert(getWideningDecision(I, VF) == CM_Unknown && 5288 "Decision should not be set yet."); 5289 auto *Group = getInterleavedAccessGroup(I); 5290 assert(Group && "Must have a group."); 5291 5292 // If the instruction's allocated size doesn't equal it's type size, it 5293 // requires padding and will be scalarized. 5294 auto &DL = I->getModule()->getDataLayout(); 5295 auto *ScalarTy = getLoadStoreType(I); 5296 if (hasIrregularType(ScalarTy, DL)) 5297 return false; 5298 5299 // Check if masking is required. 5300 // A Group may need masking for one of two reasons: it resides in a block that 5301 // needs predication, or it was decided to use masking to deal with gaps 5302 // (either a gap at the end of a load-access that may result in a speculative 5303 // load, or any gaps in a store-access). 5304 bool PredicatedAccessRequiresMasking = 5305 Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); 5306 bool LoadAccessWithGapsRequiresEpilogMasking = 5307 isa<LoadInst>(I) && Group->requiresScalarEpilogue() && 5308 !isScalarEpilogueAllowed(); 5309 bool StoreAccessWithGapsRequiresMasking = 5310 isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()); 5311 if (!PredicatedAccessRequiresMasking && 5312 !LoadAccessWithGapsRequiresEpilogMasking && 5313 !StoreAccessWithGapsRequiresMasking) 5314 return true; 5315 5316 // If masked interleaving is required, we expect that the user/target had 5317 // enabled it, because otherwise it either wouldn't have been created or 5318 // it should have been invalidated by the CostModel. 5319 assert(useMaskedInterleavedAccesses(TTI) && 5320 "Masked interleave-groups for predicated accesses are not enabled."); 5321 5322 auto *Ty = getLoadStoreType(I); 5323 const Align Alignment = getLoadStoreAlignment(I); 5324 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 5325 : TTI.isLegalMaskedStore(Ty, Alignment); 5326 } 5327 5328 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 5329 Instruction *I, ElementCount VF) { 5330 // Get and ensure we have a valid memory instruction. 
5331 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction"); 5332 5333 auto *Ptr = getLoadStorePointerOperand(I); 5334 auto *ScalarTy = getLoadStoreType(I); 5335 5336 // In order to be widened, the pointer should be consecutive, first of all. 5337 if (!Legal->isConsecutivePtr(ScalarTy, Ptr)) 5338 return false; 5339 5340 // If the instruction is a store located in a predicated block, it will be 5341 // scalarized. 5342 if (isScalarWithPredication(I)) 5343 return false; 5344 5345 // If the instruction's allocated size doesn't equal it's type size, it 5346 // requires padding and will be scalarized. 5347 auto &DL = I->getModule()->getDataLayout(); 5348 if (hasIrregularType(ScalarTy, DL)) 5349 return false; 5350 5351 return true; 5352 } 5353 5354 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 5355 // We should not collect Uniforms more than once per VF. Right now, 5356 // this function is called from collectUniformsAndScalars(), which 5357 // already does this check. Collecting Uniforms for VF=1 does not make any 5358 // sense. 5359 5360 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 5361 "This function should not be visited twice for the same VF"); 5362 5363 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 5364 // not analyze again. Uniforms.count(VF) will return 1. 5365 Uniforms[VF].clear(); 5366 5367 // We now know that the loop is vectorizable! 5368 // Collect instructions inside the loop that will remain uniform after 5369 // vectorization. 5370 5371 // Global values, params and instructions outside of current loop are out of 5372 // scope. 5373 auto isOutOfScope = [&](Value *V) -> bool { 5374 Instruction *I = dyn_cast<Instruction>(V); 5375 return (!I || !TheLoop->contains(I)); 5376 }; 5377 5378 SetVector<Instruction *> Worklist; 5379 BasicBlock *Latch = TheLoop->getLoopLatch(); 5380 5381 // Instructions that are scalar with predication must not be considered 5382 // uniform after vectorization, because that would create an erroneous 5383 // replicating region where only a single instance out of VF should be formed. 5384 // TODO: optimize such seldom cases if found important, see PR40816. 5385 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 5386 if (isOutOfScope(I)) { 5387 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 5388 << *I << "\n"); 5389 return; 5390 } 5391 if (isScalarWithPredication(I)) { 5392 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 5393 << *I << "\n"); 5394 return; 5395 } 5396 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 5397 Worklist.insert(I); 5398 }; 5399 5400 // Start with the conditional branch. If the branch condition is an 5401 // instruction contained in the loop that is only used by the branch, it is 5402 // uniform. 5403 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 5404 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 5405 addToWorklistIfAllowed(Cmp); 5406 5407 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 5408 InstWidening WideningDecision = getWideningDecision(I, VF); 5409 assert(WideningDecision != CM_Unknown && 5410 "Widening decision should be ready at this moment"); 5411 5412 // A uniform memory op is itself uniform. We exclude uniform stores 5413 // here as they demand the last lane, not the first one. 
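// (A store to a loop-invariant address must publish the value of the final iteration, i.e. the last vector lane, which is why it cannot be treated as demanding only lane 0.)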
5414 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 5415 assert(WideningDecision == CM_Scalarize); 5416 return true; 5417 } 5418 5419 return (WideningDecision == CM_Widen || 5420 WideningDecision == CM_Widen_Reverse || 5421 WideningDecision == CM_Interleave); 5422 }; 5423 5424 5425 // Returns true if Ptr is the pointer operand of a memory access instruction 5426 // I, and I is known to not require scalarization. 5427 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 5428 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 5429 }; 5430 5431 // Holds a list of values which are known to have at least one uniform use. 5432 // Note that there may be other uses which aren't uniform. A "uniform use" 5433 // here is something which only demands lane 0 of the unrolled iterations; 5434 // it does not imply that all lanes produce the same value (e.g. this is not 5435 // the usual meaning of uniform) 5436 SetVector<Value *> HasUniformUse; 5437 5438 // Scan the loop for instructions which are either a) known to have only 5439 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 5440 for (auto *BB : TheLoop->blocks()) 5441 for (auto &I : *BB) { 5442 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) { 5443 switch (II->getIntrinsicID()) { 5444 case Intrinsic::sideeffect: 5445 case Intrinsic::experimental_noalias_scope_decl: 5446 case Intrinsic::assume: 5447 case Intrinsic::lifetime_start: 5448 case Intrinsic::lifetime_end: 5449 if (TheLoop->hasLoopInvariantOperands(&I)) 5450 addToWorklistIfAllowed(&I); 5451 break; 5452 default: 5453 break; 5454 } 5455 } 5456 5457 // ExtractValue instructions must be uniform, because the operands are 5458 // known to be loop-invariant. 5459 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) { 5460 assert(isOutOfScope(EVI->getAggregateOperand()) && 5461 "Expected aggregate value to be loop invariant"); 5462 addToWorklistIfAllowed(EVI); 5463 continue; 5464 } 5465 5466 // If there's no pointer operand, there's nothing to do. 5467 auto *Ptr = getLoadStorePointerOperand(&I); 5468 if (!Ptr) 5469 continue; 5470 5471 // A uniform memory op is itself uniform. We exclude uniform stores 5472 // here as they demand the last lane, not the first one. 5473 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 5474 addToWorklistIfAllowed(&I); 5475 5476 if (isUniformDecision(&I, VF)) { 5477 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 5478 HasUniformUse.insert(Ptr); 5479 } 5480 } 5481 5482 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 5483 // demanding) users. Since loops are assumed to be in LCSSA form, this 5484 // disallows uses outside the loop as well. 5485 for (auto *V : HasUniformUse) { 5486 if (isOutOfScope(V)) 5487 continue; 5488 auto *I = cast<Instruction>(V); 5489 auto UsersAreMemAccesses = 5490 llvm::all_of(I->users(), [&](User *U) -> bool { 5491 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 5492 }); 5493 if (UsersAreMemAccesses) 5494 addToWorklistIfAllowed(I); 5495 } 5496 5497 // Expand Worklist in topological order: whenever a new instruction 5498 // is added , its users should be already inside Worklist. It ensures 5499 // a uniform instruction will only be used by uniform instructions. 5500 unsigned idx = 0; 5501 while (idx != Worklist.size()) { 5502 Instruction *I = Worklist[idx++]; 5503 5504 for (auto OV : I->operand_values()) { 5505 // isOutOfScope operands cannot be uniform instructions. 
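// (Such operands are loop-invariant and therefore trivially uniform, but they are not loop instructions that need to be added to the worklist.)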
5506 if (isOutOfScope(OV)) 5507 continue; 5508 // First order recurrence Phi's should typically be considered 5509 // non-uniform. 5510 auto *OP = dyn_cast<PHINode>(OV); 5511 if (OP && Legal->isFirstOrderRecurrence(OP)) 5512 continue; 5513 // If all the users of the operand are uniform, then add the 5514 // operand into the uniform worklist. 5515 auto *OI = cast<Instruction>(OV); 5516 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 5517 auto *J = cast<Instruction>(U); 5518 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 5519 })) 5520 addToWorklistIfAllowed(OI); 5521 } 5522 } 5523 5524 // For an instruction to be added into Worklist above, all its users inside 5525 // the loop should also be in Worklist. However, this condition cannot be 5526 // true for phi nodes that form a cyclic dependence. We must process phi 5527 // nodes separately. An induction variable will remain uniform if all users 5528 // of the induction variable and induction variable update remain uniform. 5529 // The code below handles both pointer and non-pointer induction variables. 5530 for (auto &Induction : Legal->getInductionVars()) { 5531 auto *Ind = Induction.first; 5532 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5533 5534 // Determine if all users of the induction variable are uniform after 5535 // vectorization. 5536 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5537 auto *I = cast<Instruction>(U); 5538 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 5539 isVectorizedMemAccessUse(I, Ind); 5540 }); 5541 if (!UniformInd) 5542 continue; 5543 5544 // Determine if all users of the induction variable update instruction are 5545 // uniform after vectorization. 5546 auto UniformIndUpdate = 5547 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5548 auto *I = cast<Instruction>(U); 5549 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 5550 isVectorizedMemAccessUse(I, IndUpdate); 5551 }); 5552 if (!UniformIndUpdate) 5553 continue; 5554 5555 // The induction variable and its update instruction will remain uniform. 5556 addToWorklistIfAllowed(Ind); 5557 addToWorklistIfAllowed(IndUpdate); 5558 } 5559 5560 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 5561 } 5562 5563 bool LoopVectorizationCostModel::runtimeChecksRequired() { 5564 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 5565 5566 if (Legal->getRuntimePointerChecking()->Need) { 5567 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 5568 "runtime pointer checks needed. Enable vectorization of this " 5569 "loop with '#pragma clang loop vectorize(enable)' when " 5570 "compiling with -Os/-Oz", 5571 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5572 return true; 5573 } 5574 5575 if (!PSE.getUnionPredicate().getPredicates().empty()) { 5576 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 5577 "runtime SCEV checks needed. Enable vectorization of this " 5578 "loop with '#pragma clang loop vectorize(enable)' when " 5579 "compiling with -Os/-Oz", 5580 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5581 return true; 5582 } 5583 5584 // FIXME: Avoid specializing for stride==1 instead of bailing out. 5585 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 5586 reportVectorizationFailure("Runtime stride check for small trip count", 5587 "runtime stride == 1 checks needed. 
Enable vectorization of " 5588 "this loop without such check by compiling with -Os/-Oz", 5589 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5590 return true; 5591 } 5592 5593 return false; 5594 } 5595 5596 ElementCount 5597 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 5598 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) 5599 return ElementCount::getScalable(0); 5600 5601 if (Hints->isScalableVectorizationDisabled()) { 5602 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 5603 "ScalableVectorizationDisabled", ORE, TheLoop); 5604 return ElementCount::getScalable(0); 5605 } 5606 5607 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n"); 5608 5609 auto MaxScalableVF = ElementCount::getScalable( 5610 std::numeric_limits<ElementCount::ScalarTy>::max()); 5611 5612 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 5613 // FIXME: While for scalable vectors this is currently sufficient, this should 5614 // be replaced by a more detailed mechanism that filters out specific VFs, 5615 // instead of invalidating vectorization for a whole set of VFs based on the 5616 // MaxVF. 5617 5618 // Disable scalable vectorization if the loop contains unsupported reductions. 5619 if (!canVectorizeReductions(MaxScalableVF)) { 5620 reportVectorizationInfo( 5621 "Scalable vectorization not supported for the reduction " 5622 "operations found in this loop.", 5623 "ScalableVFUnfeasible", ORE, TheLoop); 5624 return ElementCount::getScalable(0); 5625 } 5626 5627 // Disable scalable vectorization if the loop contains any instructions 5628 // with element types not supported for scalable vectors. 5629 if (any_of(ElementTypesInLoop, [&](Type *Ty) { 5630 return !Ty->isVoidTy() && 5631 !this->TTI.isElementTypeLegalForScalableVector(Ty); 5632 })) { 5633 reportVectorizationInfo("Scalable vectorization is not supported " 5634 "for all element types found in this loop.", 5635 "ScalableVFUnfeasible", ORE, TheLoop); 5636 return ElementCount::getScalable(0); 5637 } 5638 5639 if (Legal->isSafeForAnyVectorWidth()) 5640 return MaxScalableVF; 5641 5642 // Limit MaxScalableVF by the maximum safe dependence distance. 5643 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 5644 if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) { 5645 unsigned VScaleMax = TheFunction->getFnAttribute(Attribute::VScaleRange) 5646 .getVScaleRangeArgs() 5647 .second; 5648 if (VScaleMax > 0) 5649 MaxVScale = VScaleMax; 5650 } 5651 MaxScalableVF = ElementCount::getScalable( 5652 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); 5653 if (!MaxScalableVF) 5654 reportVectorizationInfo( 5655 "Max legal vector width too small, scalable vectorization " 5656 "unfeasible.", 5657 "ScalableVFUnfeasible", ORE, TheLoop); 5658 5659 return MaxScalableVF; 5660 } 5661 5662 FixedScalableVFPair 5663 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount, 5664 ElementCount UserVF) { 5665 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5666 unsigned SmallestType, WidestType; 5667 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5668 5669 // Get the maximum safe dependence distance in bits computed by LAA. 5670 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 5671 // the memory accesses that is most restrictive (involved in the smallest 5672 // dependence distance). 
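// For example, a maximum safe width of 256 bits with a widest loop type of 32 bits gives MaxSafeElements = PowerOf2Floor(256 / 32) = 8.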
5673 unsigned MaxSafeElements = 5674 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 5675 5676 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 5677 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 5678 5679 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 5680 << ".\n"); 5681 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 5682 << ".\n"); 5683 5684 // First analyze the UserVF, fall back if the UserVF should be ignored. 5685 if (UserVF) { 5686 auto MaxSafeUserVF = 5687 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; 5688 5689 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { 5690 // If `VF=vscale x N` is safe, then so is `VF=N` 5691 if (UserVF.isScalable()) 5692 return FixedScalableVFPair( 5693 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); 5694 else 5695 return UserVF; 5696 } 5697 5698 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 5699 5700 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 5701 // is better to ignore the hint and let the compiler choose a suitable VF. 5702 if (!UserVF.isScalable()) { 5703 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5704 << " is unsafe, clamping to max safe VF=" 5705 << MaxSafeFixedVF << ".\n"); 5706 ORE->emit([&]() { 5707 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5708 TheLoop->getStartLoc(), 5709 TheLoop->getHeader()) 5710 << "User-specified vectorization factor " 5711 << ore::NV("UserVectorizationFactor", UserVF) 5712 << " is unsafe, clamping to maximum safe vectorization factor " 5713 << ore::NV("VectorizationFactor", MaxSafeFixedVF); 5714 }); 5715 return MaxSafeFixedVF; 5716 } 5717 5718 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) { 5719 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5720 << " is ignored because scalable vectors are not " 5721 "available.\n"); 5722 ORE->emit([&]() { 5723 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5724 TheLoop->getStartLoc(), 5725 TheLoop->getHeader()) 5726 << "User-specified vectorization factor " 5727 << ore::NV("UserVectorizationFactor", UserVF) 5728 << " is ignored because the target does not support scalable " 5729 "vectors. The compiler will pick a more suitable value."; 5730 }); 5731 } else { 5732 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5733 << " is unsafe. Ignoring scalable UserVF.\n"); 5734 ORE->emit([&]() { 5735 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5736 TheLoop->getStartLoc(), 5737 TheLoop->getHeader()) 5738 << "User-specified vectorization factor " 5739 << ore::NV("UserVectorizationFactor", UserVF) 5740 << " is unsafe. 
Ignoring the hint to let the compiler pick a " 5741 "more suitable value."; 5742 }); 5743 } 5744 } 5745 5746 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 5747 << " / " << WidestType << " bits.\n"); 5748 5749 FixedScalableVFPair Result(ElementCount::getFixed(1), 5750 ElementCount::getScalable(0)); 5751 if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, 5752 WidestType, MaxSafeFixedVF)) 5753 Result.FixedVF = MaxVF; 5754 5755 if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, 5756 WidestType, MaxSafeScalableVF)) 5757 if (MaxVF.isScalable()) { 5758 Result.ScalableVF = MaxVF; 5759 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF 5760 << "\n"); 5761 } 5762 5763 return Result; 5764 } 5765 5766 FixedScalableVFPair 5767 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { 5768 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 5769 // TODO: It may be useful to do so, since it's still likely to be dynamically 5770 // uniform if the target can skip. 5771 reportVectorizationFailure( 5772 "Not inserting runtime ptr check for divergent target", 5773 "runtime pointer checks needed. Not enabled for divergent target", 5774 "CantVersionLoopWithDivergentTarget", ORE, TheLoop); 5775 return FixedScalableVFPair::getNone(); 5776 } 5777 5778 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5779 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 5780 if (TC == 1) { 5781 reportVectorizationFailure("Single iteration (non) loop", 5782 "loop trip count is one, irrelevant for vectorization", 5783 "SingleIterationLoop", ORE, TheLoop); 5784 return FixedScalableVFPair::getNone(); 5785 } 5786 5787 switch (ScalarEpilogueStatus) { 5788 case CM_ScalarEpilogueAllowed: 5789 return computeFeasibleMaxVF(TC, UserVF); 5790 case CM_ScalarEpilogueNotAllowedUsePredicate: 5791 LLVM_FALLTHROUGH; 5792 case CM_ScalarEpilogueNotNeededUsePredicate: 5793 LLVM_DEBUG( 5794 dbgs() << "LV: vector predicate hint/switch found.\n" 5795 << "LV: Not allowing scalar epilogue, creating predicated " 5796 << "vector loop.\n"); 5797 break; 5798 case CM_ScalarEpilogueNotAllowedLowTripLoop: 5799 // fallthrough as a special case of OptForSize 5800 case CM_ScalarEpilogueNotAllowedOptSize: 5801 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize) 5802 LLVM_DEBUG( 5803 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 5804 else 5805 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip " 5806 << "count.\n"); 5807 5808 // Bail if runtime checks are required, which are not good when optimising 5809 // for size. 5810 if (runtimeChecksRequired()) 5811 return FixedScalableVFPair::getNone(); 5812 5813 break; 5814 } 5815 5816 // The only loops we can vectorize without a scalar epilogue are loops with 5817 // a bottom-test and a single exiting block. We'd have to handle the fact 5818 // that not every instruction executes on the last iteration. This will 5819 // require a lane mask which varies through the vector loop body. (TODO) 5820 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 5821 // If there was a tail-folding hint/switch, but we can't fold the tail by 5822 // masking, fallback to a vectorization with a scalar epilogue.
5823 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5824 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5825 "scalar epilogue instead.\n"); 5826 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5827 return computeFeasibleMaxVF(TC, UserVF); 5828 } 5829 return FixedScalableVFPair::getNone(); 5830 } 5831 5832 // Now try the tail folding 5833 5834 // Invalidate interleave groups that require an epilogue if we can't mask 5835 // the interleave-group. 5836 if (!useMaskedInterleavedAccesses(TTI)) { 5837 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() && 5838 "No decisions should have been taken at this point"); 5839 // Note: There is no need to invalidate any cost modeling decisions here, as 5840 // none were taken so far. 5841 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 5842 } 5843 5844 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF); 5845 // Avoid tail folding if the trip count is known to be a multiple of any VF 5846 // we chose. 5847 // FIXME: The condition below pessimises the case for fixed-width vectors, 5848 // when scalable VFs are also candidates for vectorization. 5849 if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) { 5850 ElementCount MaxFixedVF = MaxFactors.FixedVF; 5851 assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) && 5852 "MaxFixedVF must be a power of 2"); 5853 unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC 5854 : MaxFixedVF.getFixedValue(); 5855 ScalarEvolution *SE = PSE.getSE(); 5856 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5857 const SCEV *ExitCount = SE->getAddExpr( 5858 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5859 const SCEV *Rem = SE->getURemExpr( 5860 SE->applyLoopGuards(ExitCount, TheLoop), 5861 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5862 if (Rem->isZero()) { 5863 // Accept MaxFixedVF if we do not have a tail. 5864 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5865 return MaxFactors; 5866 } 5867 } 5868 5869 // For scalable vectors, don't use tail folding as this is currently not yet 5870 // supported. The code is likely to have ended up here if the trip count is 5871 // low, in which case it makes sense not to use scalable vectors. 5872 if (MaxFactors.ScalableVF.isVector()) 5873 MaxFactors.ScalableVF = ElementCount::getScalable(0); 5874 5875 // If we don't know the precise trip count, or if the trip count that we 5876 // found modulo the vectorization factor is not zero, try to fold the tail 5877 // by masking. 5878 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5879 if (Legal->prepareToFoldTailByMasking()) { 5880 FoldTailByMasking = true; 5881 return MaxFactors; 5882 } 5883 5884 // If there was a tail-folding hint/switch, but we can't fold the tail by 5885 // masking, fallback to a vectorization with a scalar epilogue.
5886 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5887 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5888 "scalar epilogue instead.\n"); 5889 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5890 return MaxFactors; 5891 } 5892 5893 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5894 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5895 return FixedScalableVFPair::getNone(); 5896 } 5897 5898 if (TC == 0) { 5899 reportVectorizationFailure( 5900 "Unable to calculate the loop count due to complex control flow", 5901 "unable to calculate the loop count due to complex control flow", 5902 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5903 return FixedScalableVFPair::getNone(); 5904 } 5905 5906 reportVectorizationFailure( 5907 "Cannot optimize for size and vectorize at the same time.", 5908 "cannot optimize for size and vectorize at the same time. " 5909 "Enable vectorization of this loop with '#pragma clang loop " 5910 "vectorize(enable)' when compiling with -Os/-Oz", 5911 "NoTailLoopWithOptForSize", ORE, TheLoop); 5912 return FixedScalableVFPair::getNone(); 5913 } 5914 5915 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( 5916 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, 5917 const ElementCount &MaxSafeVF) { 5918 bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); 5919 TypeSize WidestRegister = TTI.getRegisterBitWidth( 5920 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector 5921 : TargetTransformInfo::RGK_FixedWidthVector); 5922 5923 // Convenience function to return the minimum of two ElementCounts. 5924 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) { 5925 assert((LHS.isScalable() == RHS.isScalable()) && 5926 "Scalable flags must match"); 5927 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS; 5928 }; 5929 5930 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 5931 // Note that both WidestRegister and WidestType may not be a powers of 2. 5932 auto MaxVectorElementCount = ElementCount::get( 5933 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), 5934 ComputeScalableMaxVF); 5935 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); 5936 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5937 << (MaxVectorElementCount * WidestType) << " bits.\n"); 5938 5939 if (!MaxVectorElementCount) { 5940 LLVM_DEBUG(dbgs() << "LV: The target has no " 5941 << (ComputeScalableMaxVF ? "scalable" : "fixed") 5942 << " vector registers.\n"); 5943 return ElementCount::getFixed(1); 5944 } 5945 5946 const auto TripCountEC = ElementCount::getFixed(ConstTripCount); 5947 if (ConstTripCount && 5948 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && 5949 isPowerOf2_32(ConstTripCount)) { 5950 // We need to clamp the VF to be the ConstTripCount. There is no point in 5951 // choosing a higher viable VF as done in the loop below. If 5952 // MaxVectorElementCount is scalable, we only fall back on a fixed VF when 5953 // the TC is less than or equal to the known number of lanes. 
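// For example, a constant trip count of 8 with MaxVectorElementCount = 16 yields a fixed VF of 8, so the loop body is covered by a single vector iteration.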
5954 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 5955 << ConstTripCount << "\n"); 5956 return TripCountEC; 5957 } 5958 5959 ElementCount MaxVF = MaxVectorElementCount; 5960 if (TTI.shouldMaximizeVectorBandwidth() || 5961 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 5962 auto MaxVectorElementCountMaxBW = ElementCount::get( 5963 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), 5964 ComputeScalableMaxVF); 5965 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); 5966 5967 // Collect all viable vectorization factors larger than the default MaxVF 5968 // (i.e. MaxVectorElementCount). 5969 SmallVector<ElementCount, 8> VFs; 5970 for (ElementCount VS = MaxVectorElementCount * 2; 5971 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) 5972 VFs.push_back(VS); 5973 5974 // For each VF calculate its register usage. 5975 auto RUs = calculateRegisterUsage(VFs); 5976 5977 // Select the largest VF which doesn't require more registers than existing 5978 // ones. 5979 for (int i = RUs.size() - 1; i >= 0; --i) { 5980 bool Selected = true; 5981 for (auto &pair : RUs[i].MaxLocalUsers) { 5982 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5983 if (pair.second > TargetNumRegisters) 5984 Selected = false; 5985 } 5986 if (Selected) { 5987 MaxVF = VFs[i]; 5988 break; 5989 } 5990 } 5991 if (ElementCount MinVF = 5992 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { 5993 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 5994 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5995 << ") with target's minimum: " << MinVF << '\n'); 5996 MaxVF = MinVF; 5997 } 5998 } 5999 } 6000 return MaxVF; 6001 } 6002 6003 bool LoopVectorizationCostModel::isMoreProfitable( 6004 const VectorizationFactor &A, const VectorizationFactor &B) const { 6005 InstructionCost CostA = A.Cost; 6006 InstructionCost CostB = B.Cost; 6007 6008 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 6009 6010 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 6011 MaxTripCount) { 6012 // If we are folding the tail and the trip count is a known (possibly small) 6013 // constant, the trip count will be rounded up to an integer number of 6014 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 6015 // which we compare directly. When not folding the tail, the total cost will 6016 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 6017 // approximated with the per-lane cost below instead of using the tripcount 6018 // as here. 6019 auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 6020 auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 6021 return RTCostA < RTCostB; 6022 } 6023 6024 // When set to preferred, for now assume vscale may be larger than 1, so 6025 // that scalable vectorization is slightly favorable over fixed-width 6026 // vectorization. 
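// Note that the comparison below uses '<=' rather than '<', so a cost tie is resolved in favour of the scalable candidate.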
6027 if (Hints->isScalableVectorizationPreferred()) 6028 if (A.Width.isScalable() && !B.Width.isScalable()) 6029 return (CostA * B.Width.getKnownMinValue()) <= 6030 (CostB * A.Width.getKnownMinValue()); 6031 6032 // To avoid the need for FP division: 6033 // (CostA / A.Width) < (CostB / B.Width) 6034 // <=> (CostA * B.Width) < (CostB * A.Width) 6035 return (CostA * B.Width.getKnownMinValue()) < 6036 (CostB * A.Width.getKnownMinValue()); 6037 } 6038 6039 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 6040 const ElementCountSet &VFCandidates) { 6041 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 6042 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 6043 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 6044 assert(VFCandidates.count(ElementCount::getFixed(1)) && 6045 "Expected Scalar VF to be a candidate"); 6046 6047 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); 6048 VectorizationFactor ChosenFactor = ScalarCost; 6049 6050 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 6051 if (ForceVectorization && VFCandidates.size() > 1) { 6052 // Ignore scalar width, because the user explicitly wants vectorization. 6053 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 6054 // evaluation. 6055 ChosenFactor.Cost = InstructionCost::getMax(); 6056 } 6057 6058 SmallVector<InstructionVFPair> InvalidCosts; 6059 for (const auto &i : VFCandidates) { 6060 // The cost for scalar VF=1 is already calculated, so ignore it. 6061 if (i.isScalar()) 6062 continue; 6063 6064 VectorizationCostTy C = expectedCost(i, &InvalidCosts); 6065 VectorizationFactor Candidate(i, C.first); 6066 LLVM_DEBUG( 6067 dbgs() << "LV: Vector loop of width " << i << " costs: " 6068 << (Candidate.Cost / Candidate.Width.getKnownMinValue()) 6069 << (i.isScalable() ? " (assuming a minimum vscale of 1)" : "") 6070 << ".\n"); 6071 6072 if (!C.second && !ForceVectorization) { 6073 LLVM_DEBUG( 6074 dbgs() << "LV: Not considering vector loop of width " << i 6075 << " because it will not generate any vector instructions.\n"); 6076 continue; 6077 } 6078 6079 // If profitable add it to ProfitableVF list. 6080 if (isMoreProfitable(Candidate, ScalarCost)) 6081 ProfitableVFs.push_back(Candidate); 6082 6083 if (isMoreProfitable(Candidate, ChosenFactor)) 6084 ChosenFactor = Candidate; 6085 } 6086 6087 // Emit a report of VFs with invalid costs in the loop. 6088 if (!InvalidCosts.empty()) { 6089 // Group the remarks per instruction, keeping the instruction order from 6090 // InvalidCosts. 6091 std::map<Instruction *, unsigned> Numbering; 6092 unsigned I = 0; 6093 for (auto &Pair : InvalidCosts) 6094 if (!Numbering.count(Pair.first)) 6095 Numbering[Pair.first] = I++; 6096 6097 // Sort the list, first on instruction(number) then on VF. 
6098 llvm::sort(InvalidCosts, 6099 [&Numbering](InstructionVFPair &A, InstructionVFPair &B) { 6100 if (Numbering[A.first] != Numbering[B.first]) 6101 return Numbering[A.first] < Numbering[B.first]; 6102 ElementCountComparator ECC; 6103 return ECC(A.second, B.second); 6104 }); 6105 6106 // For a list of ordered instruction-vf pairs: 6107 // [(load, vf1), (load, vf2), (store, vf1)] 6108 // Group the instructions together to emit separate remarks for: 6109 // load (vf1, vf2) 6110 // store (vf1) 6111 auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts); 6112 auto Subset = ArrayRef<InstructionVFPair>(); 6113 do { 6114 if (Subset.empty()) 6115 Subset = Tail.take_front(1); 6116 6117 Instruction *I = Subset.front().first; 6118 6119 // If the next instruction is different, or if there are no other pairs, 6120 // emit a remark for the collated subset. e.g. 6121 // [(load, vf1), (load, vf2))] 6122 // to emit: 6123 // remark: invalid costs for 'load' at VF=(vf, vf2) 6124 if (Subset == Tail || Tail[Subset.size()].first != I) { 6125 std::string OutString; 6126 raw_string_ostream OS(OutString); 6127 assert(!Subset.empty() && "Unexpected empty range"); 6128 OS << "Instruction with invalid costs prevented vectorization at VF=("; 6129 for (auto &Pair : Subset) 6130 OS << (Pair.second == Subset.front().second ? "" : ", ") 6131 << Pair.second; 6132 OS << "):"; 6133 if (auto *CI = dyn_cast<CallInst>(I)) 6134 OS << " call to " << CI->getCalledFunction()->getName(); 6135 else 6136 OS << " " << I->getOpcodeName(); 6137 OS.flush(); 6138 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); 6139 Tail = Tail.drop_front(Subset.size()); 6140 Subset = {}; 6141 } else 6142 // Grow the subset by one element 6143 Subset = Tail.take_front(Subset.size() + 1); 6144 } while (!Tail.empty()); 6145 } 6146 6147 if (!EnableCondStoresVectorization && NumPredStores) { 6148 reportVectorizationFailure("There are conditional stores.", 6149 "store that is conditionally executed prevents vectorization", 6150 "ConditionalStore", ORE, TheLoop); 6151 ChosenFactor = ScalarCost; 6152 } 6153 6154 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 6155 ChosenFactor.Cost >= ScalarCost.Cost) dbgs() 6156 << "LV: Vectorization seems to be not beneficial, " 6157 << "but was forced by a user.\n"); 6158 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 6159 return ChosenFactor; 6160 } 6161 6162 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 6163 const Loop &L, ElementCount VF) const { 6164 // Cross iteration phis such as reductions need special handling and are 6165 // currently unsupported. 6166 if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) { 6167 return Legal->isFirstOrderRecurrence(&Phi) || 6168 Legal->isReductionVariable(&Phi); 6169 })) 6170 return false; 6171 6172 // Phis with uses outside of the loop require special handling and are 6173 // currently unsupported. 6174 for (auto &Entry : Legal->getInductionVars()) { 6175 // Look for uses of the value of the induction at the last iteration. 6176 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 6177 for (User *U : PostInc->users()) 6178 if (!L.contains(cast<Instruction>(U))) 6179 return false; 6180 // Look for uses of penultimate value of the induction. 6181 for (User *U : Entry.first->users()) 6182 if (!L.contains(cast<Instruction>(U))) 6183 return false; 6184 } 6185 6186 // Induction variables that are widened require special handling that is 6187 // currently not supported. 
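// That is, every induction must either remain scalar after vectorization or be profitable to scalarize; otherwise the loop is rejected as an epilogue-vectorization candidate.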
6188 if (any_of(Legal->getInductionVars(), [&](auto &Entry) { 6189 return !(this->isScalarAfterVectorization(Entry.first, VF) || 6190 this->isProfitableToScalarize(Entry.first, VF)); 6191 })) 6192 return false; 6193 6194 // Epilogue vectorization code has not been audited to ensure it handles 6195 // non-latch exits properly. It may be fine, but it needs to be audited and 6196 // tested. 6197 if (L.getExitingBlock() != L.getLoopLatch()) 6198 return false; 6199 6200 return true; 6201 } 6202 6203 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( 6204 const ElementCount VF) const { 6205 // FIXME: We need a much better cost-model to take different parameters such 6206 // as register pressure, code size increase and cost of extra branches into 6207 // account. For now we apply a very crude heuristic and only consider loops 6208 // with vectorization factors larger than a certain value. 6209 // We also consider epilogue vectorization unprofitable for targets that don't 6210 // consider interleaving beneficial (e.g. MVE). 6211 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1) 6212 return false; 6213 if (VF.getFixedValue() >= EpilogueVectorizationMinVF) 6214 return true; 6215 return false; 6216 } 6217 6218 VectorizationFactor 6219 LoopVectorizationCostModel::selectEpilogueVectorizationFactor( 6220 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) { 6221 VectorizationFactor Result = VectorizationFactor::Disabled(); 6222 if (!EnableEpilogueVectorization) { 6223 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";); 6224 return Result; 6225 } 6226 6227 if (!isScalarEpilogueAllowed()) { 6228 LLVM_DEBUG( 6229 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is " 6230 "allowed.\n";); 6231 return Result; 6232 } 6233 6234 // FIXME: This can be fixed for scalable vectors later, because at this stage 6235 // the LoopVectorizer will only consider vectorizing a loop with scalable 6236 // vectors when the loop has a hint to enable vectorization for a given VF. 6237 if (MainLoopVF.isScalable()) { 6238 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not " 6239 "yet supported.\n"); 6240 return Result; 6241 } 6242 6243 // Not really a cost consideration, but check for unsupported cases here to 6244 // simplify the logic.
6245 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { 6246 LLVM_DEBUG( 6247 dbgs() << "LEV: Unable to vectorize epilogue because the loop is " 6248 "not a supported candidate.\n";); 6249 return Result; 6250 } 6251 6252 if (EpilogueVectorizationForceVF > 1) { 6253 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";); 6254 if (LVP.hasPlanWithVFs( 6255 {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)})) 6256 return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0}; 6257 else { 6258 LLVM_DEBUG( 6259 dbgs() 6260 << "LEV: Epilogue vectorization forced factor is not viable.\n";); 6261 return Result; 6262 } 6263 } 6264 6265 if (TheLoop->getHeader()->getParent()->hasOptSize() || 6266 TheLoop->getHeader()->getParent()->hasMinSize()) { 6267 LLVM_DEBUG( 6268 dbgs() 6269 << "LEV: Epilogue vectorization skipped due to opt for size.\n";); 6270 return Result; 6271 } 6272 6273 if (!isEpilogueVectorizationProfitable(MainLoopVF)) 6274 return Result; 6275 6276 for (auto &NextVF : ProfitableVFs) 6277 if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) && 6278 (Result.Width.getFixedValue() == 1 || 6279 isMoreProfitable(NextVF, Result)) && 6280 LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width})) 6281 Result = NextVF; 6282 6283 if (Result != VectorizationFactor::Disabled()) 6284 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 6285 << Result.Width.getFixedValue() << "\n";); 6286 return Result; 6287 } 6288 6289 std::pair<unsigned, unsigned> 6290 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 6291 unsigned MinWidth = -1U; 6292 unsigned MaxWidth = 8; 6293 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 6294 for (Type *T : ElementTypesInLoop) { 6295 MinWidth = std::min<unsigned>( 6296 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 6297 MaxWidth = std::max<unsigned>( 6298 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 6299 } 6300 return {MinWidth, MaxWidth}; 6301 } 6302 6303 void LoopVectorizationCostModel::collectElementTypesForWidening() { 6304 ElementTypesInLoop.clear(); 6305 // For each block. 6306 for (BasicBlock *BB : TheLoop->blocks()) { 6307 // For each instruction in the loop. 6308 for (Instruction &I : BB->instructionsWithoutDebug()) { 6309 Type *T = I.getType(); 6310 6311 // Skip ignored values. 6312 if (ValuesToIgnore.count(&I)) 6313 continue; 6314 6315 // Only examine Loads, Stores and PHINodes. 6316 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 6317 continue; 6318 6319 // Examine PHI nodes that are reduction variables. Update the type to 6320 // account for the recurrence type. 6321 if (auto *PN = dyn_cast<PHINode>(&I)) { 6322 if (!Legal->isReductionVariable(PN)) 6323 continue; 6324 const RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[PN]; 6325 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 6326 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 6327 RdxDesc.getRecurrenceType(), 6328 TargetTransformInfo::ReductionFlags())) 6329 continue; 6330 T = RdxDesc.getRecurrenceType(); 6331 } 6332 6333 // Examine the stored values. 6334 if (auto *ST = dyn_cast<StoreInst>(&I)) 6335 T = ST->getValueOperand()->getType(); 6336 6337 // Ignore loaded pointer types and stored pointer types that are not 6338 // vectorizable. 6339 // 6340 // FIXME: The check here attempts to predict whether a load or store will 6341 // be vectorized. We only know this for certain after a VF has 6342 // been selected. 
Here, we assume that if an access can be 6343 // vectorized, it will be. We should also look at extending this 6344 // optimization to non-pointer types. 6345 // 6346 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 6347 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 6348 continue; 6349 6350 ElementTypesInLoop.insert(T); 6351 } 6352 } 6353 } 6354 6355 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 6356 unsigned LoopCost) { 6357 // -- The interleave heuristics -- 6358 // We interleave the loop in order to expose ILP and reduce the loop overhead. 6359 // There are many micro-architectural considerations that we can't predict 6360 // at this level. For example, frontend pressure (on decode or fetch) due to 6361 // code size, or the number and capabilities of the execution ports. 6362 // 6363 // We use the following heuristics to select the interleave count: 6364 // 1. If the code has reductions, then we interleave to break the cross 6365 // iteration dependency. 6366 // 2. If the loop is really small, then we interleave to reduce the loop 6367 // overhead. 6368 // 3. We don't interleave if we think that we will spill registers to memory 6369 // due to the increased register pressure. 6370 6371 if (!isScalarEpilogueAllowed()) 6372 return 1; 6373 6374 // We used the distance for the interleave count. 6375 if (Legal->getMaxSafeDepDistBytes() != -1U) 6376 return 1; 6377 6378 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 6379 const bool HasReductions = !Legal->getReductionVars().empty(); 6380 // Do not interleave loops with a relatively small known or estimated trip 6381 // count. But we will interleave when InterleaveSmallLoopScalarReduction is 6382 // enabled, and the code has scalar reductions(HasReductions && VF = 1), 6383 // because with the above conditions interleaving can expose ILP and break 6384 // cross iteration dependences for reductions. 6385 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 6386 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 6387 return 1; 6388 6389 RegisterUsage R = calculateRegisterUsage({VF})[0]; 6390 // We divide by these constants so assume that we have at least one 6391 // instruction that uses at least one register. 6392 for (auto& pair : R.MaxLocalUsers) { 6393 pair.second = std::max(pair.second, 1U); 6394 } 6395 6396 // We calculate the interleave count using the following formula. 6397 // Subtract the number of loop invariants from the number of available 6398 // registers. These registers are used by all of the interleaved instances. 6399 // Next, divide the remaining registers by the number of registers that is 6400 // required by the loop, in order to estimate how many parallel instances 6401 // fit without causing spills. All of this is rounded down if necessary to be 6402 // a power of two. We want power of two interleave count to simplify any 6403 // addressing operations or alignment considerations. 6404 // We also want power of two interleave counts to ensure that the induction 6405 // variable of the vector loop wraps to zero, when tail is folded by masking; 6406 // this currently happens when OptForSize, in which case IC is set to 1 above. 
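// For example, with 32 registers in a class, 2 of them holding loop-invariant values and a maximum local usage of 6, the estimate below gives PowerOf2Floor((32 - 2) / 6) = PowerOf2Floor(5) = 4 interleaved instances.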
6407 unsigned IC = UINT_MAX; 6408 6409 for (auto& pair : R.MaxLocalUsers) { 6410 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 6411 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 6412 << " registers of " 6413 << TTI.getRegisterClassName(pair.first) << " register class\n"); 6414 if (VF.isScalar()) { 6415 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 6416 TargetNumRegisters = ForceTargetNumScalarRegs; 6417 } else { 6418 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 6419 TargetNumRegisters = ForceTargetNumVectorRegs; 6420 } 6421 unsigned MaxLocalUsers = pair.second; 6422 unsigned LoopInvariantRegs = 0; 6423 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 6424 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 6425 6426 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 6427 // Don't count the induction variable as interleaved. 6428 if (EnableIndVarRegisterHeur) { 6429 TmpIC = 6430 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 6431 std::max(1U, (MaxLocalUsers - 1))); 6432 } 6433 6434 IC = std::min(IC, TmpIC); 6435 } 6436 6437 // Clamp the interleave ranges to reasonable counts. 6438 unsigned MaxInterleaveCount = 6439 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 6440 6441 // Check if the user has overridden the max. 6442 if (VF.isScalar()) { 6443 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 6444 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 6445 } else { 6446 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 6447 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 6448 } 6449 6450 // If trip count is known or estimated compile time constant, limit the 6451 // interleave count to be less than the trip count divided by VF, provided it 6452 // is at least 1. 6453 // 6454 // For scalable vectors we can't know if interleaving is beneficial. It may 6455 // not be beneficial for small loops if none of the lanes in the second vector 6456 // iterations is enabled. However, for larger loops, there is likely to be a 6457 // similar benefit as for fixed-width vectors. For now, we choose to leave 6458 // the InterleaveCount as if vscale is '1', although if some information about 6459 // the vector is known (e.g. min vector size), we can make a better decision. 6460 if (BestKnownTC) { 6461 MaxInterleaveCount = 6462 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 6463 // Make sure MaxInterleaveCount is greater than 0. 6464 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 6465 } 6466 6467 assert(MaxInterleaveCount > 0 && 6468 "Maximum interleave count must be greater than 0"); 6469 6470 // Clamp the calculated IC to be between the 1 and the max interleave count 6471 // that the target and trip count allows. 6472 if (IC > MaxInterleaveCount) 6473 IC = MaxInterleaveCount; 6474 else 6475 // Make sure IC is greater than 0. 6476 IC = std::max(1u, IC); 6477 6478 assert(IC > 0 && "Interleave count must be greater than 0."); 6479 6480 // If we did not calculate the cost for VF (because the user selected the VF) 6481 // then we calculate the cost of VF here. 
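// A LoopCost of zero is the caller's signal that the cost has not been computed yet; a real loop is expected to have a non-zero cost (asserted below).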
6482 if (LoopCost == 0) { 6483 InstructionCost C = expectedCost(VF).first; 6484 assert(C.isValid() && "Expected to have chosen a VF with valid cost"); 6485 LoopCost = *C.getValue(); 6486 } 6487 6488 assert(LoopCost && "Non-zero loop cost expected"); 6489 6490 // Interleave if we vectorized this loop and there is a reduction that could 6491 // benefit from interleaving. 6492 if (VF.isVector() && HasReductions) { 6493 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6494 return IC; 6495 } 6496 6497 // Note that if we've already vectorized the loop we will have done the 6498 // runtime check and so interleaving won't require further checks. 6499 bool InterleavingRequiresRuntimePointerCheck = 6500 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 6501 6502 // We want to interleave small loops in order to reduce the loop overhead and 6503 // potentially expose ILP opportunities. 6504 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 6505 << "LV: IC is " << IC << '\n' 6506 << "LV: VF is " << VF << '\n'); 6507 const bool AggressivelyInterleaveReductions = 6508 TTI.enableAggressiveInterleaving(HasReductions); 6509 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 6510 // We assume that the cost overhead is 1 and we use the cost model 6511 // to estimate the cost of the loop and interleave until the cost of the 6512 // loop overhead is about 5% of the cost of the loop. 6513 unsigned SmallIC = 6514 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 6515 6516 // Interleave until store/load ports (estimated by max interleave count) are 6517 // saturated. 6518 unsigned NumStores = Legal->getNumStores(); 6519 unsigned NumLoads = Legal->getNumLoads(); 6520 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 6521 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 6522 6523 // If we have a scalar reduction (vector reductions are already dealt with 6524 // by this point), we can increase the critical path length if the loop 6525 // we're interleaving is inside another loop. For tree-wise reductions 6526 // set the limit to 2, and for ordered reductions it's best to disable 6527 // interleaving entirely. 6528 if (HasReductions && TheLoop->getLoopDepth() > 1) { 6529 bool HasOrderedReductions = 6530 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 6531 const RecurrenceDescriptor &RdxDesc = Reduction.second; 6532 return RdxDesc.isOrdered(); 6533 }); 6534 if (HasOrderedReductions) { 6535 LLVM_DEBUG( 6536 dbgs() << "LV: Not interleaving scalar ordered reductions.\n"); 6537 return 1; 6538 } 6539 6540 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 6541 SmallIC = std::min(SmallIC, F); 6542 StoresIC = std::min(StoresIC, F); 6543 LoadsIC = std::min(LoadsIC, F); 6544 } 6545 6546 if (EnableLoadStoreRuntimeInterleave && 6547 std::max(StoresIC, LoadsIC) > SmallIC) { 6548 LLVM_DEBUG( 6549 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 6550 return std::max(StoresIC, LoadsIC); 6551 } 6552 6553 // If there are scalar reductions and TTI has enabled aggressive 6554 // interleaving for reductions, we will interleave to expose ILP. 6555 if (InterleaveSmallLoopScalarReduction && VF.isScalar() && 6556 AggressivelyInterleaveReductions) { 6557 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6558 // Interleave no less than SmallIC but not as aggressive as the normal IC 6559 // to satisfy the rare situation when resources are too limited. 
6560 return std::max(IC / 2, SmallIC); 6561 } else { 6562 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 6563 return SmallIC; 6564 } 6565 } 6566 6567 // Interleave if this is a large loop (small loops are already dealt with by 6568 // this point) that could benefit from interleaving. 6569 if (AggressivelyInterleaveReductions) { 6570 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6571 return IC; 6572 } 6573 6574 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 6575 return 1; 6576 } 6577 6578 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 6579 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) { 6580 // This function calculates the register usage by measuring the highest number 6581 // of values that are alive at a single location. Obviously, this is a very 6582 // rough estimation. We scan the loop in topological order and 6583 // assign a number to each instruction. We use RPO to ensure that defs are 6584 // met before their users. We assume that each instruction that has in-loop 6585 // users starts an interval. We record every time that an in-loop value is 6586 // used, so we have a list of the first and last occurrences of each 6587 // instruction. Next, we transpose this data structure into a multi-map that 6588 // holds the list of intervals that *end* at a specific location. This 6589 // multi-map allows us to perform a linear search. We scan the instructions linearly 6590 // and record each time that a new interval starts, by placing it in a set. 6591 // If we find this value in the multi-map then we remove it from the set. 6592 // The max register usage is the maximum size of the set. 6593 // We also search for instructions that are defined outside the loop, but are 6594 // used inside the loop. We need this number separately from the max-interval 6595 // usage number because when we unroll, loop-invariant values do not take 6596 // more registers. 6597 LoopBlocksDFS DFS(TheLoop); 6598 DFS.perform(LI); 6599 6600 RegisterUsage RU; 6601 6602 // Each 'key' in the map opens a new interval. The values 6603 // of the map are the index of the 'last seen' usage of the 6604 // instruction that is the key. 6605 using IntervalMap = DenseMap<Instruction *, unsigned>; 6606 6607 // Maps instruction to its index. 6608 SmallVector<Instruction *, 64> IdxToInstr; 6609 // Marks the end of each interval. 6610 IntervalMap EndPoint; 6611 // Saves the list of instruction indices that are used in the loop. 6612 SmallPtrSet<Instruction *, 8> Ends; 6613 // Saves the list of values that are used in the loop but are 6614 // defined outside the loop, such as arguments and constants. 6615 SmallPtrSet<Value *, 8> LoopInvariants; 6616 6617 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6618 for (Instruction &I : BB->instructionsWithoutDebug()) { 6619 IdxToInstr.push_back(&I); 6620 6621 // Save the end location of each USE. 6622 for (Value *U : I.operands()) { 6623 auto *Instr = dyn_cast<Instruction>(U); 6624 6625 // Ignore non-instruction values such as arguments, constants, etc. 6626 if (!Instr) 6627 continue; 6628 6629 // If this instruction is outside the loop then record it and continue. 6630 if (!TheLoop->contains(Instr)) { 6631 LoopInvariants.insert(Instr); 6632 continue; 6633 } 6634 6635 // Overwrite previous end points. 6636 EndPoint[Instr] = IdxToInstr.size(); 6637 Ends.insert(Instr); 6638 } 6639 } 6640 } 6641 6642 // Saves the list of intervals that end with the index in 'key'.
6643 using InstrList = SmallVector<Instruction *, 2>; 6644 DenseMap<unsigned, InstrList> TransposeEnds; 6645 6646 // Transpose the EndPoints to a list of values that end at each index. 6647 for (auto &Interval : EndPoint) 6648 TransposeEnds[Interval.second].push_back(Interval.first); 6649 6650 SmallPtrSet<Instruction *, 8> OpenIntervals; 6651 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6652 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 6653 6654 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6655 6656 // A lambda that gets the register usage for the given type and VF. 6657 const auto &TTICapture = TTI; 6658 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned { 6659 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 6660 return 0; 6661 InstructionCost::CostType RegUsage = 6662 *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue(); 6663 assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() && 6664 "Nonsensical values for register usage."); 6665 return RegUsage; 6666 }; 6667 6668 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 6669 Instruction *I = IdxToInstr[i]; 6670 6671 // Remove all of the instructions that end at this location. 6672 InstrList &List = TransposeEnds[i]; 6673 for (Instruction *ToRemove : List) 6674 OpenIntervals.erase(ToRemove); 6675 6676 // Ignore instructions that are never used within the loop. 6677 if (!Ends.count(I)) 6678 continue; 6679 6680 // Skip ignored values. 6681 if (ValuesToIgnore.count(I)) 6682 continue; 6683 6684 // For each VF find the maximum usage of registers. 6685 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6686 // Count the number of live intervals. 6687 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6688 6689 if (VFs[j].isScalar()) { 6690 for (auto Inst : OpenIntervals) { 6691 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6692 if (RegUsage.find(ClassID) == RegUsage.end()) 6693 RegUsage[ClassID] = 1; 6694 else 6695 RegUsage[ClassID] += 1; 6696 } 6697 } else { 6698 collectUniformsAndScalars(VFs[j]); 6699 for (auto Inst : OpenIntervals) { 6700 // Skip ignored values for VF > 1. 6701 if (VecValuesToIgnore.count(Inst)) 6702 continue; 6703 if (isScalarAfterVectorization(Inst, VFs[j])) { 6704 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6705 if (RegUsage.find(ClassID) == RegUsage.end()) 6706 RegUsage[ClassID] = 1; 6707 else 6708 RegUsage[ClassID] += 1; 6709 } else { 6710 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6711 if (RegUsage.find(ClassID) == RegUsage.end()) 6712 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6713 else 6714 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6715 } 6716 } 6717 } 6718 6719 for (auto& pair : RegUsage) { 6720 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6721 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6722 else 6723 MaxUsages[j][pair.first] = pair.second; 6724 } 6725 } 6726 6727 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6728 << OpenIntervals.size() << '\n'); 6729 6730 // Add the current instruction to the list of open intervals. 6731 OpenIntervals.insert(I); 6732 } 6733 6734 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6735 SmallMapVector<unsigned, unsigned, 4> Invariant; 6736 6737 for (auto Inst : LoopInvariants) { 6738 unsigned Usage = 6739 VFs[i].isScalar() ? 
1 : GetRegUsage(Inst->getType(), VFs[i]); 6740 unsigned ClassID = 6741 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6742 if (Invariant.find(ClassID) == Invariant.end()) 6743 Invariant[ClassID] = Usage; 6744 else 6745 Invariant[ClassID] += Usage; 6746 } 6747 6748 LLVM_DEBUG({ 6749 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6750 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6751 << " item\n"; 6752 for (const auto &pair : MaxUsages[i]) { 6753 dbgs() << "LV(REG): RegisterClass: " 6754 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6755 << " registers\n"; 6756 } 6757 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6758 << " item\n"; 6759 for (const auto &pair : Invariant) { 6760 dbgs() << "LV(REG): RegisterClass: " 6761 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6762 << " registers\n"; 6763 } 6764 }); 6765 6766 RU.LoopInvariantRegs = Invariant; 6767 RU.MaxLocalUsers = MaxUsages[i]; 6768 RUs[i] = RU; 6769 } 6770 6771 return RUs; 6772 } 6773 6774 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 6775 // TODO: Cost model for emulated masked load/store is completely 6776 // broken. This hack guides the cost model to use an artificially 6777 // high enough value to practically disable vectorization with such 6778 // operations, except where previously deployed legality hack allowed 6779 // using very low cost values. This is to avoid regressions coming simply 6780 // from moving "masked load/store" check from legality to cost model. 6781 // Masked Load/Gather emulation was previously never allowed. 6782 // Limited number of Masked Store/Scatter emulation was allowed. 6783 assert(isPredicatedInst(I) && 6784 "Expecting a scalar emulated instruction"); 6785 return isa<LoadInst>(I) || 6786 (isa<StoreInst>(I) && 6787 NumPredStores > NumberOfStoresToPredicate); 6788 } 6789 6790 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6791 // If we aren't vectorizing the loop, or if we've already collected the 6792 // instructions to scalarize, there's nothing to do. Collection may already 6793 // have occurred if we have a user-selected VF and are now computing the 6794 // expected cost for interleaving. 6795 if (VF.isScalar() || VF.isZero() || 6796 InstsToScalarize.find(VF) != InstsToScalarize.end()) 6797 return; 6798 6799 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 6800 // not profitable to scalarize any instructions, the presence of VF in the 6801 // map will indicate that we've analyzed it already. 6802 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6803 6804 // Find all the instructions that are scalar with predication in the loop and 6805 // determine if it would be better to not if-convert the blocks they are in. 6806 // If so, we also record the instructions to scalarize. 6807 for (BasicBlock *BB : TheLoop->blocks()) { 6808 if (!blockNeedsPredication(BB)) 6809 continue; 6810 for (Instruction &I : *BB) 6811 if (isScalarWithPredication(&I)) { 6812 ScalarCostsTy ScalarCosts; 6813 // Do not apply discount if scalable, because that would lead to 6814 // invalid scalarization costs. 6815 // Do not apply discount logic if hacked cost is needed 6816 // for emulated masked memrefs. 6817 if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I) && 6818 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6819 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6820 // Remember that BB will remain after vectorization. 
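          // Branches around such blocks are later charged an i1-extract plus
          // branch cost per lane in getInstructionCost (see the
          // Instruction::Br case).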
6821 PredicatedBBsAfterVectorization.insert(BB); 6822 } 6823 } 6824 } 6825 6826 int LoopVectorizationCostModel::computePredInstDiscount( 6827 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6828 assert(!isUniformAfterVectorization(PredInst, VF) && 6829 "Instruction marked uniform-after-vectorization will be predicated"); 6830 6831 // Initialize the discount to zero, meaning that the scalar version and the 6832 // vector version cost the same. 6833 InstructionCost Discount = 0; 6834 6835 // Holds instructions to analyze. The instructions we visit are mapped in 6836 // ScalarCosts. Those instructions are the ones that would be scalarized if 6837 // we find that the scalar version costs less. 6838 SmallVector<Instruction *, 8> Worklist; 6839 6840 // Returns true if the given instruction can be scalarized. 6841 auto canBeScalarized = [&](Instruction *I) -> bool { 6842 // We only attempt to scalarize instructions forming a single-use chain 6843 // from the original predicated block that would otherwise be vectorized. 6844 // Although not strictly necessary, we give up on instructions we know will 6845 // already be scalar to avoid traversing chains that are unlikely to be 6846 // beneficial. 6847 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6848 isScalarAfterVectorization(I, VF)) 6849 return false; 6850 6851 // If the instruction is scalar with predication, it will be analyzed 6852 // separately. We ignore it within the context of PredInst. 6853 if (isScalarWithPredication(I)) 6854 return false; 6855 6856 // If any of the instruction's operands are uniform after vectorization, 6857 // the instruction cannot be scalarized. This prevents, for example, a 6858 // masked load from being scalarized. 6859 // 6860 // We assume we will only emit a value for lane zero of an instruction 6861 // marked uniform after vectorization, rather than VF identical values. 6862 // Thus, if we scalarize an instruction that uses a uniform, we would 6863 // create uses of values corresponding to the lanes we aren't emitting code 6864 // for. This behavior can be changed by allowing getScalarValue to clone 6865 // the lane zero values for uniforms rather than asserting. 6866 for (Use &U : I->operands()) 6867 if (auto *J = dyn_cast<Instruction>(U.get())) 6868 if (isUniformAfterVectorization(J, VF)) 6869 return false; 6870 6871 // Otherwise, we can scalarize the instruction. 6872 return true; 6873 }; 6874 6875 // Compute the expected cost discount from scalarizing the entire expression 6876 // feeding the predicated instruction. We currently only consider expressions 6877 // that are single-use instruction chains. 6878 Worklist.push_back(PredInst); 6879 while (!Worklist.empty()) { 6880 Instruction *I = Worklist.pop_back_val(); 6881 6882 // If we've already analyzed the instruction, there's nothing to do. 6883 if (ScalarCosts.find(I) != ScalarCosts.end()) 6884 continue; 6885 6886 // Compute the cost of the vector instruction. Note that this cost already 6887 // includes the scalarization overhead of the predicated instruction. 6888 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6889 6890 // Compute the cost of the scalarized instruction. This cost is the cost of 6891 // the instruction as if it wasn't if-converted and instead remained in the 6892 // predicated block. We will scale this cost by block probability after 6893 // computing the scalarization overhead. 
6894 InstructionCost ScalarCost = 6895 VF.getFixedValue() * 6896 getInstructionCost(I, ElementCount::getFixed(1)).first; 6897 6898 // Compute the scalarization overhead of needed insertelement instructions 6899 // and phi nodes. 6900 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 6901 ScalarCost += TTI.getScalarizationOverhead( 6902 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6903 APInt::getAllOnes(VF.getFixedValue()), true, false); 6904 ScalarCost += 6905 VF.getFixedValue() * 6906 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6907 } 6908 6909 // Compute the scalarization overhead of needed extractelement 6910 // instructions. For each of the instruction's operands, if the operand can 6911 // be scalarized, add it to the worklist; otherwise, account for the 6912 // overhead. 6913 for (Use &U : I->operands()) 6914 if (auto *J = dyn_cast<Instruction>(U.get())) { 6915 assert(VectorType::isValidElementType(J->getType()) && 6916 "Instruction has non-scalar type"); 6917 if (canBeScalarized(J)) 6918 Worklist.push_back(J); 6919 else if (needsExtract(J, VF)) { 6920 ScalarCost += TTI.getScalarizationOverhead( 6921 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6922 APInt::getAllOnes(VF.getFixedValue()), false, true); 6923 } 6924 } 6925 6926 // Scale the total scalar cost by block probability. 6927 ScalarCost /= getReciprocalPredBlockProb(); 6928 6929 // Compute the discount. A non-negative discount means the vector version 6930 // of the instruction costs more, and scalarizing would be beneficial. 6931 Discount += VectorCost - ScalarCost; 6932 ScalarCosts[I] = ScalarCost; 6933 } 6934 6935 return *Discount.getValue(); 6936 } 6937 6938 LoopVectorizationCostModel::VectorizationCostTy 6939 LoopVectorizationCostModel::expectedCost( 6940 ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { 6941 VectorizationCostTy Cost; 6942 6943 // For each block. 6944 for (BasicBlock *BB : TheLoop->blocks()) { 6945 VectorizationCostTy BlockCost; 6946 6947 // For each instruction in the old loop. 6948 for (Instruction &I : BB->instructionsWithoutDebug()) { 6949 // Skip ignored values. 6950 if (ValuesToIgnore.count(&I) || 6951 (VF.isVector() && VecValuesToIgnore.count(&I))) 6952 continue; 6953 6954 VectorizationCostTy C = getInstructionCost(&I, VF); 6955 6956 // Check if we should override the cost. 6957 if (C.first.isValid() && 6958 ForceTargetInstructionCost.getNumOccurrences() > 0) 6959 C.first = InstructionCost(ForceTargetInstructionCost); 6960 6961 // Keep a list of instructions with invalid costs. 6962 if (Invalid && !C.first.isValid()) 6963 Invalid->emplace_back(&I, VF); 6964 6965 BlockCost.first += C.first; 6966 BlockCost.second |= C.second; 6967 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6968 << " for VF " << VF << " For instruction: " << I 6969 << '\n'); 6970 } 6971 6972 // If we are vectorizing a predicated block, it will have been 6973 // if-converted. This means that the block's instructions (aside from 6974 // stores and instructions that may divide by zero) will now be 6975 // unconditionally executed. For the scalar case, we may not always execute 6976 // the predicated block, if it is an if-else block. Thus, scale the block's 6977 // cost by the probability of executing it. blockNeedsPredication from 6978 // Legal is used so as to not include all blocks in tail folded loops. 
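    // Dividing by getReciprocalPredBlockProb() scales the block's cost by the
    // assumed execution probability of the predicated block.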
6979 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6980 BlockCost.first /= getReciprocalPredBlockProb(); 6981 6982 Cost.first += BlockCost.first; 6983 Cost.second |= BlockCost.second; 6984 } 6985 6986 return Cost; 6987 } 6988 6989 /// Gets Address Access SCEV after verifying that the access pattern 6990 /// is loop invariant except the induction variable dependence. 6991 /// 6992 /// This SCEV can be sent to the Target in order to estimate the address 6993 /// calculation cost. 6994 static const SCEV *getAddressAccessSCEV( 6995 Value *Ptr, 6996 LoopVectorizationLegality *Legal, 6997 PredicatedScalarEvolution &PSE, 6998 const Loop *TheLoop) { 6999 7000 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 7001 if (!Gep) 7002 return nullptr; 7003 7004 // We are looking for a gep with all loop invariant indices except for one 7005 // which should be an induction variable. 7006 auto SE = PSE.getSE(); 7007 unsigned NumOperands = Gep->getNumOperands(); 7008 for (unsigned i = 1; i < NumOperands; ++i) { 7009 Value *Opd = Gep->getOperand(i); 7010 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 7011 !Legal->isInductionVariable(Opd)) 7012 return nullptr; 7013 } 7014 7015 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 7016 return PSE.getSCEV(Ptr); 7017 } 7018 7019 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 7020 return Legal->hasStride(I->getOperand(0)) || 7021 Legal->hasStride(I->getOperand(1)); 7022 } 7023 7024 InstructionCost 7025 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 7026 ElementCount VF) { 7027 assert(VF.isVector() && 7028 "Scalarization cost of instruction implies vectorization."); 7029 if (VF.isScalable()) 7030 return InstructionCost::getInvalid(); 7031 7032 Type *ValTy = getLoadStoreType(I); 7033 auto SE = PSE.getSE(); 7034 7035 unsigned AS = getLoadStoreAddressSpace(I); 7036 Value *Ptr = getLoadStorePointerOperand(I); 7037 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 7038 7039 // Figure out whether the access is strided and get the stride value 7040 // if it's known in compile time 7041 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 7042 7043 // Get the cost of the scalar memory instruction and address computation. 7044 InstructionCost Cost = 7045 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 7046 7047 // Don't pass *I here, since it is scalar but will actually be part of a 7048 // vectorized loop where the user of it is a vectorized instruction. 7049 const Align Alignment = getLoadStoreAlignment(I); 7050 Cost += VF.getKnownMinValue() * 7051 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 7052 AS, TTI::TCK_RecipThroughput); 7053 7054 // Get the overhead of the extractelement and insertelement instructions 7055 // we might create due to scalarization. 7056 Cost += getScalarizationOverhead(I, VF); 7057 7058 // If we have a predicated load/store, it will need extra i1 extracts and 7059 // conditional branches, but may not be executed for each vector lane. Scale 7060 // the cost by the probability of executing the predicated block. 
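  // The mask extract and branch costs added below are not scaled, since they
  // are emitted for every lane regardless of whether the predicated body
  // executes.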
7061 if (isPredicatedInst(I)) { 7062 Cost /= getReciprocalPredBlockProb(); 7063 7064 // Add the cost of an i1 extract and a branch 7065 auto *Vec_i1Ty = 7066 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 7067 Cost += TTI.getScalarizationOverhead( 7068 Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()), 7069 /*Insert=*/false, /*Extract=*/true); 7070 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 7071 7072 if (useEmulatedMaskMemRefHack(I)) 7073 // Artificially setting to a high enough value to practically disable 7074 // vectorization with such operations. 7075 Cost = 3000000; 7076 } 7077 7078 return Cost; 7079 } 7080 7081 InstructionCost 7082 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 7083 ElementCount VF) { 7084 Type *ValTy = getLoadStoreType(I); 7085 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7086 Value *Ptr = getLoadStorePointerOperand(I); 7087 unsigned AS = getLoadStoreAddressSpace(I); 7088 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr); 7089 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7090 7091 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7092 "Stride should be 1 or -1 for consecutive memory access"); 7093 const Align Alignment = getLoadStoreAlignment(I); 7094 InstructionCost Cost = 0; 7095 if (Legal->isMaskRequired(I)) 7096 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 7097 CostKind); 7098 else 7099 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 7100 CostKind, I); 7101 7102 bool Reverse = ConsecutiveStride < 0; 7103 if (Reverse) 7104 Cost += 7105 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 7106 return Cost; 7107 } 7108 7109 InstructionCost 7110 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 7111 ElementCount VF) { 7112 assert(Legal->isUniformMemOp(*I)); 7113 7114 Type *ValTy = getLoadStoreType(I); 7115 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7116 const Align Alignment = getLoadStoreAlignment(I); 7117 unsigned AS = getLoadStoreAddressSpace(I); 7118 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7119 if (isa<LoadInst>(I)) { 7120 return TTI.getAddressComputationCost(ValTy) + 7121 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 7122 CostKind) + 7123 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 7124 } 7125 StoreInst *SI = cast<StoreInst>(I); 7126 7127 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 7128 return TTI.getAddressComputationCost(ValTy) + 7129 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 7130 CostKind) + 7131 (isLoopInvariantStoreValue 7132 ? 
0
              : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
                                       VF.getKnownMinValue() - 1));
}

InstructionCost
LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
                                                 ElementCount VF) {
  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  const Align Alignment = getLoadStoreAlignment(I);
  const Value *Ptr = getLoadStorePointerOperand(I);

  return TTI.getAddressComputationCost(VectorTy) +
         TTI.getGatherScatterOpCost(
             I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
             TargetTransformInfo::TCK_RecipThroughput, I);
}

InstructionCost
LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
                                                   ElementCount VF) {
  // TODO: Once we have support for interleaving with scalable vectors
  // we can calculate the cost properly here.
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  unsigned AS = getLoadStoreAddressSpace(I);

  auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Fail to get an interleaved access group.");

  unsigned InterleaveFactor = Group->getFactor();
  auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);

  // Holds the indices of existing members in the interleaved group.
  SmallVector<unsigned, 4> Indices;
  for (unsigned IF = 0; IF < InterleaveFactor; IF++)
    if (Group->getMember(IF))
      Indices.push_back(IF);

  // Calculate the cost of the whole interleaved group.
  bool UseMaskForGaps =
      (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
      (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
  InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
      I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
      AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);

  if (Group->isReverse()) {
    // TODO: Add support for reversed masked interleaved access.
    assert(!Legal->isMaskRequired(I) &&
           "Reverse masked interleaved access not supported.");
    Cost +=
        Group->getNumMembers() *
        TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
  }
  return Cost;
}

Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
    Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
  using namespace llvm::PatternMatch;
  // Early exit for no inloop reductions.
  if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
    return None;
  auto *VectorTy = cast<VectorType>(Ty);

  // We are looking for a pattern of, and finding the minimal acceptable cost
  // for:
  //  reduce(mul(ext(A), ext(B))) or
  //  reduce(mul(A, B)) or
  //  reduce(ext(A)) or
  //  reduce(A).
  // The basic idea is that we walk down the tree to do that, finding the root
  // reduction instruction in InLoopReductionImmediateChains. From there we
  // find the pattern of mul/ext and test the cost of the entire pattern vs
  // the cost of the components. If the reduction cost is lower, then we
  // return it for the reduction instruction and 0 for the other instructions
  // in the pattern. If it is not, we return an invalid cost specifying that
  // the original cost method should be used.
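  // For example, for an add reduction of mul(zext(A), zext(B)) the cost of a
  // single extended multiply-accumulate reduction (getExtendedAddReductionCost)
  // is compared against the summed costs of the separate ext, mul and reduce
  // instructions, and the cheaper alternative is reported.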
7214 Instruction *RetI = I; 7215 if (match(RetI, m_ZExtOrSExt(m_Value()))) { 7216 if (!RetI->hasOneUser()) 7217 return None; 7218 RetI = RetI->user_back(); 7219 } 7220 if (match(RetI, m_Mul(m_Value(), m_Value())) && 7221 RetI->user_back()->getOpcode() == Instruction::Add) { 7222 if (!RetI->hasOneUser()) 7223 return None; 7224 RetI = RetI->user_back(); 7225 } 7226 7227 // Test if the found instruction is a reduction, and if not return an invalid 7228 // cost specifying the parent to use the original cost modelling. 7229 if (!InLoopReductionImmediateChains.count(RetI)) 7230 return None; 7231 7232 // Find the reduction this chain is a part of and calculate the basic cost of 7233 // the reduction on its own. 7234 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 7235 Instruction *ReductionPhi = LastChain; 7236 while (!isa<PHINode>(ReductionPhi)) 7237 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 7238 7239 const RecurrenceDescriptor &RdxDesc = 7240 Legal->getReductionVars()[cast<PHINode>(ReductionPhi)]; 7241 7242 InstructionCost BaseCost = TTI.getArithmeticReductionCost( 7243 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind); 7244 7245 // If we're using ordered reductions then we can just return the base cost 7246 // here, since getArithmeticReductionCost calculates the full ordered 7247 // reduction cost when FP reassociation is not allowed. 7248 if (useOrderedReductions(RdxDesc)) 7249 return BaseCost; 7250 7251 // Get the operand that was not the reduction chain and match it to one of the 7252 // patterns, returning the better cost if it is found. 7253 Instruction *RedOp = RetI->getOperand(1) == LastChain 7254 ? dyn_cast<Instruction>(RetI->getOperand(0)) 7255 : dyn_cast<Instruction>(RetI->getOperand(1)); 7256 7257 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 7258 7259 Instruction *Op0, *Op1; 7260 if (RedOp && 7261 match(RedOp, 7262 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) && 7263 match(Op0, m_ZExtOrSExt(m_Value())) && 7264 Op0->getOpcode() == Op1->getOpcode() && 7265 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 7266 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) && 7267 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) { 7268 7269 // Matched reduce(ext(mul(ext(A), ext(B))) 7270 // Note that the extend opcodes need to all match, or if A==B they will have 7271 // been converted to zext(mul(sext(A), sext(A))) as it is known positive, 7272 // which is equally fine. 7273 bool IsUnsigned = isa<ZExtInst>(Op0); 7274 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 7275 auto *MulType = VectorType::get(Op0->getType(), VectorTy); 7276 7277 InstructionCost ExtCost = 7278 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType, 7279 TTI::CastContextHint::None, CostKind, Op0); 7280 InstructionCost MulCost = 7281 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind); 7282 InstructionCost Ext2Cost = 7283 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType, 7284 TTI::CastContextHint::None, CostKind, RedOp); 7285 7286 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7287 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7288 CostKind); 7289 7290 if (RedCost.isValid() && 7291 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost) 7292 return I == RetI ? 
RedCost : 0; 7293 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && 7294 !TheLoop->isLoopInvariant(RedOp)) { 7295 // Matched reduce(ext(A)) 7296 bool IsUnsigned = isa<ZExtInst>(RedOp); 7297 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 7298 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7299 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7300 CostKind); 7301 7302 InstructionCost ExtCost = 7303 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 7304 TTI::CastContextHint::None, CostKind, RedOp); 7305 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 7306 return I == RetI ? RedCost : 0; 7307 } else if (RedOp && 7308 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { 7309 if (match(Op0, m_ZExtOrSExt(m_Value())) && 7310 Op0->getOpcode() == Op1->getOpcode() && 7311 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 7312 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 7313 bool IsUnsigned = isa<ZExtInst>(Op0); 7314 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 7315 // Matched reduce(mul(ext, ext)) 7316 InstructionCost ExtCost = 7317 TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType, 7318 TTI::CastContextHint::None, CostKind, Op0); 7319 InstructionCost MulCost = 7320 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7321 7322 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7323 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7324 CostKind); 7325 7326 if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost) 7327 return I == RetI ? RedCost : 0; 7328 } else if (!match(I, m_ZExtOrSExt(m_Value()))) { 7329 // Matched reduce(mul()) 7330 InstructionCost MulCost = 7331 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7332 7333 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7334 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 7335 CostKind); 7336 7337 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 7338 return I == RetI ? RedCost : 0; 7339 } 7340 } 7341 7342 return I == RetI ? Optional<InstructionCost>(BaseCost) : None; 7343 } 7344 7345 InstructionCost 7346 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 7347 ElementCount VF) { 7348 // Calculate scalar cost only. Vectorization cost should be ready at this 7349 // moment. 7350 if (VF.isScalar()) { 7351 Type *ValTy = getLoadStoreType(I); 7352 const Align Alignment = getLoadStoreAlignment(I); 7353 unsigned AS = getLoadStoreAddressSpace(I); 7354 7355 return TTI.getAddressComputationCost(ValTy) + 7356 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 7357 TTI::TCK_RecipThroughput, I); 7358 } 7359 return getWideningCost(I, VF); 7360 } 7361 7362 LoopVectorizationCostModel::VectorizationCostTy 7363 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 7364 ElementCount VF) { 7365 // If we know that this instruction will remain uniform, check the cost of 7366 // the scalar version. 7367 if (isUniformAfterVectorization(I, VF)) 7368 VF = ElementCount::getFixed(1); 7369 7370 if (VF.isVector() && isProfitableToScalarize(I, VF)) 7371 return VectorizationCostTy(InstsToScalarize[VF][I], false); 7372 7373 // Forced scalars do not have any scalarization overhead. 
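  // Their cost below is the scalar instruction cost multiplied by the number
  // of lanes, VF.getKnownMinValue().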
7374 auto ForcedScalar = ForcedScalars.find(VF); 7375 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 7376 auto InstSet = ForcedScalar->second; 7377 if (InstSet.count(I)) 7378 return VectorizationCostTy( 7379 (getInstructionCost(I, ElementCount::getFixed(1)).first * 7380 VF.getKnownMinValue()), 7381 false); 7382 } 7383 7384 Type *VectorTy; 7385 InstructionCost C = getInstructionCost(I, VF, VectorTy); 7386 7387 bool TypeNotScalarized = 7388 VF.isVector() && VectorTy->isVectorTy() && 7389 TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue(); 7390 return VectorizationCostTy(C, TypeNotScalarized); 7391 } 7392 7393 InstructionCost 7394 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 7395 ElementCount VF) const { 7396 7397 // There is no mechanism yet to create a scalable scalarization loop, 7398 // so this is currently Invalid. 7399 if (VF.isScalable()) 7400 return InstructionCost::getInvalid(); 7401 7402 if (VF.isScalar()) 7403 return 0; 7404 7405 InstructionCost Cost = 0; 7406 Type *RetTy = ToVectorTy(I->getType(), VF); 7407 if (!RetTy->isVoidTy() && 7408 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 7409 Cost += TTI.getScalarizationOverhead( 7410 cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true, 7411 false); 7412 7413 // Some targets keep addresses scalar. 7414 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 7415 return Cost; 7416 7417 // Some targets support efficient element stores. 7418 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 7419 return Cost; 7420 7421 // Collect operands to consider. 7422 CallInst *CI = dyn_cast<CallInst>(I); 7423 Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); 7424 7425 // Skip operands that do not require extraction/scalarization and do not incur 7426 // any overhead. 7427 SmallVector<Type *> Tys; 7428 for (auto *V : filterExtractingOperands(Ops, VF)) 7429 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 7430 return Cost + TTI.getOperandsScalarizationOverhead( 7431 filterExtractingOperands(Ops, VF), Tys); 7432 } 7433 7434 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 7435 if (VF.isScalar()) 7436 return; 7437 NumPredStores = 0; 7438 for (BasicBlock *BB : TheLoop->blocks()) { 7439 // For each instruction in the old loop. 7440 for (Instruction &I : *BB) { 7441 Value *Ptr = getLoadStorePointerOperand(&I); 7442 if (!Ptr) 7443 continue; 7444 7445 // TODO: We should generate better code and update the cost model for 7446 // predicated uniform stores. Today they are treated as any other 7447 // predicated store (see added test cases in 7448 // invariant-store-vectorization.ll). 7449 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 7450 NumPredStores++; 7451 7452 if (Legal->isUniformMemOp(I)) { 7453 // TODO: Avoid replicating loads and stores instead of 7454 // relying on instcombine to remove them. 7455 // Load: Scalar load + broadcast 7456 // Store: Scalar store + isLoopInvariantStoreValue ? 
0 : extract 7457 InstructionCost Cost; 7458 if (isa<StoreInst>(&I) && VF.isScalable() && 7459 isLegalGatherOrScatter(&I)) { 7460 Cost = getGatherScatterCost(&I, VF); 7461 setWideningDecision(&I, VF, CM_GatherScatter, Cost); 7462 } else { 7463 assert((isa<LoadInst>(&I) || !VF.isScalable()) && 7464 "Cannot yet scalarize uniform stores"); 7465 Cost = getUniformMemOpCost(&I, VF); 7466 setWideningDecision(&I, VF, CM_Scalarize, Cost); 7467 } 7468 continue; 7469 } 7470 7471 // We assume that widening is the best solution when possible. 7472 if (memoryInstructionCanBeWidened(&I, VF)) { 7473 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 7474 int ConsecutiveStride = Legal->isConsecutivePtr( 7475 getLoadStoreType(&I), getLoadStorePointerOperand(&I)); 7476 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7477 "Expected consecutive stride."); 7478 InstWidening Decision = 7479 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 7480 setWideningDecision(&I, VF, Decision, Cost); 7481 continue; 7482 } 7483 7484 // Choose between Interleaving, Gather/Scatter or Scalarization. 7485 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 7486 unsigned NumAccesses = 1; 7487 if (isAccessInterleaved(&I)) { 7488 auto Group = getInterleavedAccessGroup(&I); 7489 assert(Group && "Fail to get an interleaved access group."); 7490 7491 // Make one decision for the whole group. 7492 if (getWideningDecision(&I, VF) != CM_Unknown) 7493 continue; 7494 7495 NumAccesses = Group->getNumMembers(); 7496 if (interleavedAccessCanBeWidened(&I, VF)) 7497 InterleaveCost = getInterleaveGroupCost(&I, VF); 7498 } 7499 7500 InstructionCost GatherScatterCost = 7501 isLegalGatherOrScatter(&I) 7502 ? getGatherScatterCost(&I, VF) * NumAccesses 7503 : InstructionCost::getInvalid(); 7504 7505 InstructionCost ScalarizationCost = 7506 getMemInstScalarizationCost(&I, VF) * NumAccesses; 7507 7508 // Choose better solution for the current VF, 7509 // write down this decision and use it during vectorization. 7510 InstructionCost Cost; 7511 InstWidening Decision; 7512 if (InterleaveCost <= GatherScatterCost && 7513 InterleaveCost < ScalarizationCost) { 7514 Decision = CM_Interleave; 7515 Cost = InterleaveCost; 7516 } else if (GatherScatterCost < ScalarizationCost) { 7517 Decision = CM_GatherScatter; 7518 Cost = GatherScatterCost; 7519 } else { 7520 Decision = CM_Scalarize; 7521 Cost = ScalarizationCost; 7522 } 7523 // If the instructions belongs to an interleave group, the whole group 7524 // receives the same decision. The whole group receives the cost, but 7525 // the cost will actually be assigned to one instruction. 7526 if (auto Group = getInterleavedAccessGroup(&I)) 7527 setWideningDecision(Group, VF, Decision, Cost); 7528 else 7529 setWideningDecision(&I, VF, Decision, Cost); 7530 } 7531 } 7532 7533 // Make sure that any load of address and any other address computation 7534 // remains scalar unless there is gather/scatter support. This avoids 7535 // inevitable extracts into address registers, and also has the benefit of 7536 // activating LSR more, since that pass can't optimize vectorized 7537 // addresses. 7538 if (TTI.prefersVectorizedAddressing()) 7539 return; 7540 7541 // Start with all scalar pointer uses. 
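  // These are the in-loop instructions that define the pointer operand of a
  // memory access which has not been assigned a gather/scatter widening
  // decision.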
  SmallPtrSet<Instruction *, 8> AddrDefs;
  for (BasicBlock *BB : TheLoop->blocks())
    for (Instruction &I : *BB) {
      Instruction *PtrDef =
          dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
      if (PtrDef && TheLoop->contains(PtrDef) &&
          getWideningDecision(&I, VF) != CM_GatherScatter)
        AddrDefs.insert(PtrDef);
    }

  // Add all instructions used to generate the addresses.
  SmallVector<Instruction *, 4> Worklist;
  append_range(Worklist, AddrDefs);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (auto &Op : I->operands())
      if (auto *InstOp = dyn_cast<Instruction>(Op))
        if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
            AddrDefs.insert(InstOp).second)
          Worklist.push_back(InstOp);
  }

  for (auto *I : AddrDefs) {
    if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // cost functions, but since this involves the task of finding out
      // if the loaded register is involved in an address computation, it is
      // instead changed here when we know this is the case.
      InstWidening Decision = getWideningDecision(I, VF);
      if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
        // Scalarize a widened load of address.
        setWideningDecision(
            I, VF, CM_Scalarize,
            (VF.getKnownMinValue() *
             getMemoryInstructionCost(I, ElementCount::getFixed(1))));
      else if (auto Group = getInterleavedAccessGroup(I)) {
        // Scalarize an interleave group of address loads.
        for (unsigned I = 0; I < Group->getFactor(); ++I) {
          if (Instruction *Member = Group->getMember(I))
            setWideningDecision(
                Member, VF, CM_Scalarize,
                (VF.getKnownMinValue() *
                 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
        }
      }
    } else
      // Make sure I gets scalarized and a cost estimate without
      // scalarization overhead.
      ForcedScalars[VF].insert(I);
  }
}

InstructionCost
LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
                                               Type *&VectorTy) {
  Type *RetTy = I->getType();
  if (canTruncateToMinimalBitwidth(I, VF))
    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  auto SE = PSE.getSE();
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  auto hasSingleCopyAfterVectorization = [this](Instruction *I,
                                                ElementCount VF) -> bool {
    if (VF.isScalar())
      return true;

    auto Scalarized = InstsToScalarize.find(VF);
    assert(Scalarized != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return !Scalarized->second.count(I) &&
           llvm::all_of(I->users(), [&](User *U) {
             auto *UI = cast<Instruction>(U);
             return !Scalarized->second.count(UI);
           });
  };
  (void) hasSingleCopyAfterVectorization;

  if (isScalarAfterVectorization(I, VF)) {
    // With the exception of GEPs and PHIs, after scalarization there should
    // only be one copy of the instruction generated in the loop. This is
    // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result,
    // we don't have to multiply the instruction cost by VF.
7625 assert(I->getOpcode() == Instruction::GetElementPtr || 7626 I->getOpcode() == Instruction::PHI || 7627 (I->getOpcode() == Instruction::BitCast && 7628 I->getType()->isPointerTy()) || 7629 hasSingleCopyAfterVectorization(I, VF)); 7630 VectorTy = RetTy; 7631 } else 7632 VectorTy = ToVectorTy(RetTy, VF); 7633 7634 // TODO: We need to estimate the cost of intrinsic calls. 7635 switch (I->getOpcode()) { 7636 case Instruction::GetElementPtr: 7637 // We mark this instruction as zero-cost because the cost of GEPs in 7638 // vectorized code depends on whether the corresponding memory instruction 7639 // is scalarized or not. Therefore, we handle GEPs with the memory 7640 // instruction cost. 7641 return 0; 7642 case Instruction::Br: { 7643 // In cases of scalarized and predicated instructions, there will be VF 7644 // predicated blocks in the vectorized loop. Each branch around these 7645 // blocks requires also an extract of its vector compare i1 element. 7646 bool ScalarPredicatedBB = false; 7647 BranchInst *BI = cast<BranchInst>(I); 7648 if (VF.isVector() && BI->isConditional() && 7649 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7650 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7651 ScalarPredicatedBB = true; 7652 7653 if (ScalarPredicatedBB) { 7654 // Not possible to scalarize scalable vector with predicated instructions. 7655 if (VF.isScalable()) 7656 return InstructionCost::getInvalid(); 7657 // Return cost for branches around scalarized and predicated blocks. 7658 auto *Vec_i1Ty = 7659 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7660 return ( 7661 TTI.getScalarizationOverhead( 7662 Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) + 7663 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue())); 7664 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 7665 // The back-edge branch will remain, as will all scalar branches. 7666 return TTI.getCFInstrCost(Instruction::Br, CostKind); 7667 else 7668 // This branch will be eliminated by if-conversion. 7669 return 0; 7670 // Note: We currently assume zero cost for an unconditional branch inside 7671 // a predicated block since it will become a fall-through, although we 7672 // may decide in the future to call TTI for all branches. 7673 } 7674 case Instruction::PHI: { 7675 auto *Phi = cast<PHINode>(I); 7676 7677 // First-order recurrences are replaced by vector shuffles inside the loop. 7678 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7679 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7680 return TTI.getShuffleCost( 7681 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7682 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7683 7684 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7685 // converted into select instructions. We require N - 1 selects per phi 7686 // node, where N is the number of incoming values. 
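    // For example, a phi merging values from three predecessors is lowered to
    // two chained selects, so it is charged twice the select cost here.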
7687 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7688 return (Phi->getNumIncomingValues() - 1) * 7689 TTI.getCmpSelInstrCost( 7690 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7691 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7692 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7693 7694 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7695 } 7696 case Instruction::UDiv: 7697 case Instruction::SDiv: 7698 case Instruction::URem: 7699 case Instruction::SRem: 7700 // If we have a predicated instruction, it may not be executed for each 7701 // vector lane. Get the scalarization cost and scale this amount by the 7702 // probability of executing the predicated block. If the instruction is not 7703 // predicated, we fall through to the next case. 7704 if (VF.isVector() && isScalarWithPredication(I)) { 7705 InstructionCost Cost = 0; 7706 7707 // These instructions have a non-void type, so account for the phi nodes 7708 // that we will create. This cost is likely to be zero. The phi node 7709 // cost, if any, should be scaled by the block probability because it 7710 // models a copy at the end of each predicated block. 7711 Cost += VF.getKnownMinValue() * 7712 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7713 7714 // The cost of the non-predicated instruction. 7715 Cost += VF.getKnownMinValue() * 7716 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7717 7718 // The cost of insertelement and extractelement instructions needed for 7719 // scalarization. 7720 Cost += getScalarizationOverhead(I, VF); 7721 7722 // Scale the cost by the probability of executing the predicated blocks. 7723 // This assumes the predicated block for each vector lane is equally 7724 // likely. 7725 return Cost / getReciprocalPredBlockProb(); 7726 } 7727 LLVM_FALLTHROUGH; 7728 case Instruction::Add: 7729 case Instruction::FAdd: 7730 case Instruction::Sub: 7731 case Instruction::FSub: 7732 case Instruction::Mul: 7733 case Instruction::FMul: 7734 case Instruction::FDiv: 7735 case Instruction::FRem: 7736 case Instruction::Shl: 7737 case Instruction::LShr: 7738 case Instruction::AShr: 7739 case Instruction::And: 7740 case Instruction::Or: 7741 case Instruction::Xor: { 7742 // Since we will replace the stride by 1 the multiplication should go away. 7743 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7744 return 0; 7745 7746 // Detect reduction patterns 7747 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7748 return *RedCost; 7749 7750 // Certain instructions can be cheaper to vectorize if they have a constant 7751 // second vector operand. One example of this are shifts on x86. 
7752 Value *Op2 = I->getOperand(1); 7753 TargetTransformInfo::OperandValueProperties Op2VP; 7754 TargetTransformInfo::OperandValueKind Op2VK = 7755 TTI.getOperandInfo(Op2, Op2VP); 7756 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7757 Op2VK = TargetTransformInfo::OK_UniformValue; 7758 7759 SmallVector<const Value *, 4> Operands(I->operand_values()); 7760 return TTI.getArithmeticInstrCost( 7761 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7762 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7763 } 7764 case Instruction::FNeg: { 7765 return TTI.getArithmeticInstrCost( 7766 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7767 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, 7768 TargetTransformInfo::OP_None, I->getOperand(0), I); 7769 } 7770 case Instruction::Select: { 7771 SelectInst *SI = cast<SelectInst>(I); 7772 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7773 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7774 7775 const Value *Op0, *Op1; 7776 using namespace llvm::PatternMatch; 7777 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7778 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7779 // select x, y, false --> x & y 7780 // select x, true, y --> x | y 7781 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7782 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7783 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7784 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7785 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7786 Op1->getType()->getScalarSizeInBits() == 1); 7787 7788 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7789 return TTI.getArithmeticInstrCost( 7790 match(I, m_LogicalOr()) ? 
Instruction::Or : Instruction::And, VectorTy, 7791 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7792 } 7793 7794 Type *CondTy = SI->getCondition()->getType(); 7795 if (!ScalarCond) 7796 CondTy = VectorType::get(CondTy, VF); 7797 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, 7798 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7799 } 7800 case Instruction::ICmp: 7801 case Instruction::FCmp: { 7802 Type *ValTy = I->getOperand(0)->getType(); 7803 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7804 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7805 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7806 VectorTy = ToVectorTy(ValTy, VF); 7807 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7808 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7809 } 7810 case Instruction::Store: 7811 case Instruction::Load: { 7812 ElementCount Width = VF; 7813 if (Width.isVector()) { 7814 InstWidening Decision = getWideningDecision(I, Width); 7815 assert(Decision != CM_Unknown && 7816 "CM decision should be taken at this point"); 7817 if (Decision == CM_Scalarize) 7818 Width = ElementCount::getFixed(1); 7819 } 7820 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7821 return getMemoryInstructionCost(I, VF); 7822 } 7823 case Instruction::BitCast: 7824 if (I->getType()->isPointerTy()) 7825 return 0; 7826 LLVM_FALLTHROUGH; 7827 case Instruction::ZExt: 7828 case Instruction::SExt: 7829 case Instruction::FPToUI: 7830 case Instruction::FPToSI: 7831 case Instruction::FPExt: 7832 case Instruction::PtrToInt: 7833 case Instruction::IntToPtr: 7834 case Instruction::SIToFP: 7835 case Instruction::UIToFP: 7836 case Instruction::Trunc: 7837 case Instruction::FPTrunc: { 7838 // Computes the CastContextHint from a Load/Store instruction. 7839 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7840 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7841 "Expected a load or a store!"); 7842 7843 if (VF.isScalar() || !TheLoop->contains(I)) 7844 return TTI::CastContextHint::Normal; 7845 7846 switch (getWideningDecision(I, VF)) { 7847 case LoopVectorizationCostModel::CM_GatherScatter: 7848 return TTI::CastContextHint::GatherScatter; 7849 case LoopVectorizationCostModel::CM_Interleave: 7850 return TTI::CastContextHint::Interleave; 7851 case LoopVectorizationCostModel::CM_Scalarize: 7852 case LoopVectorizationCostModel::CM_Widen: 7853 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7854 : TTI::CastContextHint::Normal; 7855 case LoopVectorizationCostModel::CM_Widen_Reverse: 7856 return TTI::CastContextHint::Reversed; 7857 case LoopVectorizationCostModel::CM_Unknown: 7858 llvm_unreachable("Instr did not go through cost modelling?"); 7859 } 7860 7861 llvm_unreachable("Unhandled case!"); 7862 }; 7863 7864 unsigned Opcode = I->getOpcode(); 7865 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7866 // For Trunc, the context is the only user, which must be a StoreInst. 7867 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7868 if (I->hasOneUse()) 7869 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7870 CCH = ComputeCCH(Store); 7871 } 7872 // For Z/Sext, the context is the operand, which must be a LoadInst. 
7873 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7874 Opcode == Instruction::FPExt) { 7875 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7876 CCH = ComputeCCH(Load); 7877 } 7878 7879 // We optimize the truncation of induction variables having constant 7880 // integer steps. The cost of these truncations is the same as the scalar 7881 // operation. 7882 if (isOptimizableIVTruncate(I, VF)) { 7883 auto *Trunc = cast<TruncInst>(I); 7884 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7885 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7886 } 7887 7888 // Detect reduction patterns 7889 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7890 return *RedCost; 7891 7892 Type *SrcScalarTy = I->getOperand(0)->getType(); 7893 Type *SrcVecTy = 7894 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7895 if (canTruncateToMinimalBitwidth(I, VF)) { 7896 // This cast is going to be shrunk. This may remove the cast or it might 7897 // turn it into slightly different cast. For example, if MinBW == 16, 7898 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7899 // 7900 // Calculate the modified src and dest types. 7901 Type *MinVecTy = VectorTy; 7902 if (Opcode == Instruction::Trunc) { 7903 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7904 VectorTy = 7905 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7906 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7907 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7908 VectorTy = 7909 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7910 } 7911 } 7912 7913 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7914 } 7915 case Instruction::Call: { 7916 bool NeedToScalarize; 7917 CallInst *CI = cast<CallInst>(I); 7918 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7919 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7920 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7921 return std::min(CallCost, IntrinsicCost); 7922 } 7923 return CallCost; 7924 } 7925 case Instruction::ExtractValue: 7926 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7927 case Instruction::Alloca: 7928 // We cannot easily widen alloca to a scalable alloca, as 7929 // the result would need to be a vector of pointers. 7930 if (VF.isScalable()) 7931 return InstructionCost::getInvalid(); 7932 LLVM_FALLTHROUGH; 7933 default: 7934 // This opcode is unknown. Assume that it is the same as 'mul'. 7935 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7936 } // end of switch. 
7937 } 7938 7939 char LoopVectorize::ID = 0; 7940 7941 static const char lv_name[] = "Loop Vectorization"; 7942 7943 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7944 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7945 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7946 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7947 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7948 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7949 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7950 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7951 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7952 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7953 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7954 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7955 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7956 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7957 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7958 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7959 7960 namespace llvm { 7961 7962 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7963 7964 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7965 bool VectorizeOnlyWhenForced) { 7966 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7967 } 7968 7969 } // end namespace llvm 7970 7971 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7972 // Check if the pointer operand of a load or store instruction is 7973 // consecutive. 7974 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7975 return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr); 7976 return false; 7977 } 7978 7979 void LoopVectorizationCostModel::collectValuesToIgnore() { 7980 // Ignore ephemeral values. 7981 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7982 7983 // Ignore type-promoting instructions we identified during reduction 7984 // detection. 7985 for (auto &Reduction : Legal->getReductionVars()) { 7986 RecurrenceDescriptor &RedDes = Reduction.second; 7987 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7988 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7989 } 7990 // Ignore type-casting instructions we identified during induction 7991 // detection. 7992 for (auto &Induction : Legal->getInductionVars()) { 7993 InductionDescriptor &IndDes = Induction.second; 7994 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7995 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7996 } 7997 } 7998 7999 void LoopVectorizationCostModel::collectInLoopReductions() { 8000 for (auto &Reduction : Legal->getReductionVars()) { 8001 PHINode *Phi = Reduction.first; 8002 RecurrenceDescriptor &RdxDesc = Reduction.second; 8003 8004 // We don't collect reductions that are type promoted (yet). 8005 if (RdxDesc.getRecurrenceType() != Phi->getType()) 8006 continue; 8007 8008 // If the target would prefer this reduction to happen "in-loop", then we 8009 // want to record it as such. 8010 unsigned Opcode = RdxDesc.getOpcode(); 8011 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 8012 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 8013 TargetTransformInfo::ReductionFlags())) 8014 continue; 8015 8016 // Check that we can correctly put the reductions into the loop, by 8017 // finding the chain of operations that leads from the phi to the loop 8018 // exit value. 
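    // If no such chain can be formed, the reduction is left as an
    // out-of-loop reduction.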
8019 SmallVector<Instruction *, 4> ReductionOperations = 8020 RdxDesc.getReductionOpChain(Phi, TheLoop); 8021 bool InLoop = !ReductionOperations.empty(); 8022 if (InLoop) { 8023 InLoopReductionChains[Phi] = ReductionOperations; 8024 // Add the elements to InLoopReductionImmediateChains for cost modelling. 8025 Instruction *LastChain = Phi; 8026 for (auto *I : ReductionOperations) { 8027 InLoopReductionImmediateChains[I] = LastChain; 8028 LastChain = I; 8029 } 8030 } 8031 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 8032 << " reduction for phi: " << *Phi << "\n"); 8033 } 8034 } 8035 8036 // TODO: we could return a pair of values that specify the max VF and 8037 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 8038 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment 8039 // doesn't have a cost model that can choose which plan to execute if 8040 // more than one is generated. 8041 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 8042 LoopVectorizationCostModel &CM) { 8043 unsigned WidestType; 8044 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 8045 return WidestVectorRegBits / WidestType; 8046 } 8047 8048 VectorizationFactor 8049 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { 8050 assert(!UserVF.isScalable() && "scalable vectors not yet supported"); 8051 ElementCount VF = UserVF; 8052 // Outer loop handling: They may require CFG and instruction level 8053 // transformations before even evaluating whether vectorization is profitable. 8054 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 8055 // the vectorization pipeline. 8056 if (!OrigLoop->isInnermost()) { 8057 // If the user doesn't provide a vectorization factor, determine a 8058 // reasonable one. 8059 if (UserVF.isZero()) { 8060 VF = ElementCount::getFixed(determineVPlanVF( 8061 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 8062 .getFixedSize(), 8063 CM)); 8064 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 8065 8066 // Make sure we have a VF > 1 for stress testing. 8067 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { 8068 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 8069 << "overriding computed VF.\n"); 8070 VF = ElementCount::getFixed(4); 8071 } 8072 } 8073 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 8074 assert(isPowerOf2_32(VF.getKnownMinValue()) && 8075 "VF needs to be a power of two"); 8076 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") 8077 << "VF " << VF << " to build VPlans.\n"); 8078 buildVPlans(VF, VF); 8079 8080 // For VPlan build stress testing, we bail out after VPlan construction. 8081 if (VPlanBuildStressTest) 8082 return VectorizationFactor::Disabled(); 8083 8084 return {VF, 0 /*Cost*/}; 8085 } 8086 8087 LLVM_DEBUG( 8088 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 8089 "VPlan-native path.\n"); 8090 return VectorizationFactor::Disabled(); 8091 } 8092 8093 Optional<VectorizationFactor> 8094 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { 8095 assert(OrigLoop->isInnermost() && "Inner loop expected."); 8096 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC); 8097 if (!MaxFactors) // Cases that should not to be vectorized nor interleaved. 8098 return None; 8099 8100 // Invalidate interleave groups if all blocks of loop will be predicated. 
8101 if (CM.blockNeedsPredication(OrigLoop->getHeader()) && 8102 !useMaskedInterleavedAccesses(*TTI)) { 8103 LLVM_DEBUG( 8104 dbgs() 8105 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 8106 "which requires masked-interleaved support.\n"); 8107 if (CM.InterleaveInfo.invalidateGroups()) 8108 // Invalidating interleave groups also requires invalidating all decisions 8109 // based on them, which includes widening decisions and uniform and scalar 8110 // values. 8111 CM.invalidateCostModelingDecisions(); 8112 } 8113 8114 ElementCount MaxUserVF = 8115 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; 8116 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); 8117 if (!UserVF.isZero() && UserVFIsLegal) { 8118 assert(isPowerOf2_32(UserVF.getKnownMinValue()) && 8119 "VF needs to be a power of two"); 8120 // Collect the instructions (and their associated costs) that will be more 8121 // profitable to scalarize. 8122 if (CM.selectUserVectorizationFactor(UserVF)) { 8123 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 8124 CM.collectInLoopReductions(); 8125 buildVPlansWithVPRecipes(UserVF, UserVF); 8126 LLVM_DEBUG(printPlans(dbgs())); 8127 return {{UserVF, 0}}; 8128 } else 8129 reportVectorizationInfo("UserVF ignored because of invalid costs.", 8130 "InvalidCost", ORE, OrigLoop); 8131 } 8132 8133 // Populate the set of Vectorization Factor Candidates. 8134 ElementCountSet VFCandidates; 8135 for (auto VF = ElementCount::getFixed(1); 8136 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) 8137 VFCandidates.insert(VF); 8138 for (auto VF = ElementCount::getScalable(1); 8139 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) 8140 VFCandidates.insert(VF); 8141 8142 for (const auto &VF : VFCandidates) { 8143 // Collect Uniform and Scalar instructions after vectorization with VF. 8144 CM.collectUniformsAndScalars(VF); 8145 8146 // Collect the instructions (and their associated costs) that will be more 8147 // profitable to scalarize. 8148 if (VF.isVector()) 8149 CM.collectInstsToScalarize(VF); 8150 } 8151 8152 CM.collectInLoopReductions(); 8153 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); 8154 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); 8155 8156 LLVM_DEBUG(printPlans(dbgs())); 8157 if (!MaxFactors.hasVector()) 8158 return VectorizationFactor::Disabled(); 8159 8160 // Select the optimal vectorization factor. 8161 auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); 8162 8163 // Check if it is profitable to vectorize with runtime checks. 
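  // Too many runtime pointer checks can outweigh the benefit of vectorization:
  // bail out if the number of checks exceeds the pragma threshold, or exceeds
  // the default threshold when the user has not allowed memory-op reordering.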
8164 unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks(); 8165 if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) { 8166 bool PragmaThresholdReached = 8167 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 8168 bool ThresholdReached = 8169 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 8170 if ((ThresholdReached && !Hints.allowReordering()) || 8171 PragmaThresholdReached) { 8172 ORE->emit([&]() { 8173 return OptimizationRemarkAnalysisAliasing( 8174 DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(), 8175 OrigLoop->getHeader()) 8176 << "loop not vectorized: cannot prove it is safe to reorder " 8177 "memory operations"; 8178 }); 8179 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 8180 Hints.emitRemarkWithHints(); 8181 return VectorizationFactor::Disabled(); 8182 } 8183 } 8184 return SelectedVF; 8185 } 8186 8187 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) { 8188 LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF 8189 << '\n'); 8190 BestVF = VF; 8191 BestUF = UF; 8192 8193 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 8194 return !Plan->hasVF(VF); 8195 }); 8196 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 8197 } 8198 8199 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 8200 DominatorTree *DT) { 8201 // Perform the actual loop transformation. 8202 8203 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 8204 assert(BestVF.hasValue() && "Vectorization Factor is missing"); 8205 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 8206 8207 VPTransformState State{ 8208 *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()}; 8209 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 8210 State.TripCount = ILV.getOrCreateTripCount(nullptr); 8211 State.CanonicalIV = ILV.Induction; 8212 8213 ILV.printDebugTracesAtStart(); 8214 8215 //===------------------------------------------------===// 8216 // 8217 // Notice: any optimization or new instruction that go 8218 // into the code below should also be implemented in 8219 // the cost-model. 8220 // 8221 //===------------------------------------------------===// 8222 8223 // 2. Copy and widen instructions from the old loop into the new loop. 8224 VPlans.front()->execute(&State); 8225 8226 // 3. Fix the vectorized code: take care of header phi's, live-outs, 8227 // predication, updating analyses. 
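  //    For instance (a sketch, not tied to a particular test): a reduction
  //    such as `for (i) sum += a[i];` is widened into a vector accumulator
  //    inside the loop; "fixing" the vectorized code is where the per-lane
  //    partial sums are combined into a single scalar in the middle block,
  //    where that scalar is wired into the resume phi of the scalar remainder
  //    loop, and where uses of `sum` after the loop are rewritten to the
  //    reduced value.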
8228 ILV.fixVectorizedLoop(State); 8229 8230 ILV.printDebugTracesAtEnd(); 8231 } 8232 8233 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 8234 void LoopVectorizationPlanner::printPlans(raw_ostream &O) { 8235 for (const auto &Plan : VPlans) 8236 if (PrintVPlansInDotFormat) 8237 Plan->printDOT(O); 8238 else 8239 Plan->print(O); 8240 } 8241 #endif 8242 8243 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 8244 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 8245 8246 // We create new control-flow for the vectorized loop, so the original exit 8247 // conditions will be dead after vectorization if it's only used by the 8248 // terminator 8249 SmallVector<BasicBlock*> ExitingBlocks; 8250 OrigLoop->getExitingBlocks(ExitingBlocks); 8251 for (auto *BB : ExitingBlocks) { 8252 auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0)); 8253 if (!Cmp || !Cmp->hasOneUse()) 8254 continue; 8255 8256 // TODO: we should introduce a getUniqueExitingBlocks on Loop 8257 if (!DeadInstructions.insert(Cmp).second) 8258 continue; 8259 8260 // The operands of the icmp is often a dead trunc, used by IndUpdate. 8261 // TODO: can recurse through operands in general 8262 for (Value *Op : Cmp->operands()) { 8263 if (isa<TruncInst>(Op) && Op->hasOneUse()) 8264 DeadInstructions.insert(cast<Instruction>(Op)); 8265 } 8266 } 8267 8268 // We create new "steps" for induction variable updates to which the original 8269 // induction variables map. An original update instruction will be dead if 8270 // all its users except the induction variable are dead. 8271 auto *Latch = OrigLoop->getLoopLatch(); 8272 for (auto &Induction : Legal->getInductionVars()) { 8273 PHINode *Ind = Induction.first; 8274 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 8275 8276 // If the tail is to be folded by masking, the primary induction variable, 8277 // if exists, isn't dead: it will be used for masking. Don't kill it. 8278 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 8279 continue; 8280 8281 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 8282 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 8283 })) 8284 DeadInstructions.insert(IndUpdate); 8285 8286 // We record as "Dead" also the type-casting instructions we had identified 8287 // during induction analysis. We don't need any handling for them in the 8288 // vectorized loop because we have proven that, under a proper runtime 8289 // test guarding the vectorized loop, the value of the phi, and the casted 8290 // value of the phi, are the same. The last instruction in this casting chain 8291 // will get its scalar/vector/widened def from the scalar/vector/widened def 8292 // of the respective phi node. Any other casts in the induction def-use chain 8293 // have no other uses outside the phi update chain, and will be ignored. 8294 InductionDescriptor &IndDes = Induction.second; 8295 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 8296 DeadInstructions.insert(Casts.begin(), Casts.end()); 8297 } 8298 } 8299 8300 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 8301 8302 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 8303 8304 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 8305 Instruction::BinaryOps BinOp) { 8306 // When unrolling and the VF is 1, we only need to add a simple scalar. 
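  // For example (purely illustrative): with VF = 1 and UF = 4, the four
  // unrolled copies of an induction with step `Step` receive the scalars
  //
  //   Val + 0 * Step, Val + 1 * Step, Val + 2 * Step, Val + 3 * Step
  //
  // i.e. this routine is typically invoked once per unrolled part with
  // StartIdx = VF * Part, which for VF = 1 is just the part number, and it
  // folds the multiply-add below instead of building a step vector.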
8307 Type *Ty = Val->getType(); 8308 assert(!Ty->isVectorTy() && "Val must be a scalar"); 8309 8310 if (Ty->isFloatingPointTy()) { 8311 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 8312 8313 // Floating-point operations inherit FMF via the builder's flags. 8314 Value *MulOp = Builder.CreateFMul(C, Step); 8315 return Builder.CreateBinOp(BinOp, Val, MulOp); 8316 } 8317 Constant *C = ConstantInt::get(Ty, StartIdx); 8318 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 8319 } 8320 8321 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 8322 SmallVector<Metadata *, 4> MDs; 8323 // Reserve first location for self reference to the LoopID metadata node. 8324 MDs.push_back(nullptr); 8325 bool IsUnrollMetadata = false; 8326 MDNode *LoopID = L->getLoopID(); 8327 if (LoopID) { 8328 // First find existing loop unrolling disable metadata. 8329 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 8330 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 8331 if (MD) { 8332 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 8333 IsUnrollMetadata = 8334 S && S->getString().startswith("llvm.loop.unroll.disable"); 8335 } 8336 MDs.push_back(LoopID->getOperand(i)); 8337 } 8338 } 8339 8340 if (!IsUnrollMetadata) { 8341 // Add runtime unroll disable metadata. 8342 LLVMContext &Context = L->getHeader()->getContext(); 8343 SmallVector<Metadata *, 1> DisableOperands; 8344 DisableOperands.push_back( 8345 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 8346 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 8347 MDs.push_back(DisableNode); 8348 MDNode *NewLoopID = MDNode::get(Context, MDs); 8349 // Set operand 0 to refer to the loop id itself. 8350 NewLoopID->replaceOperandWith(0, NewLoopID); 8351 L->setLoopID(NewLoopID); 8352 } 8353 } 8354 8355 //===--------------------------------------------------------------------===// 8356 // EpilogueVectorizerMainLoop 8357 //===--------------------------------------------------------------------===// 8358 8359 /// This function is partially responsible for generating the control flow 8360 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8361 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 8362 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8363 Loop *Lp = createVectorLoopSkeleton(""); 8364 8365 // Generate the code to check the minimum iteration count of the vector 8366 // epilogue (see below). 8367 EPI.EpilogueIterationCountCheck = 8368 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true); 8369 EPI.EpilogueIterationCountCheck->setName("iter.check"); 8370 8371 // Generate the code to check any assumptions that we've made for SCEV 8372 // expressions. 8373 EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader); 8374 8375 // Generate the code that checks at runtime if arrays overlap. We put the 8376 // checks into a separate block to make the more common case of few elements 8377 // faster. 8378 EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 8379 8380 // Generate the iteration count check for the main loop, *after* the check 8381 // for the epilogue loop, so that the path-length is shorter for the case 8382 // that goes directly through the vector epilogue. The longer-path length for 8383 // the main loop is compensated for, by the gain from vectorizing the larger 8384 // trip count. Note: the branch will get updated later on when we vectorize 8385 // the epilogue. 
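  // A worked example of the two checks (numbers are illustrative only): with
  // a main loop of VF=8, UF=2 and an epilogue of VF=4, UF=1, a trip count TC
  // ends up routed roughly as follows once both passes have run:
  //   TC < 4   : skip both vector loops and run only the scalar loop
  //              ("iter.check" above fails);
  //   TC < 16  : skip the main vector loop and run the vector epilogue plus
  //              the scalar remainder ("vector.main.loop.iter.check" fails);
  //   TC >= 16 : run the main vector loop, then the epilogue/scalar tail on
  //              whatever iterations remain.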
8386 EPI.MainLoopIterationCountCheck = 8387 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false); 8388 8389 // Generate the induction variable. 8390 OldInduction = Legal->getPrimaryInduction(); 8391 Type *IdxTy = Legal->getWidestInductionType(); 8392 Value *StartIdx = ConstantInt::get(IdxTy, 0); 8393 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); 8394 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8395 EPI.VectorTripCount = CountRoundDown; 8396 Induction = 8397 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 8398 getDebugLocFromInstOrOperands(OldInduction)); 8399 8400 // Skip induction resume value creation here because they will be created in 8401 // the second pass. If we created them here, they wouldn't be used anyway, 8402 // because the vplan in the second pass still contains the inductions from the 8403 // original loop. 8404 8405 return completeLoopSkeleton(Lp, OrigLoopID); 8406 } 8407 8408 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 8409 LLVM_DEBUG({ 8410 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 8411 << "Main Loop VF:" << EPI.MainLoopVF 8412 << ", Main Loop UF:" << EPI.MainLoopUF 8413 << ", Epilogue Loop VF:" << EPI.EpilogueVF 8414 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8415 }); 8416 } 8417 8418 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 8419 DEBUG_WITH_TYPE(VerboseDebug, { 8420 dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n"; 8421 }); 8422 } 8423 8424 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck( 8425 Loop *L, BasicBlock *Bypass, bool ForEpilogue) { 8426 assert(L && "Expected valid Loop."); 8427 assert(Bypass && "Expected valid bypass basic block."); 8428 ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF; 8429 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 8430 Value *Count = getOrCreateTripCount(L); 8431 // Reuse existing vector loop preheader for TC checks. 8432 // Note that new preheader block is generated for vector loop. 8433 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 8434 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 8435 8436 // Generate code to check if the loop's trip count is less than VF * UF of the 8437 // main vector loop. 8438 auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? 8439 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8440 8441 Value *CheckMinIters = Builder.CreateICmp( 8442 P, Count, getRuntimeVF(Builder, Count->getType(), VFactor * UFactor), 8443 "min.iters.check"); 8444 8445 if (!ForEpilogue) 8446 TCCheckBlock->setName("vector.main.loop.iter.check"); 8447 8448 // Create new preheader for vector loop. 8449 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 8450 DT, LI, nullptr, "vector.ph"); 8451 8452 if (ForEpilogue) { 8453 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 8454 DT->getNode(Bypass)->getIDom()) && 8455 "TC check is expected to dominate Bypass"); 8456 8457 // Update dominator for Bypass & LoopExit. 8458 DT->changeImmediateDominator(Bypass, TCCheckBlock); 8459 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8460 // For loops with multiple exits, there's no edge from the middle block 8461 // to exit blocks (as the epilogue must run) and thus no need to update 8462 // the immediate dominator of the exit blocks. 
8463 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 8464 8465 LoopBypassBlocks.push_back(TCCheckBlock); 8466 8467 // Save the trip count so we don't have to regenerate it in the 8468 // vec.epilog.iter.check. This is safe to do because the trip count 8469 // generated here dominates the vector epilog iter check. 8470 EPI.TripCount = Count; 8471 } 8472 8473 ReplaceInstWithInst( 8474 TCCheckBlock->getTerminator(), 8475 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8476 8477 return TCCheckBlock; 8478 } 8479 8480 //===--------------------------------------------------------------------===// 8481 // EpilogueVectorizerEpilogueLoop 8482 //===--------------------------------------------------------------------===// 8483 8484 /// This function is partially responsible for generating the control flow 8485 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8486 BasicBlock * 8487 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 8488 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8489 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 8490 8491 // Now, compare the remaining count and if there aren't enough iterations to 8492 // execute the vectorized epilogue skip to the scalar part. 8493 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 8494 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 8495 LoopVectorPreHeader = 8496 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 8497 LI, nullptr, "vec.epilog.ph"); 8498 emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, 8499 VecEpilogueIterationCountCheck); 8500 8501 // Adjust the control flow taking the state info from the main loop 8502 // vectorization into account. 8503 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 8504 "expected this to be saved from the previous pass."); 8505 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 8506 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 8507 8508 DT->changeImmediateDominator(LoopVectorPreHeader, 8509 EPI.MainLoopIterationCountCheck); 8510 8511 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 8512 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8513 8514 if (EPI.SCEVSafetyCheck) 8515 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 8516 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8517 if (EPI.MemSafetyCheck) 8518 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 8519 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8520 8521 DT->changeImmediateDominator( 8522 VecEpilogueIterationCountCheck, 8523 VecEpilogueIterationCountCheck->getSinglePredecessor()); 8524 8525 DT->changeImmediateDominator(LoopScalarPreHeader, 8526 EPI.EpilogueIterationCountCheck); 8527 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8528 // If there is an epilogue which must run, there's no edge from the 8529 // middle block to exit blocks and thus no need to update the immediate 8530 // dominator of the exit blocks. 8531 DT->changeImmediateDominator(LoopExitBlock, 8532 EPI.EpilogueIterationCountCheck); 8533 8534 // Keep track of bypass blocks, as they feed start values to the induction 8535 // phis in the scalar loop preheader. 
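  // For instance (illustrative numbers): with a trip count of 23, a main loop
  // covering 16 iterations and a vector epilogue covering 4 more, the primary
  // induction's resume phi in the scalar preheader must receive 20 when
  // arriving from the epilogue, 16 when the epilogue was bypassed after the
  // main loop, and 0 when both vector loops were bypassed. Each bypass block
  // recorded here contributes one such incoming value.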
  if (EPI.SCEVSafetyCheck)
    LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
  if (EPI.MemSafetyCheck)
    LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
  LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);

  // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
  Type *IdxTy = Legal->getWidestInductionType();
  PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
                                         LoopVectorPreHeader->getFirstNonPHI());
  EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
  EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
                           EPI.MainLoopIterationCountCheck);

  // Generate the induction variable.
  OldInduction = Legal->getPrimaryInduction();
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
  Value *StartIdx = EPResumeVal;
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // Generate induction resume values. These variables save the new starting
  // indexes for the scalar loop. They are used to test if there are any tail
  // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to iteration count
  // check, then the resume value for the induction variable comes from
  // the trip count of the main vector loop, hence passing the AdditionalBypass
  // argument.
  createInductionResumeValues(Lp, CountRoundDown,
                              {VecEpilogueIterationCountCheck,
                               EPI.VectorTripCount} /* AdditionalBypass */);

  AddRuntimeUnrollDisableMetaData(Lp);
  return completeLoopSkeleton(Lp, OrigLoopID);
}

BasicBlock *
EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
    Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {

  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  assert(
      (!isa<Instruction>(EPI.TripCount) ||
       DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
      "saved trip count does not dominate insertion point.");
  Value *TC = EPI.TripCount;
  IRBuilder<> Builder(Insert->getTerminator());
  Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the vector epilogue loop.
  auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8592 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8593 8594 Value *CheckMinIters = Builder.CreateICmp( 8595 P, Count, 8596 getRuntimeVF(Builder, Count->getType(), EPI.EpilogueVF * EPI.EpilogueUF), 8597 "min.epilog.iters.check"); 8598 8599 ReplaceInstWithInst( 8600 Insert->getTerminator(), 8601 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8602 8603 LoopBypassBlocks.push_back(Insert); 8604 return Insert; 8605 } 8606 8607 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { 8608 LLVM_DEBUG({ 8609 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" 8610 << "Epilogue Loop VF:" << EPI.EpilogueVF 8611 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8612 }); 8613 } 8614 8615 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { 8616 DEBUG_WITH_TYPE(VerboseDebug, { 8617 dbgs() << "final fn:\n" << *Induction->getFunction() << "\n"; 8618 }); 8619 } 8620 8621 bool LoopVectorizationPlanner::getDecisionAndClampRange( 8622 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { 8623 assert(!Range.isEmpty() && "Trying to test an empty VF range."); 8624 bool PredicateAtRangeStart = Predicate(Range.Start); 8625 8626 for (ElementCount TmpVF = Range.Start * 2; 8627 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) 8628 if (Predicate(TmpVF) != PredicateAtRangeStart) { 8629 Range.End = TmpVF; 8630 break; 8631 } 8632 8633 return PredicateAtRangeStart; 8634 } 8635 8636 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8637 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8638 /// of VF's starting at a given VF and extending it as much as possible. Each 8639 /// vectorization decision can potentially shorten this sub-range during 8640 /// buildVPlan(). 8641 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8642 ElementCount MaxVF) { 8643 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8644 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8645 VFRange SubRange = {VF, MaxVFPlusOne}; 8646 VPlans.push_back(buildVPlan(SubRange)); 8647 VF = SubRange.End; 8648 } 8649 } 8650 8651 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8652 VPlanPtr &Plan) { 8653 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8654 8655 // Look for cached value. 8656 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8657 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8658 if (ECEntryIt != EdgeMaskCache.end()) 8659 return ECEntryIt->second; 8660 8661 VPValue *SrcMask = createBlockInMask(Src, Plan); 8662 8663 // The terminator has to be a branch inst! 8664 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8665 assert(BI && "Unexpected terminator found"); 8666 8667 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8668 return EdgeMaskCache[Edge] = SrcMask; 8669 8670 // If source is an exiting block, we know the exit edge is dynamically dead 8671 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8672 // adding uses of an otherwise potentially dead instruction. 8673 if (OrigLoop->isLoopExiting(Src)) 8674 return EdgeMaskCache[Edge] = SrcMask; 8675 8676 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8677 assert(EdgeMask && "No Edge Mask found for condition"); 8678 8679 if (BI->getSuccessor(0) != Dst) 8680 EdgeMask = Builder.createNot(EdgeMask); 8681 8682 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 
8683 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8684 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8685 // The select version does not introduce new UB if SrcMask is false and 8686 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8687 VPValue *False = Plan->getOrAddVPValue( 8688 ConstantInt::getFalse(BI->getCondition()->getType())); 8689 EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False); 8690 } 8691 8692 return EdgeMaskCache[Edge] = EdgeMask; 8693 } 8694 8695 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8696 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8697 8698 // Look for cached value. 8699 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8700 if (BCEntryIt != BlockMaskCache.end()) 8701 return BCEntryIt->second; 8702 8703 // All-one mask is modelled as no-mask following the convention for masked 8704 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8705 VPValue *BlockMask = nullptr; 8706 8707 if (OrigLoop->getHeader() == BB) { 8708 if (!CM.blockNeedsPredication(BB)) 8709 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8710 8711 // Create the block in mask as the first non-phi instruction in the block. 8712 VPBuilder::InsertPointGuard Guard(Builder); 8713 auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi(); 8714 Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint); 8715 8716 // Introduce the early-exit compare IV <= BTC to form header block mask. 8717 // This is used instead of IV < TC because TC may wrap, unlike BTC. 8718 // Start by constructing the desired canonical IV. 8719 VPValue *IV = nullptr; 8720 if (Legal->getPrimaryInduction()) 8721 IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction()); 8722 else { 8723 auto IVRecipe = new VPWidenCanonicalIVRecipe(); 8724 Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint); 8725 IV = IVRecipe->getVPSingleValue(); 8726 } 8727 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8728 bool TailFolded = !CM.isScalarEpilogueAllowed(); 8729 8730 if (TailFolded && CM.TTI.emitGetActiveLaneMask()) { 8731 // While ActiveLaneMask is a binary op that consumes the loop tripcount 8732 // as a second argument, we only pass the IV here and extract the 8733 // tripcount from the transform state where codegen of the VP instructions 8734 // happen. 8735 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV}); 8736 } else { 8737 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8738 } 8739 return BlockMaskCache[BB] = BlockMask; 8740 } 8741 8742 // This is the block mask. We OR all incoming edges. 8743 for (auto *Predecessor : predecessors(BB)) { 8744 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8745 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8746 return BlockMaskCache[BB] = EdgeMask; 8747 8748 if (!BlockMask) { // BlockMask has its initialized nullptr value. 
8749 BlockMask = EdgeMask; 8750 continue; 8751 } 8752 8753 BlockMask = Builder.createOr(BlockMask, EdgeMask); 8754 } 8755 8756 return BlockMaskCache[BB] = BlockMask; 8757 } 8758 8759 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8760 ArrayRef<VPValue *> Operands, 8761 VFRange &Range, 8762 VPlanPtr &Plan) { 8763 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8764 "Must be called with either a load or store"); 8765 8766 auto willWiden = [&](ElementCount VF) -> bool { 8767 if (VF.isScalar()) 8768 return false; 8769 LoopVectorizationCostModel::InstWidening Decision = 8770 CM.getWideningDecision(I, VF); 8771 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8772 "CM decision should be taken at this point."); 8773 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8774 return true; 8775 if (CM.isScalarAfterVectorization(I, VF) || 8776 CM.isProfitableToScalarize(I, VF)) 8777 return false; 8778 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8779 }; 8780 8781 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8782 return nullptr; 8783 8784 VPValue *Mask = nullptr; 8785 if (Legal->isMaskRequired(I)) 8786 Mask = createBlockInMask(I->getParent(), Plan); 8787 8788 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8789 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask); 8790 8791 StoreInst *Store = cast<StoreInst>(I); 8792 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8793 Mask); 8794 } 8795 8796 VPWidenIntOrFpInductionRecipe * 8797 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, 8798 ArrayRef<VPValue *> Operands) const { 8799 // Check if this is an integer or fp induction. If so, build the recipe that 8800 // produces its scalar and vector values. 8801 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8802 if (II.getKind() == InductionDescriptor::IK_IntInduction || 8803 II.getKind() == InductionDescriptor::IK_FpInduction) { 8804 assert(II.getStartValue() == 8805 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8806 const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts(); 8807 return new VPWidenIntOrFpInductionRecipe( 8808 Phi, Operands[0], Casts.empty() ? nullptr : Casts.front()); 8809 } 8810 8811 return nullptr; 8812 } 8813 8814 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8815 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, 8816 VPlan &Plan) const { 8817 // Optimize the special case where the source is a constant integer 8818 // induction variable. Notice that we can only optimize the 'trunc' case 8819 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8820 // (c) other casts depend on pointer size. 8821 8822 // Determine whether \p K is a truncation based on an induction variable that 8823 // can be optimized. 
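  // A sketch of the pattern this covers (hypothetical source, not from a
  // specific test): a wide induction that is consumed through a narrowing
  // cast,
  //
  //   for (long i = 0; i < n; ++i)
  //     b[i] = (int)i;               // trunc of the canonical IV
  //
  // can be widened directly as a 32-bit vector induction instead of widening
  // the 64-bit induction and then truncating every element.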
8824 auto isOptimizableIVTruncate = 8825 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8826 return [=](ElementCount VF) -> bool { 8827 return CM.isOptimizableIVTruncate(K, VF); 8828 }; 8829 }; 8830 8831 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8832 isOptimizableIVTruncate(I), Range)) { 8833 8834 InductionDescriptor II = 8835 Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0))); 8836 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8837 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 8838 Start, nullptr, I); 8839 } 8840 return nullptr; 8841 } 8842 8843 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8844 ArrayRef<VPValue *> Operands, 8845 VPlanPtr &Plan) { 8846 // If all incoming values are equal, the incoming VPValue can be used directly 8847 // instead of creating a new VPBlendRecipe. 8848 VPValue *FirstIncoming = Operands[0]; 8849 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8850 return FirstIncoming == Inc; 8851 })) { 8852 return Operands[0]; 8853 } 8854 8855 // We know that all PHIs in non-header blocks are converted into selects, so 8856 // we don't have to worry about the insertion order and we can just use the 8857 // builder. At this point we generate the predication tree. There may be 8858 // duplications since this is a simple recursive scan, but future 8859 // optimizations will clean it up. 8860 SmallVector<VPValue *, 2> OperandsWithMask; 8861 unsigned NumIncoming = Phi->getNumIncomingValues(); 8862 8863 for (unsigned In = 0; In < NumIncoming; In++) { 8864 VPValue *EdgeMask = 8865 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8866 assert((EdgeMask || NumIncoming == 1) && 8867 "Multiple predecessors with one having a full mask"); 8868 OperandsWithMask.push_back(Operands[In]); 8869 if (EdgeMask) 8870 OperandsWithMask.push_back(EdgeMask); 8871 } 8872 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8873 } 8874 8875 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8876 ArrayRef<VPValue *> Operands, 8877 VFRange &Range) const { 8878 8879 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8880 [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); }, 8881 Range); 8882 8883 if (IsPredicated) 8884 return nullptr; 8885 8886 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8887 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8888 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8889 ID == Intrinsic::pseudoprobe || 8890 ID == Intrinsic::experimental_noalias_scope_decl)) 8891 return nullptr; 8892 8893 auto willWiden = [&](ElementCount VF) -> bool { 8894 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8895 // The following case may be scalarized depending on the VF. 8896 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8897 // version of the instruction. 8898 // Is it beneficial to perform intrinsic call compared to lib call? 8899 bool NeedToScalarize = false; 8900 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8901 InstructionCost IntrinsicCost = ID ? 
CM.getVectorIntrinsicCost(CI, VF) : 0; 8902 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8903 return UseVectorIntrinsic || !NeedToScalarize; 8904 }; 8905 8906 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8907 return nullptr; 8908 8909 ArrayRef<VPValue *> Ops = Operands.take_front(CI->getNumArgOperands()); 8910 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 8911 } 8912 8913 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8914 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8915 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8916 // Instruction should be widened, unless it is scalar after vectorization, 8917 // scalarization is profitable or it is predicated. 8918 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8919 return CM.isScalarAfterVectorization(I, VF) || 8920 CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I); 8921 }; 8922 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8923 Range); 8924 } 8925 8926 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8927 ArrayRef<VPValue *> Operands) const { 8928 auto IsVectorizableOpcode = [](unsigned Opcode) { 8929 switch (Opcode) { 8930 case Instruction::Add: 8931 case Instruction::And: 8932 case Instruction::AShr: 8933 case Instruction::BitCast: 8934 case Instruction::FAdd: 8935 case Instruction::FCmp: 8936 case Instruction::FDiv: 8937 case Instruction::FMul: 8938 case Instruction::FNeg: 8939 case Instruction::FPExt: 8940 case Instruction::FPToSI: 8941 case Instruction::FPToUI: 8942 case Instruction::FPTrunc: 8943 case Instruction::FRem: 8944 case Instruction::FSub: 8945 case Instruction::ICmp: 8946 case Instruction::IntToPtr: 8947 case Instruction::LShr: 8948 case Instruction::Mul: 8949 case Instruction::Or: 8950 case Instruction::PtrToInt: 8951 case Instruction::SDiv: 8952 case Instruction::Select: 8953 case Instruction::SExt: 8954 case Instruction::Shl: 8955 case Instruction::SIToFP: 8956 case Instruction::SRem: 8957 case Instruction::Sub: 8958 case Instruction::Trunc: 8959 case Instruction::UDiv: 8960 case Instruction::UIToFP: 8961 case Instruction::URem: 8962 case Instruction::Xor: 8963 case Instruction::ZExt: 8964 return true; 8965 } 8966 return false; 8967 }; 8968 8969 if (!IsVectorizableOpcode(I->getOpcode())) 8970 return nullptr; 8971 8972 // Success: widen this instruction. 
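  // As a concrete (illustrative) picture of what "widening" means here: a
  // scalar `c = a + b` inside the loop becomes a single vector add over VF
  // lanes, conceptually `c[0..VF-1] = a[0..VF-1] + b[0..VF-1]`, with the
  // operands taken from the already-widened defs recorded in Operands.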
8973 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); 8974 } 8975 8976 void VPRecipeBuilder::fixHeaderPhis() { 8977 BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); 8978 for (VPWidenPHIRecipe *R : PhisToFix) { 8979 auto *PN = cast<PHINode>(R->getUnderlyingValue()); 8980 VPRecipeBase *IncR = 8981 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); 8982 R->addOperand(IncR->getVPSingleValue()); 8983 } 8984 } 8985 8986 VPBasicBlock *VPRecipeBuilder::handleReplication( 8987 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8988 VPlanPtr &Plan) { 8989 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8990 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8991 Range); 8992 8993 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8994 [&](ElementCount VF) { return CM.isPredicatedInst(I); }, Range); 8995 8996 // Even if the instruction is not marked as uniform, there are certain 8997 // intrinsic calls that can be effectively treated as such, so we check for 8998 // them here. Conservatively, we only do this for scalable vectors, since 8999 // for fixed-width VFs we can always fall back on full scalarization. 9000 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) { 9001 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) { 9002 case Intrinsic::assume: 9003 case Intrinsic::lifetime_start: 9004 case Intrinsic::lifetime_end: 9005 // For scalable vectors if one of the operands is variant then we still 9006 // want to mark as uniform, which will generate one instruction for just 9007 // the first lane of the vector. We can't scalarize the call in the same 9008 // way as for fixed-width vectors because we don't know how many lanes 9009 // there are. 9010 // 9011 // The reasons for doing it this way for scalable vectors are: 9012 // 1. For the assume intrinsic generating the instruction for the first 9013 // lane is still be better than not generating any at all. For 9014 // example, the input may be a splat across all lanes. 9015 // 2. For the lifetime start/end intrinsics the pointer operand only 9016 // does anything useful when the input comes from a stack object, 9017 // which suggests it should always be uniform. For non-stack objects 9018 // the effect is to poison the object, which still allows us to 9019 // remove the call. 9020 IsUniform = true; 9021 break; 9022 default: 9023 break; 9024 } 9025 } 9026 9027 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), 9028 IsUniform, IsPredicated); 9029 setRecipe(I, Recipe); 9030 Plan->addVPValue(I, Recipe); 9031 9032 // Find if I uses a predicated instruction. If so, it will use its scalar 9033 // value. Avoid hoisting the insert-element which packs the scalar value into 9034 // a vector value, as that happens iff all users use the vector value. 9035 for (VPValue *Op : Recipe->operands()) { 9036 auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef()); 9037 if (!PredR) 9038 continue; 9039 auto *RepR = 9040 cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef()); 9041 assert(RepR->isPredicated() && 9042 "expected Replicate recipe to be predicated"); 9043 RepR->setAlsoPack(false); 9044 } 9045 9046 // Finalize the recipe for Instr, first if it is not predicated. 
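  // For the predicated case below, think of a guarded operation such as the
  // following hypothetical source:
  //
  //   for (int i = 0; i < n; ++i)
  //     if (b[i] != 0)
  //       q[i] = p[i] / b[i];        // must not execute for masked-off lanes
  //
  // The division cannot simply be widened, because lanes where b[i] == 0
  // would introduce a trap the scalar loop never had. Instead the replicated
  // scalar instances are wrapped in an if-then "replicate region" controlled
  // by the block mask, built in createReplicateRegion below.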
9047 if (!IsPredicated) { 9048 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 9049 VPBB->appendRecipe(Recipe); 9050 return VPBB; 9051 } 9052 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 9053 assert(VPBB->getSuccessors().empty() && 9054 "VPBB has successors when handling predicated replication."); 9055 // Record predicated instructions for above packing optimizations. 9056 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 9057 VPBlockUtils::insertBlockAfter(Region, VPBB); 9058 auto *RegSucc = new VPBasicBlock(); 9059 VPBlockUtils::insertBlockAfter(RegSucc, Region); 9060 return RegSucc; 9061 } 9062 9063 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 9064 VPRecipeBase *PredRecipe, 9065 VPlanPtr &Plan) { 9066 // Instructions marked for predication are replicated and placed under an 9067 // if-then construct to prevent side-effects. 9068 9069 // Generate recipes to compute the block mask for this region. 9070 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 9071 9072 // Build the triangular if-then region. 9073 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 9074 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 9075 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 9076 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 9077 auto *PHIRecipe = Instr->getType()->isVoidTy() 9078 ? nullptr 9079 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 9080 if (PHIRecipe) { 9081 Plan->removeVPValueFor(Instr); 9082 Plan->addVPValue(Instr, PHIRecipe); 9083 } 9084 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 9085 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 9086 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 9087 9088 // Note: first set Entry as region entry and then connect successors starting 9089 // from it in order, to propagate the "parent" of each VPBasicBlock. 9090 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 9091 VPBlockUtils::connectBlocks(Pred, Exit); 9092 9093 return Region; 9094 } 9095 9096 VPRecipeOrVPValueTy 9097 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 9098 ArrayRef<VPValue *> Operands, 9099 VFRange &Range, VPlanPtr &Plan) { 9100 // First, check for specific widening recipes that deal with calls, memory 9101 // operations, inductions and Phi nodes. 
9102 if (auto *CI = dyn_cast<CallInst>(Instr)) 9103 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 9104 9105 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 9106 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 9107 9108 VPRecipeBase *Recipe; 9109 if (auto Phi = dyn_cast<PHINode>(Instr)) { 9110 if (Phi->getParent() != OrigLoop->getHeader()) 9111 return tryToBlend(Phi, Operands, Plan); 9112 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands))) 9113 return toVPRecipeResult(Recipe); 9114 9115 VPWidenPHIRecipe *PhiRecipe = nullptr; 9116 if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) { 9117 VPValue *StartV = Operands[0]; 9118 if (Legal->isReductionVariable(Phi)) { 9119 RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi]; 9120 assert(RdxDesc.getRecurrenceStartValue() == 9121 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 9122 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 9123 CM.isInLoopReduction(Phi), 9124 CM.useOrderedReductions(RdxDesc)); 9125 } else { 9126 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 9127 } 9128 9129 // Record the incoming value from the backedge, so we can add the incoming 9130 // value from the backedge after all recipes have been created. 9131 recordRecipeOf(cast<Instruction>( 9132 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 9133 PhisToFix.push_back(PhiRecipe); 9134 } else { 9135 // TODO: record start and backedge value for remaining pointer induction 9136 // phis. 9137 assert(Phi->getType()->isPointerTy() && 9138 "only pointer phis should be handled here"); 9139 PhiRecipe = new VPWidenPHIRecipe(Phi); 9140 } 9141 9142 return toVPRecipeResult(PhiRecipe); 9143 } 9144 9145 if (isa<TruncInst>(Instr) && 9146 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands, 9147 Range, *Plan))) 9148 return toVPRecipeResult(Recipe); 9149 9150 if (!shouldWiden(Instr, Range)) 9151 return nullptr; 9152 9153 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr)) 9154 return toVPRecipeResult(new VPWidenGEPRecipe( 9155 GEP, make_range(Operands.begin(), Operands.end()), OrigLoop)); 9156 9157 if (auto *SI = dyn_cast<SelectInst>(Instr)) { 9158 bool InvariantCond = 9159 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop); 9160 return toVPRecipeResult(new VPWidenSelectRecipe( 9161 *SI, make_range(Operands.begin(), Operands.end()), InvariantCond)); 9162 } 9163 9164 return toVPRecipeResult(tryToWiden(Instr, Operands)); 9165 } 9166 9167 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, 9168 ElementCount MaxVF) { 9169 assert(OrigLoop->isInnermost() && "Inner loop expected."); 9170 9171 // Collect instructions from the original loop that will become trivially dead 9172 // in the vectorized loop. We don't need to vectorize these instructions. For 9173 // example, original induction update instructions can become dead because we 9174 // separately emit induction "steps" when generating code for the new loop. 9175 // Similarly, we create a new latch condition when setting up the structure 9176 // of the new loop, so the old one can become dead. 9177 SmallPtrSet<Instruction *, 4> DeadInstructions; 9178 collectTriviallyDeadInstructions(DeadInstructions); 9179 9180 // Add assume instructions we need to drop to DeadInstructions, to prevent 9181 // them from being added to the VPlan. 9182 // TODO: We only need to drop assumes in blocks that get flattend. 
If the 9183 // control flow is preserved, we should keep them. 9184 auto &ConditionalAssumes = Legal->getConditionalAssumes(); 9185 DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end()); 9186 9187 MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 9188 // Dead instructions do not need sinking. Remove them from SinkAfter. 9189 for (Instruction *I : DeadInstructions) 9190 SinkAfter.erase(I); 9191 9192 // Cannot sink instructions after dead instructions (there won't be any 9193 // recipes for them). Instead, find the first non-dead previous instruction. 9194 for (auto &P : Legal->getSinkAfter()) { 9195 Instruction *SinkTarget = P.second; 9196 Instruction *FirstInst = &*SinkTarget->getParent()->begin(); 9197 (void)FirstInst; 9198 while (DeadInstructions.contains(SinkTarget)) { 9199 assert( 9200 SinkTarget != FirstInst && 9201 "Must find a live instruction (at least the one feeding the " 9202 "first-order recurrence PHI) before reaching beginning of the block"); 9203 SinkTarget = SinkTarget->getPrevNode(); 9204 assert(SinkTarget != P.first && 9205 "sink source equals target, no sinking required"); 9206 } 9207 P.second = SinkTarget; 9208 } 9209 9210 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 9211 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 9212 VFRange SubRange = {VF, MaxVFPlusOne}; 9213 VPlans.push_back( 9214 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter)); 9215 VF = SubRange.End; 9216 } 9217 } 9218 9219 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( 9220 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, 9221 const MapVector<Instruction *, Instruction *> &SinkAfter) { 9222 9223 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups; 9224 9225 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder); 9226 9227 // --------------------------------------------------------------------------- 9228 // Pre-construction: record ingredients whose recipes we'll need to further 9229 // process after constructing the initial VPlan. 9230 // --------------------------------------------------------------------------- 9231 9232 // Mark instructions we'll need to sink later and their targets as 9233 // ingredients whose recipe we'll need to record. 9234 for (auto &Entry : SinkAfter) { 9235 RecipeBuilder.recordRecipeOf(Entry.first); 9236 RecipeBuilder.recordRecipeOf(Entry.second); 9237 } 9238 for (auto &Reduction : CM.getInLoopReductionChains()) { 9239 PHINode *Phi = Reduction.first; 9240 RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind(); 9241 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 9242 9243 RecipeBuilder.recordRecipeOf(Phi); 9244 for (auto &R : ReductionOperations) { 9245 RecipeBuilder.recordRecipeOf(R); 9246 // For min/max reducitons, where we have a pair of icmp/select, we also 9247 // need to record the ICmp recipe, so it can be removed later. 9248 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) 9249 RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0))); 9250 } 9251 } 9252 9253 // For each interleave group which is relevant for this (possibly trimmed) 9254 // Range, add it to the set of groups to be later applied to the VPlan and add 9255 // placeholders for its members' Recipes which we'll be replacing with a 9256 // single VPInterleaveRecipe. 
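  // Illustrative example of such a group (hypothetical loop): the two loads in
  //
  //   for (int i = 0; i < n; ++i) {
  //     re[i] = c[2 * i];            // factor-2 interleave group
  //     im[i] = c[2 * i + 1];
  //   }
  //
  // form one interleave group with factor 2; instead of two strided accesses,
  // the group is emitted as one wide consecutive load of 2 * VF elements
  // followed by shuffles that split the even and odd lanes.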
9257 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) { 9258 auto applyIG = [IG, this](ElementCount VF) -> bool { 9259 return (VF.isVector() && // Query is illegal for VF == 1 9260 CM.getWideningDecision(IG->getInsertPos(), VF) == 9261 LoopVectorizationCostModel::CM_Interleave); 9262 }; 9263 if (!getDecisionAndClampRange(applyIG, Range)) 9264 continue; 9265 InterleaveGroups.insert(IG); 9266 for (unsigned i = 0; i < IG->getFactor(); i++) 9267 if (Instruction *Member = IG->getMember(i)) 9268 RecipeBuilder.recordRecipeOf(Member); 9269 }; 9270 9271 // --------------------------------------------------------------------------- 9272 // Build initial VPlan: Scan the body of the loop in a topological order to 9273 // visit each basic block after having visited its predecessor basic blocks. 9274 // --------------------------------------------------------------------------- 9275 9276 // Create a dummy pre-entry VPBasicBlock to start building the VPlan. 9277 auto Plan = std::make_unique<VPlan>(); 9278 VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry"); 9279 Plan->setEntry(VPBB); 9280 9281 // Scan the body of the loop in a topological order to visit each basic block 9282 // after having visited its predecessor basic blocks. 9283 LoopBlocksDFS DFS(OrigLoop); 9284 DFS.perform(LI); 9285 9286 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 9287 // Relevant instructions from basic block BB will be grouped into VPRecipe 9288 // ingredients and fill a new VPBasicBlock. 9289 unsigned VPBBsForBB = 0; 9290 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName()); 9291 VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB); 9292 VPBB = FirstVPBBForBB; 9293 Builder.setInsertPoint(VPBB); 9294 9295 // Introduce each ingredient into VPlan. 9296 // TODO: Model and preserve debug instrinsics in VPlan. 9297 for (Instruction &I : BB->instructionsWithoutDebug()) { 9298 Instruction *Instr = &I; 9299 9300 // First filter out irrelevant instructions, to ensure no recipes are 9301 // built for them. 9302 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) 9303 continue; 9304 9305 SmallVector<VPValue *, 4> Operands; 9306 auto *Phi = dyn_cast<PHINode>(Instr); 9307 if (Phi && Phi->getParent() == OrigLoop->getHeader()) { 9308 Operands.push_back(Plan->getOrAddVPValue( 9309 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()))); 9310 } else { 9311 auto OpRange = Plan->mapToVPValues(Instr->operands()); 9312 Operands = {OpRange.begin(), OpRange.end()}; 9313 } 9314 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe( 9315 Instr, Operands, Range, Plan)) { 9316 // If Instr can be simplified to an existing VPValue, use it. 9317 if (RecipeOrValue.is<VPValue *>()) { 9318 auto *VPV = RecipeOrValue.get<VPValue *>(); 9319 Plan->addVPValue(Instr, VPV); 9320 // If the re-used value is a recipe, register the recipe for the 9321 // instruction, in case the recipe for Instr needs to be recorded. 9322 if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef())) 9323 RecipeBuilder.setRecipe(Instr, R); 9324 continue; 9325 } 9326 // Otherwise, add the new recipe. 9327 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); 9328 for (auto *Def : Recipe->definedValues()) { 9329 auto *UV = Def->getUnderlyingValue(); 9330 Plan->addVPValue(UV, Def); 9331 } 9332 9333 RecipeBuilder.setRecipe(Instr, Recipe); 9334 VPBB->appendRecipe(Recipe); 9335 continue; 9336 } 9337 9338 // Otherwise, if all widening options failed, Instruction is to be 9339 // replicated. This may create a successor for VPBB. 
9340 VPBasicBlock *NextVPBB = 9341 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 9342 if (NextVPBB != VPBB) { 9343 VPBB = NextVPBB; 9344 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 9345 : ""); 9346 } 9347 } 9348 } 9349 9350 RecipeBuilder.fixHeaderPhis(); 9351 9352 // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks 9353 // may also be empty, such as the last one VPBB, reflecting original 9354 // basic-blocks with no recipes. 9355 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry()); 9356 assert(PreEntry->empty() && "Expecting empty pre-entry block."); 9357 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor()); 9358 VPBlockUtils::disconnectBlocks(PreEntry, Entry); 9359 delete PreEntry; 9360 9361 // --------------------------------------------------------------------------- 9362 // Transform initial VPlan: Apply previously taken decisions, in order, to 9363 // bring the VPlan to its final state. 9364 // --------------------------------------------------------------------------- 9365 9366 // Apply Sink-After legal constraints. 9367 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 9368 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 9369 if (Region && Region->isReplicator()) { 9370 assert(Region->getNumSuccessors() == 1 && 9371 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 9372 assert(R->getParent()->size() == 1 && 9373 "A recipe in an original replicator region must be the only " 9374 "recipe in its block"); 9375 return Region; 9376 } 9377 return nullptr; 9378 }; 9379 for (auto &Entry : SinkAfter) { 9380 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 9381 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 9382 9383 auto *TargetRegion = GetReplicateRegion(Target); 9384 auto *SinkRegion = GetReplicateRegion(Sink); 9385 if (!SinkRegion) { 9386 // If the sink source is not a replicate region, sink the recipe directly. 9387 if (TargetRegion) { 9388 // The target is in a replication region, make sure to move Sink to 9389 // the block after it, not into the replication region itself. 9390 VPBasicBlock *NextBlock = 9391 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 9392 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 9393 } else 9394 Sink->moveAfter(Target); 9395 continue; 9396 } 9397 9398 // The sink source is in a replicate region. Unhook the region from the CFG. 9399 auto *SinkPred = SinkRegion->getSinglePredecessor(); 9400 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 9401 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 9402 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 9403 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 9404 9405 if (TargetRegion) { 9406 // The target recipe is also in a replicate region, move the sink region 9407 // after the target region. 9408 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 9409 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 9410 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 9411 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 9412 } else { 9413 // The sink source is in a replicate region, we need to move the whole 9414 // replicate region, which should only contain a single recipe in the 9415 // main block. 
9416 auto *SplitBlock = 9417 Target->getParent()->splitAt(std::next(Target->getIterator())); 9418 9419 auto *SplitPred = SplitBlock->getSinglePredecessor(); 9420 9421 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 9422 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 9423 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 9424 if (VPBB == SplitPred) 9425 VPBB = SplitBlock; 9426 } 9427 } 9428 9429 // Adjust the recipes for any inloop reductions. 9430 adjustRecipesForReductions(VPBB, Plan, RecipeBuilder, Range.Start); 9431 9432 // Introduce a recipe to combine the incoming and previous values of a 9433 // first-order recurrence. 9434 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9435 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 9436 if (!RecurPhi) 9437 continue; 9438 9439 auto *RecurSplice = cast<VPInstruction>( 9440 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 9441 {RecurPhi, RecurPhi->getBackedgeValue()})); 9442 9443 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 9444 if (auto *Region = GetReplicateRegion(PrevRecipe)) { 9445 VPBasicBlock *Succ = cast<VPBasicBlock>(Region->getSingleSuccessor()); 9446 RecurSplice->moveBefore(*Succ, Succ->getFirstNonPhi()); 9447 } else 9448 RecurSplice->moveAfter(PrevRecipe); 9449 RecurPhi->replaceAllUsesWith(RecurSplice); 9450 // Set the first operand of RecurSplice to RecurPhi again, after replacing 9451 // all users. 9452 RecurSplice->setOperand(0, RecurPhi); 9453 } 9454 9455 // Interleave memory: for each Interleave Group we marked earlier as relevant 9456 // for this VPlan, replace the Recipes widening its memory instructions with a 9457 // single VPInterleaveRecipe at its insertion point. 9458 for (auto IG : InterleaveGroups) { 9459 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 9460 RecipeBuilder.getRecipe(IG->getInsertPos())); 9461 SmallVector<VPValue *, 4> StoredValues; 9462 for (unsigned i = 0; i < IG->getFactor(); ++i) 9463 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 9464 auto *StoreR = 9465 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 9466 StoredValues.push_back(StoreR->getStoredValue()); 9467 } 9468 9469 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 9470 Recipe->getMask()); 9471 VPIG->insertBefore(Recipe); 9472 unsigned J = 0; 9473 for (unsigned i = 0; i < IG->getFactor(); ++i) 9474 if (Instruction *Member = IG->getMember(i)) { 9475 if (!Member->getType()->isVoidTy()) { 9476 VPValue *OriginalV = Plan->getVPValue(Member); 9477 Plan->removeVPValueFor(Member); 9478 Plan->addVPValue(Member, VPIG->getVPValue(J)); 9479 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 9480 J++; 9481 } 9482 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9483 } 9484 } 9485 9486 // From this point onwards, VPlan-to-VPlan transformations may change the plan 9487 // in ways that accessing values using original IR values is incorrect. 
  Plan->disableValue2VPValue();

  VPlanTransforms::sinkScalarOperands(*Plan);
  VPlanTransforms::mergeReplicateRegions(*Plan);

  std::string PlanName;
  raw_string_ostream RSO(PlanName);
  ElementCount VF = Range.Start;
  Plan->addVF(VF);
  RSO << "Initial VPlan for VF={" << VF;
  for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
    Plan->addVF(VF);
    RSO << "," << VF;
  }
  RSO << "},UF>=1";
  RSO.flush();
  Plan->setName(PlanName);

  return Plan;
}

VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
  assert(!OrigLoop->isInnermost());
  assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");

  // Create new empty VPlan.
  auto Plan = std::make_unique<VPlan>();

  // Build hierarchical CFG.
  VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
  HCFGBuilder.buildHierarchicalCFG();

  for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
       VF *= 2)
    Plan->addVF(VF);

  if (EnableVPlanPredication) {
    VPlanPredicator VPP(*Plan);
    VPP.predicate();

    // Avoid running transformation to recipes until masked code generation in
    // VPlan-native path is in place.
    return Plan;
  }

  SmallPtrSet<Instruction *, 1> DeadInstructions;
  VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan,
                                             Legal->getInductionVars(),
                                             DeadInstructions, *PSE.getSE());
  return Plan;
}

// Adjust the recipes for reductions. For in-loop reductions the chain of
// instructions leading from the loop exit instr to the phi needs to be
// converted to reductions, with one operand being vector and the other being
// the scalar reduction chain. For other reductions, a select is introduced
// between the phi and live-out recipes when folding the tail.
void LoopVectorizationPlanner::adjustRecipesForReductions(
    VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
    ElementCount MinVF) {
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
      continue;

    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For min/max reductions the chain will be the select instructions.
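    // A small, purely illustrative example of such a chain: for
    //
    //   for (int i = 0; i < n; ++i)
    //     s += a[i];
    //
    // ReductionOperations holds just the add feeding the loop-exit value, and
    // the loop below replaces its widened form with a VPReductionRecipe whose
    // scalar chain operand is the phi `s` and whose vector operand is the
    // widened load of a[i]; for a min/max reduction the icmp/select pair
    // plays the same role, with the select carrying the chain.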
    Instruction *Chain = Phi;
    for (Instruction *R : ReductionOperations) {
      VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
      RecurKind Kind = RdxDesc.getRecurrenceKind();

      VPValue *ChainOp = Plan->getVPValue(Chain);
      unsigned FirstOpId;
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
        assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
               "Expected to replace a VPWidenSelectSC");
        FirstOpId = 1;
      } else {
        assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe)) &&
               "Expected to replace a VPWidenSC");
        FirstOpId = 0;
      }
      unsigned VecOpId =
          R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
      VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));

      auto *CondOp = CM.foldTailByMasking()
                         ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
                         : nullptr;
      VPReductionRecipe *RedRecipe =
          new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
      WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
      Plan->removeVPValueFor(R);
      Plan->addVPValue(R, RedRecipe);
      WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
      WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
      WidenRecipe->eraseFromParent();

      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
        VPRecipeBase *CompareRecipe =
            RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
        assert(isa<VPWidenRecipe>(CompareRecipe) &&
               "Expected to replace a VPWidenSC");
        assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
               "Expected no remaining users");
        CompareRecipe->eraseFromParent();
      }
      Chain = R;
    }
  }

  // If tail is folded by masking, introduce selects between the phi
  // and the live-out instruction of each reduction, at the end of the latch.
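  // The select keeps the newly computed reduction value for lanes that are
  // active under the header mask and reuses the value from the reduction phi
  // for lanes masked off by tail folding.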
  if (CM.foldTailByMasking()) {
    for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
      VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
      if (!PhiR || PhiR->isInLoop())
        continue;
      Builder.setInsertPoint(LatchVPBB);
      VPValue *Cond =
          RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
      VPValue *Red = PhiR->getBackedgeValue();
      Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
    }
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
                               VPSlotTracker &SlotTracker) const {
  O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
  IG->getInsertPos()->printAsOperand(O, false);
  O << ", ";
  getAddr()->printAsOperand(O, SlotTracker);
  VPValue *Mask = getMask();
  if (Mask) {
    O << ", ";
    Mask->printAsOperand(O, SlotTracker);
  }

  unsigned OpIdx = 0;
  for (unsigned i = 0; i < IG->getFactor(); ++i) {
    if (!IG->getMember(i))
      continue;
    if (getNumStoreOperands() > 0) {
      O << "\n" << Indent << "  store ";
      getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
      O << " to index " << i;
    } else {
      O << "\n" << Indent << "  ";
      getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
      O << " = load from index " << i;
    }
    ++OpIdx;
  }
}
#endif

void VPWidenCallRecipe::execute(VPTransformState &State) {
  State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
                                  *this, State);
}

void VPWidenSelectRecipe::execute(VPTransformState &State) {
  State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
                                    this, *this, InvariantCond, State);
}

void VPWidenRecipe::execute(VPTransformState &State) {
  State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
}

void VPWidenGEPRecipe::execute(VPTransformState &State) {
  State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
                      *this, State.UF, State.VF, IsPtrLoopInvariant,
                      IsIndexLoopInvariant, State);
}

void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Int or FP induction being replicated.");
  State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
                                   getTruncInst(), getVPValue(0),
                                   getCastValue(), State);
}

void VPWidenPHIRecipe::execute(VPTransformState &State) {
  State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
                                 State);
}

void VPBlendRecipe::execute(VPTransformState &State) {
  State.ILV->setDebugLocFromInst(Phi, &State.Builder);
  // We know that all PHIs in non-header blocks are converted into
  // selects, so we don't have to worry about the insertion order and we
  // can just use the builder.
  // At this point we generate the predication tree. There may be
  // duplications since this is a simple recursive scan, but future
  // optimizations will clean it up.

  unsigned NumIncoming = getNumIncomingValues();

  // Generate a sequence of selects of the form:
  // SELECT(Mask3, In3,
  //        SELECT(Mask2, In2,
  //               SELECT(Mask1, In1,
  //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // and are essentially undef are taken from In0.
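  // Entry holds the blended value for each unroll part; it is built up
  // incrementally as the incoming values are folded in below.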
  InnerLoopVectorizer::VectorParts Entry(State.UF);
  for (unsigned In = 0; In < NumIncoming; ++In) {
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      // We might have single edge PHIs (blocks) - use an identity
      // 'select' for the first PHI operand.
      Value *In0 = State.get(getIncomingValue(In), Part);
      if (In == 0)
        Entry[Part] = In0; // Initialize with the first incoming value.
      else {
        // Select between the current value and the previous incoming edge
        // based on the incoming mask.
        Value *Cond = State.get(getMask(In), Part);
        Entry[Part] =
            State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
      }
    }
  }
  for (unsigned Part = 0; Part < State.UF; ++Part)
    State.set(this, Entry[Part], Part);
}

void VPInterleaveRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Interleave group being replicated.");
  State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
                                      getStoredValues(), getMask());
}

void VPReductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Reduction being replicated.");
  Value *PrevInChain = State.get(getChainOp(), 0);
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    RecurKind Kind = RdxDesc->getRecurrenceKind();
    bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
    Value *NewVecOp = State.get(getVecOp(), Part);
    if (VPValue *Cond = getCondOp()) {
      Value *NewCond = State.get(Cond, Part);
      VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
      Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
          Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
      Constant *IdenVec =
          ConstantVector::getSplat(VecTy->getElementCount(), Iden);
      Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
      NewVecOp = Select;
    }
    Value *NewRed;
    Value *NextInChain;
    if (IsOrdered) {
      if (State.VF.isVector())
        NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
                                        PrevInChain);
      else
        NewRed = State.Builder.CreateBinOp(
            (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(),
            PrevInChain, NewVecOp);
      PrevInChain = NewRed;
    } else {
      PrevInChain = State.get(getChainOp(), Part);
      NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
    }
    if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
      NextInChain = createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
                                   NewRed, PrevInChain);
    } else if (IsOrdered)
      NextInChain = NewRed;
    else {
      NextInChain = State.Builder.CreateBinOp(
          (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed,
          PrevInChain);
    }
    State.set(this, NextInChain, Part);
  }
}

void VPReplicateRecipe::execute(VPTransformState &State) {
  if (State.Instance) { // Generate a single instance.
    assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
    State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
                                    *State.Instance, IsPredicated, State);
    // Insert scalar instance packing it into a vector.
    if (AlsoPack && State.VF.isVector()) {
      // If we're constructing lane 0, initialize to start from poison.
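      // Subsequent lanes are inserted into the vector started from this
      // poison value by packScalarIntoVectorValue below.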
      if (State.Instance->Lane.isFirstLane()) {
        assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
        Value *Poison = PoisonValue::get(
            VectorType::get(getUnderlyingValue()->getType(), State.VF));
        State.set(this, Poison, State.Instance->Part);
      }
      State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
    }
    return;
  }

  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
  unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
  assert((!State.VF.isScalable() || IsUniform) &&
         "Can't scalarize a scalable vector");
  for (unsigned Part = 0; Part < State.UF; ++Part)
    for (unsigned Lane = 0; Lane < EndLane; ++Lane)
      State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
                                      VPIteration(Part, Lane), IsPredicated,
                                      State);
}

void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Branch on Mask works only on single instance.");

  unsigned Part = State.Instance->Part;
  unsigned Lane = State.Instance->Lane.getKnownLane();

  Value *ConditionBit = nullptr;
  VPValue *BlockInMask = getMask();
  if (BlockInMask) {
    ConditionBit = State.get(BlockInMask, Part);
    if (ConditionBit->getType()->isVectorTy())
      ConditionBit = State.Builder.CreateExtractElement(
          ConditionBit, State.Builder.getInt32(Lane));
  } else // Block in mask is all-one.
    ConditionBit = State.Builder.getTrue();

  // Replace the temporary unreachable terminator with a new conditional
  // branch, whose two destinations will be set later when they are created.
  auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
  assert(isa<UnreachableInst>(CurrentTerminator) &&
         "Expected to replace unreachable terminator with conditional branch.");
  auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
  CondBr->setSuccessor(0, nullptr);
  ReplaceInstWithInst(CurrentTerminator, CondBr);
}

void VPPredInstPHIRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Predicated instruction PHI works per instance.");
  Instruction *ScalarPredInst =
      cast<Instruction>(State.get(getOperand(0), *State.Instance));
  BasicBlock *PredicatedBB = ScalarPredInst->getParent();
  BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
  assert(PredicatingBB && "Predicated block has no single predecessor.");
  assert(isa<VPReplicateRecipe>(getOperand(0)) &&
         "operand must be VPReplicateRecipe");

  // By current pack/unpack logic we need to generate only a single phi node:
  // if a vector value for the predicated instruction exists at this point it
  // means the instruction has vector users only, and a phi for the vector
  // value is needed. In this case the recipe of the predicated instruction is
  // marked to also do that packing, thereby "hoisting" the insert-element
  // sequence. Otherwise, a phi node for the scalar value is needed.
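  // The two branches below handle the vector case (a phi over the partially
  // packed vector) and the scalar case (a phi over the scalar value),
  // respectively.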
  unsigned Part = State.Instance->Part;
  if (State.hasVectorValue(getOperand(0), Part)) {
    Value *VectorValue = State.get(getOperand(0), Part);
    InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
    PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
    VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
    VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
    if (State.hasVectorValue(this, Part))
      State.reset(this, VPhi, Part);
    else
      State.set(this, VPhi, Part);
    // NOTE: Currently we need to update the value of the operand, so the next
    // predicated iteration inserts its generated value in the correct vector.
    State.reset(getOperand(0), VPhi, Part);
  } else {
    Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
    PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
    Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
                     PredicatingBB);
    Phi->addIncoming(ScalarPredInst, PredicatedBB);
    if (State.hasScalarValue(this, *State.Instance))
      State.reset(this, Phi, *State.Instance);
    else
      State.set(this, Phi, *State.Instance);
    // NOTE: Currently we need to update the value of the operand, so the next
    // predicated iteration inserts its generated value in the correct vector.
    State.reset(getOperand(0), Phi, *State.Instance);
  }
}

void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
  VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
  State.ILV->vectorizeMemoryInstruction(
      &Ingredient, State, StoredValue ? nullptr : getVPSingleValue(), getAddr(),
      StoredValue, getMask());
}

// Determine how to lower the scalar epilogue, which depends on 1) optimising
// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
// predication, and 4) a TTI hook that analyses whether the loop is suitable
// for predication.
static ScalarEpilogueLowering getScalarEpilogueLowering(
    Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
    AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
    LoopVectorizationLegality &LVL) {
  // 1) OptSize takes precedence over all other options, i.e. if this is set,
  // don't look at hints or options, and don't request a scalar epilogue.
  // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
  // LoopAccessInfo (due to code dependency and not being able to reliably get
  // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
  // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
  // versioning when the vectorization is forced, unlike hasOptSize. So revert
  // back to the old way and vectorize with versioning when forced. See D81345.)
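  // Note that hasOptSize() only reflects the optsize/minsize function
  // attributes, while shouldOptimizeForSize() additionally covers
  // profile-guided size optimization (PGSO).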
  if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
                                                      PGSOQueryType::IRPass) &&
                          Hints.getForce() != LoopVectorizeHints::FK_Enabled))
    return CM_ScalarEpilogueNotAllowedOptSize;

  // 2) If set, obey the directives.
  if (PreferPredicateOverEpilogue.getNumOccurrences()) {
    switch (PreferPredicateOverEpilogue) {
    case PreferPredicateTy::ScalarEpilogue:
      return CM_ScalarEpilogueAllowed;
    case PreferPredicateTy::PredicateElseScalarEpilogue:
      return CM_ScalarEpilogueNotNeededUsePredicate;
    case PreferPredicateTy::PredicateOrDontVectorize:
      return CM_ScalarEpilogueNotAllowedUsePredicate;
    };
  }

  // 3) If set, obey the hints.
  switch (Hints.getPredicate()) {
  case LoopVectorizeHints::FK_Enabled:
    return CM_ScalarEpilogueNotNeededUsePredicate;
  case LoopVectorizeHints::FK_Disabled:
    return CM_ScalarEpilogueAllowed;
  };

  // 4) If the TTI hook indicates this is profitable, request predication.
  if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
                                       LVL.getLAI()))
    return CM_ScalarEpilogueNotNeededUsePredicate;

  return CM_ScalarEpilogueAllowed;
}

Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If Values have been set for this Def return the one relevant for \p Part.
  if (hasVectorValue(Def, Part))
    return Data.PerPartOutput[Def][Part];

  if (!hasScalarValue(Def, {Part, 0})) {
    Value *IRV = Def->getLiveInIRValue();
    Value *B = ILV->getBroadcastInstrs(IRV);
    set(Def, B, Part);
    return B;
  }

  Value *ScalarValue = get(Def, {Part, 0});
  // If we aren't vectorizing, we can just copy the scalar map values over
  // to the vector map.
  if (VF.isScalar()) {
    set(Def, ScalarValue, Part);
    return ScalarValue;
  }

  auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
  bool IsUniform = RepR && RepR->isUniform();

  unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
  // Check if there is a scalar value for the selected lane.
  if (!hasScalarValue(Def, {Part, LastLane})) {
    // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
    assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
           "unexpected recipe found to be invariant");
    IsUniform = true;
    LastLane = 0;
  }

  auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
  // Set the insert point after the last scalarized instruction or after the
  // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
  // will directly follow the scalar definitions.
  auto OldIP = Builder.saveIP();
  auto NewIP =
      isa<PHINode>(LastInst)
          ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
          : std::next(BasicBlock::iterator(LastInst));
  Builder.SetInsertPoint(&*NewIP);

  // However, if we are vectorizing, we need to construct the vector values.
  // If the value is known to be uniform after vectorization, we can just
  // broadcast the scalar value corresponding to lane zero for each unroll
  // iteration. Otherwise, we construct the vector values using
  // insertelement instructions. Since the resulting vectors are stored in
  // State, we will only generate the insertelements once.
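  // A uniform value only needs a broadcast of lane 0; otherwise all
  // VF.getKnownMinValue() lanes are packed into the vector one by one.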
  Value *VectorValue = nullptr;
  if (IsUniform) {
    VectorValue = ILV->getBroadcastInstrs(ScalarValue);
    set(Def, VectorValue, Part);
  } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Undef, Part);
    for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
      ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
    VectorValue = get(Def, Part);
  }
  Builder.restoreIP(OldIP);
  return VectorValue;
}

// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying
// the input LLVM IR.
static bool processLoopInVPlanNativePath(
    Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
    LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
    TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
    OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
    ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
    LoopVectorizationRequirements &Requirements) {

  if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
    LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
    return false;
  }
  assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
  Function *F = L->getHeader()->getParent();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());

  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);

  LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  // Use the planner for outer loop vectorization.
  // TODO: CM is not used at this point inside the planner. Turn CM into an
  // optional argument if we don't need it in the future.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
                               Requirements, ORE);

  // Get user vectorization factor.
  ElementCount UserVF = Hints.getWidth();

  CM.collectElementTypesForWidening();

  // Plan how to best vectorize, return the best VF and its cost.
  const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);

  // If we are stress testing VPlan builds, do not attempt to generate vector
  // code. Masked vector code generation support will follow soon.
  // Also, do not attempt to vectorize if no vector code will be produced.
  if (VPlanBuildStressTest || EnableVPlanPredication ||
      VectorizationFactor::Disabled() == VF)
    return false;

  LVP.setBestPlan(VF.Width, 1);

  {
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
                             F->getParent()->getDataLayout());
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
                           &CM, BFI, PSI, Checks);
    LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
                      << L->getHeader()->getParent()->getName() << "\"\n");
    LVP.executePlan(LB, DT);
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
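  // This updates the loop metadata so that later runs of the vectorizer skip
  // this loop.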
  Hints.setAlreadyVectorized();
  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated with floating point there
// will be a performance penalty from the conversion overhead and the change
// in the vector width.
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
  SmallVector<Instruction *, 4> Worklist;
  for (BasicBlock *BB : L->getBlocks()) {
    for (Instruction &Inst : *BB) {
      if (auto *S = dyn_cast<StoreInst>(&Inst)) {
        if (S->getValueOperand()->getType()->isFloatTy())
          Worklist.push_back(S);
      }
    }
  }

  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
  SmallPtrSet<const Instruction *, 4> Visited;
  SmallPtrSet<const Instruction *, 4> EmittedRemark;
  while (!Worklist.empty()) {
    auto *I = Worklist.pop_back_val();
    if (!L->contains(I))
      continue;
    if (!Visited.insert(I).second)
      continue;

    // Emit a remark if the floating point store required a floating
    // point conversion.
    // TODO: More work could be done to identify the root cause such as a
    // constant or a function return type and point the user to it.
    if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
                                          I->getDebugLoc(), L->getHeader())
               << "floating point conversion changes vector width. "
               << "Mixed floating point precision requires an up/down "
               << "cast that will negatively impact performance.";
      });

    for (Use &Op : I->operands())
      if (auto *OpI = dyn_cast<Instruction>(Op))
        Worklist.push_back(OpI);
  }
}

LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
    : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
                               !EnableLoopInterleaving),
      VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
                              !EnableLoopVectorization) {}

bool LoopVectorizePass::processLoop(Loop *L) {
  assert((EnableVPlanNativePath || L->isInnermost()) &&
         "VPlan-native path is not enabled. Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
                    << L->getHeader()->getParent()->getName() << "\" from "
                    << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);

  LLVM_DEBUG(
      dbgs() << "LV: Loop hints:"
             << " force="
             << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                     ? "disabled"
                     : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                            ? "enabled"
                            : "?"))
             << " width=" << Hints.getWidth()
             << " interleave=" << Hints.getInterleave() << "\n");

  // Function containing the loop.
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis.
  // Remarks generated as OptimizationRemark and OptimizationRemarkMissed are
  // less verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.

  if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements;
  LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
                                &Requirements, &Hints, DB, AC, BFI, PSI);
  if (!LVL.canVectorize(EnableVPlanNativePath)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check the function attributes and profiles to find out if this function
  // should be optimized for size.
  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);

  // Entrance to the VPlan-native vectorization path. Outer loops are processed
  // here. They may require CFG and instruction level transformations before
  // even evaluating whether vectorization is profitable. Since we cannot
  // modify the incoming IR, we need to build VPlan upfront in the
  // vectorization pipeline.
  if (!L->isInnermost())
    return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
                                        ORE, BFI, PSI, Hints, Requirements);

  assert(L->isInnermost() && "Inner loop expected.");

  // Check the loop for a trip count threshold: vectorize loops with a tiny
  // trip count by optimizing for size, to minimize overheads.
  auto ExpectedTC = getSmallBestKnownTC(*SE, L);
  if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem possibly correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    reportVectorizationFailure(
        "Can't vectorize when the NoImplicitFloat attribute is used",
        "loop not vectorized due to NoImplicitFloat attribute",
        "NoImplicitFloat", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
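  // Vectorization is only rejected here when both the loop hints and the
  // target report that FP vectorization may be unsafe.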
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    reportVectorizationFailure(
        "Potentially unsafe FP op prevents vectorization",
        "loop not vectorized due to unsafe FP support.",
        "UnsafeFP", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  bool AllowOrderedReductions;
  // If the flag is set, use that instead and override the TTI behaviour.
  if (ForceOrderedReductions.getNumOccurrences() > 0)
    AllowOrderedReductions = ForceOrderedReductions;
  else
    AllowOrderedReductions = TTI->enableOrderedReductions();
  if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
    ORE->emit([&]() {
      auto *ExactFPMathInst = Requirements.getExactFPInst();
      return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
                                                 ExactFPMathInst->getDebugLoc(),
                                                 ExactFPMathInst->getParent())
             << "loop not vectorized: cannot prove it is safe to reorder "
                "floating-point operations";
    });
    LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
                         "reorder floating-point operations\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved) {
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();
  CM.collectElementTypesForWidening();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
                               Requirements, ORE);

  // Get user vectorization factor and interleave count.
  ElementCount UserVF = Hints.getWidth();
  unsigned UserIC = Hints.getInterleave();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (VF.Width.isScalar()) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being
    // explicitly requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();
  {
    // Optimistically generate runtime checks. Drop them if they turn out to
    // not be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
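    // The SCEV predicate and memory runtime checks are only materialized when
    // some vector or interleaved code will actually be emitted.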
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
                             F->getParent()->getDataLayout());
    if (!VF.Width.isScalar() || IC > 1)
      Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
    LVP.setBestPlan(VF.Width, IC);

    using namespace ore;
    if (!VectorizeLoop) {
      assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided that it is not legal to vectorize the loop, then
      // interleave it.
      InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                                 &CM, BFI, PSI, Checks);
      LVP.executePlan(Unroller, DT);

      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                  L->getHeader())
               << "interleaved loop (interleaved count: "
               << NV("InterleaveCount", IC) << ")";
      });
    } else {
      // If we decided that it is *legal* to vectorize the loop, then do it.

      // Consider vectorizing the epilogue too if it's profitable.
      VectorizationFactor EpilogueVF =
          CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
      if (EpilogueVF.Width.isVector()) {

        // The first pass vectorizes the main loop and creates a scalar
        // epilogue to be vectorized by executing the plan (potentially with a
        // different factor) again shortly afterwards.
        EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
        EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
                                           EPI, &LVL, &CM, BFI, PSI, Checks);

        LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
        LVP.executePlan(MainILV, DT);
        ++LoopsVectorized;

        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
        formLCSSARecursively(*L, *DT, LI, SE);

        // Second pass vectorizes the epilogue and adjusts the control flow
        // edges from the first pass.
        LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
        EPI.MainLoopVF = EPI.EpilogueVF;
        EPI.MainLoopUF = EPI.EpilogueUF;
        EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
                                                 ORE, EPI, &LVL, &CM, BFI, PSI,
                                                 Checks);
        LVP.executePlan(EpilogILV, DT);
        ++LoopsEpilogueVectorized;

        if (!MainILV.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      } else {
        InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                               &LVL, &CM, BFI, PSI, Checks);
        LVP.executePlan(LB, DT);
        ++LoopsVectorized;

        // Add metadata to disable runtime unrolling a scalar loop when there
        // are no runtime checks about strides and memory. A scalar loop that
        // is rarely used is not worth unrolling.
        if (!LB.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      }
      // Report the vectorization decision.
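      // The remark records both the chosen vectorization width and the final
      // interleave count.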
      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                  L->getHeader())
               << "vectorized loop (vectorization width: "
               << NV("VectorizationFactor", VF.Width)
               << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
      });
    }

    if (ORE->allowExtraAnalysis(LV_NAME))
      checkMixedPrecision(L, ORE);
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  // Process each loop nest in the function.
  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE,
                                      TLI, TTI, nullptr, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}