//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
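//
// To make the 'wide iteration' description above concrete, here is a small
// sketch. The names and the vectorization factor (VF = 4) are hypothetical and
// only for illustration; they are not taken from this file. A loop such as
//
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];
//
// is conceptually turned into a wide loop that handles VF elements per
// iteration, followed by a scalar epilogue for the remaining iterations:
//
//   int i = 0;
//   for (; i + 4 <= n; i += 4)
//     for (int j = 0; j < 4; ++j)  // becomes a single SIMD add
//       a[i + j] = b[i + j] + c[i + j];
//   for (; i < n; ++i)             // scalar epilogue (remainder) loop
//     a[i] = b[i] + c[i];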
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired, that predication is preferred, and this lists all options. I.e.,
// the vectorizer will try to fold the tail-loop (epilogue) into the vector
// body and predicate the instructions accordingly.
// If tail-folding fails, there are different fallback strategies depending on
// these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> ForceOrderedReductions(
    "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
                unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single first-order recurrence or pointer induction PHINode in
  /// a block. This method handles the induction variable canonicalization. It
  /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive. Uses the VPValue operands from \p Operands instead of
  /// \p Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
                             VPValue *Def, VPValue *CastDef,
                             VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask);

  /// Set the debug location in the builder using the debug location in \p V.
  /// If \p CustomBuilder is None, the class member's Builder is used.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilder<> *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we
  /// are able to vectorize with strict in-order reductions for the given
  /// RdxDesc.
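  /// For illustration only (this generic loop is not taken from this file): a
  /// floating-point reduction such as
  ///
  ///   double S = 0.0;
  ///   for (int i = 0; i < n; ++i)
  ///     S += A[i];
  ///
  /// cannot normally be reassociated without fast-math; a strict in-order
  /// (ordered) reduction preserves the original left-to-right summation order
  /// while still operating on vector loads.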
  bool useOrderedReductions(RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...;
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// This function adds
  ///   (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPValue *CastDef, VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPValue *CastDef,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in
  /// the vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned
  /// above (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()).
  /// In the latter case \p EntryVal is a TruncInst and we must not record
  /// anything for that IV, but it's error-prone to expect callers of this
  /// routine to care about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(
      const InductionDescriptor &ID, const Instruction *EntryVal,
      Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
      unsigned Part, unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  /// Returns the block containing the checks or nullptr if no checks have
  /// been added.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off
  /// (given by \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g., epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and
  /// return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
                                unsigned EUF)
      : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
        EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, firstly to set up the
/// skeleton and vectorize the main loop, and secondly to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
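  // Here "three loops" refers to the main vector loop, the vectorized epilogue
  // loop, and the final scalar remainder loop that executes any left-over
  // iterations.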
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e., the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e., the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
  IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When an FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert to
    // using the loop's location.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

/// Return a value for Step multiplied by VF.
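/// For example (values are illustrative only): with Step == 2 and a fixed VF
/// of 4 this returns the constant 8, while with a scalable VF whose known
/// minimum is 4 it returns 2 * 4 * vscale, materialized via a runtime vscale
/// computation.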
static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
  assert(isa<ConstantInt>(Step) && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(
      Step->getType(),
      cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

namespace llvm {

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned.
  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// Memory access instruction may be vectorized in more than one way.
  /// Form of instruction after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decisions map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Collect all element types in the loop for which widening is needed.
  void collectElementTypesForWidening();

  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
  void collectInLoopReductions();

  /// Returns true if we should use strict in-order reductions for the given
  /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
  /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
  /// of FP operations.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
    return !Hints->allowReordering() && RdxDesc.isOrdered();
  }

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
    return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
                           ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
    /// But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }
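  // Illustrative example: for a load group {A[i], A[i+1], A[i+2]} whose
  // insert position is the load of A[i], the decision \p W is recorded for
  // all three members, but the full cost of the wide load plus shuffles is
  // attached to the A[i] entry only; the other members carry a cost of 0 so
  // the group is not double-counted.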
  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() && "Expected VF to be a vector VF");
    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
    assert(VF.isVector() && "Expected VF >=2");
    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(ElementCount VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(ElementCount VF) {
    // Do the analysis once.
    if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedStore(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedLoad(DataType, Alignment);
  }
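  // Illustrative example (simplified): for "if (c[i]) A[i] = x;" the store to
  // A[i] is consecutive, so if the target supports masked stores of the
  // element type it can be widened into a single masked store; otherwise the
  // cost model has to fall back to a scatter (if legal) or to scalarization
  // with predication.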
  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getLoadStoreType(V);
    Align Align = getLoadStoreAlignment(V);
    return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
           (SI && TTI.isLegalMaskedScatter(Ty, Align));
  }

  /// Returns true if the target machine supports all of the reduction
  /// variables found for the given VF.
  bool canVectorizeReductions(ElementCount VF) const {
    return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
      const RecurrenceDescriptor &RdxDesc = Reduction.second;
      return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
    }));
  }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
  bool isScalarWithPredication(Instruction *I) const;

  // Returns true if \p I is an instruction that will be predicated either
  // through scalar predication or masked load/store or masked gather/scatter.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I) {
    if (!blockNeedsPredication(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool
  memoryInstructionCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool
  interleavedAccessCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if we're required to use a scalar epilogue for at least
  /// the final iteration of the original loop.
  bool requiresScalarEpilogue(ElementCount VF) const {
    if (!isScalarEpilogueAllowed())
      return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
    if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
      return true;
    return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
  }
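  // Illustrative example for the interleave case above: a group that only
  // accesses members 0 and 2 of a 3-element struct has a gap, and the widened
  // load may touch memory past the last original iteration, so
  // InterleaveInfo.requiresScalarEpilogue() forces the final iteration(s)
  // into the scalar epilogue.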
  /// Returns true if a scalar epilogue is allowed, i.e. not prohibited due to
  /// optsize or a loop hint annotation.
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns true if all loop blocks should be masked to fold the tail loop.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  bool blockNeedsPredication(BasicBlock *BB) const {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
  /// nodes to the chain of instructions representing the reductions. Uses a
  /// MapVector to ensure deterministic iteration order.
  using ReductionChainMap =
      SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;

  /// Return the chain of instructions representing an inloop reduction.
  const ReductionChainMap &getInLoopReductionChains() const {
    return InLoopReductionChains;
  }

  /// Returns true if the Phi is part of an inloop reduction.
  bool isInLoopReduction(PHINode *Phi) const {
    return InLoopReductionChains.count(Phi);
  }

  /// Estimate cost of an intrinsic call instruction CI if it were vectorized
  /// with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
  InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;

  /// Estimate cost of a call instruction CI if it were vectorized with factor
  /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
  InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
                                    bool &NeedToScalarize) const;

  /// Returns true if the per-lane cost of VectorizationFactor A is lower than
  /// that of B.
  bool isMoreProfitable(const VectorizationFactor &A,
                        const VectorizationFactor &B) const;
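  // Illustrative arithmetic for isMoreProfitable() above: A = {VF 4, cost 20}
  // has a per-lane cost of 20/4 = 5, while B = {VF 8, cost 44} has
  // 44/8 = 5.5, so A is considered more profitable despite B's wider vectors.
  // Scalable factors are compared analogously, based on the known minimum
  // number of lanes.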
  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factors for both
  /// fixed and scalable vectorization, where the minimum-known number of
  /// elements is a power-of-2 larger than zero. If scalable vectorization is
  /// disabled or unsupported, then the scalable part will be equal to
  /// ElementCount::getScalable(0).
  FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
                                           ElementCount UserVF);

  /// \return the maximized element count based on the target's vector
  /// registers and the loop trip-count, but limited to a maximum safe VF.
  /// This is a helper function of computeFeasibleMaxVF.
  /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
  /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
  /// D98509). The issue is currently under investigation and this workaround
  /// will be removed as soon as possible.
  ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
                                       unsigned SmallestType,
                                       unsigned WidestType,
                                       const ElementCount &MaxSafeVF);

  /// \return the maximum legal scalable VF, based on the safe max number
  /// of elements.
  ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<InstructionCost, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width. If \p Invalid is not nullptr, this function
  /// will add a pair(Instruction*, ElementCount) to \p Invalid for
  /// each instruction that has an Invalid cost for the given VF.
  using InstructionVFPair = std::pair<Instruction *, ElementCount>;
  VectorizationCostTy
  expectedCost(ElementCount VF,
               SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
                                     Type *&VectorTy);

  /// Return the cost of instructions in an inloop reduction pattern, if I is
  /// part of that pattern.
  Optional<InstructionCost>
  getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
                          TTI::TargetCostKind CostKind);

  /// Calculate vectorization cost of memory instruction \p I.
  InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);

  /// The cost computation for scalarized memory instruction.
  InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);

  /// The cost computation for interleaving group of memory instructions.
  InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);

  /// The cost computation for Gather/Scatter instruction.
  InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);

  /// The cost calculation for Load/Store instruction \p I with uniform
  /// pointer -
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  /// element)
  InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);

  /// Estimate the overhead of scalarizing an instruction. This is a
  /// convenience wrapper for the type-based getScalarizationOverhead API.
  InstructionCost getScalarizationOverhead(Instruction *I,
                                           ElementCount VF) const;

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);
  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I);

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be
  /// truncated to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as a predicated block.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or doesn't divide by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;

  /// All blocks of loop are to be masked to fold tail of scalar iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// PHINodes of the reductions that should be expanded in-loop along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
  ReductionChainMap InLoopReductionChains;

  /// A Map of inloop reduction operations and their immediate chain operand.
  /// FIXME: This can be removed once reductions can be costed correctly in
  /// vplan. This was added to allow quick lookup to the inloop operations,
  /// without having to loop through InLoopReductionChains.
  DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
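  // Illustrative example: for a loop computing "s += a[i]" whose reduction is
  // selected to be performed in-loop, InLoopReductionChains maps the phi for
  // s to the chain {add}, and InLoopReductionImmediateChains lets the cost
  // model discover that the add belongs to that reduction without rescanning
  // the chains.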
  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                              ElementCount VF);

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopUniforms(ElementCount VF);

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization. Non-uniform scalarized instructions will be
  /// represented by VF values in the vectorized loop, each corresponding to an
  /// iteration of the original scalar loop.
  void collectLoopScalars(ElementCount VF);

  /// Keeps cost model vectorization decision and cost for instructions.
  /// Right now it is used for memory instructions only.
  using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
                                std::pair<InstWidening, InstructionCost>>;

  DecisionList WideningDecisions;

  /// Returns true if \p V is expected to be vectorized and it needs to be
  /// extracted.
  bool needsExtract(Value *V, ElementCount VF) const {
    Instruction *I = dyn_cast<Instruction>(V);
    if (VF.isScalar() || !I || !TheLoop->contains(I) ||
        TheLoop->isLoopInvariant(I))
      return false;

    // Assume we can vectorize V (and hence we need extraction) if the
    // scalars are not computed yet. This can happen, because it is called
    // via getScalarizationOverhead from setCostBasedWideningDecision, before
    // the scalars are collected. That should be a safe assumption in most
    // cases, because we check if the operands have vectorizable types
    // beforehand in LoopVectorizationLegality.
    return Scalars.find(VF) == Scalars.end() ||
           !isScalarAfterVectorization(I, VF);
  };

  /// Returns a range containing only operands needing to be extracted.
  SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
                                                   ElementCount VF) const {
    return SmallVector<Value *, 4>(make_filter_range(
        Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
  }

  /// Determines if we have the infrastructure to vectorize loop \p L and its
  /// epilogue, assuming the main loop is vectorized by \p VF.
  bool isCandidateForEpilogueVectorization(const Loop &L,
                                           const ElementCount VF) const;

  /// Returns true if epilogue vectorization is considered profitable, and
  /// false otherwise.
  /// \p VF is the vectorization factor chosen for the original loop.
  bool isEpilogueVectorizationProfitable(const ElementCount VF) const;

public:
  /// The loop that we evaluate.
  Loop *TheLoop;

  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// Vectorization legality.
  LoopVectorizationLegality *Legal;

  /// Vector target information.
  const TargetTransformInfo &TTI;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Demanded bits analysis.
  DemandedBits *DB;

  /// Assumption cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  const Function *TheFunction;

  /// Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;

  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride and close to each other.
  InterleavedAccessInfo &InterleaveInfo;

  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;

  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;

  /// All element types found in the loop.
  SmallPtrSet<Type *, 16> ElementTypesInLoop;

  /// Profitable vector factors.
  SmallVector<VectorizationFactor, 8> ProfitableVFs;
};
} // end namespace llvm

/// Helper struct to manage generating runtime checks for vectorization.
///
/// The runtime checks are created up-front in temporary blocks, un-linked from
/// the existing IR, to allow better estimation of their cost. After deciding
/// to vectorize, the checks are moved back. If deciding not to vectorize, the
/// temporary blocks are completely removed.
class GeneratedRTChecks {
  /// Basic block which contains the generated SCEV checks, if any.
  BasicBlock *SCEVCheckBlock = nullptr;

  /// The value representing the result of the generated SCEV checks. If it is
  /// nullptr, either no SCEV checks have been generated or they have been
  /// used.
  Value *SCEVCheckCond = nullptr;

  /// Basic block which contains the generated memory runtime checks, if any.
  BasicBlock *MemCheckBlock = nullptr;

  /// The value representing the result of the generated memory runtime checks.
  /// If it is nullptr, either no memory runtime checks have been generated or
  /// they have been used.
  Instruction *MemRuntimeCheckCond = nullptr;

  DominatorTree *DT;
  LoopInfo *LI;

  SCEVExpander SCEVExp;
  SCEVExpander MemCheckExp;

public:
  GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
                    const DataLayout &DL)
      : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
        MemCheckExp(SE, DL, "scev.check") {}

  /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
  /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation.
  /// If there is no vector code generation, the check blocks are removed
  /// completely.
  void Create(Loop *L, const LoopAccessInfo &LAI,
              const SCEVUnionPredicate &UnionPred) {

    BasicBlock *LoopHeader = L->getHeader();
    BasicBlock *Preheader = L->getLoopPreheader();

    // Use SplitBlock to create blocks for SCEV & memory runtime checks to
    // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
    // may be used by SCEVExpander. The blocks will be un-linked from their
    // predecessors and removed from LI & DT at the end of the function.
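    // Illustrative block layout right after the splitting below (before the
    // check blocks are unhooked again):
    //   preheader -> vector.scevcheck -> vector.memcheck -> loop header
    // where either check block is omitted when the corresponding predicate is
    // trivially true or no pointer checks are needed.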
    if (!UnionPred.isAlwaysTrue()) {
      SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
                                  nullptr, "vector.scevcheck");

      SCEVCheckCond = SCEVExp.expandCodeForPredicate(
          &UnionPred, SCEVCheckBlock->getTerminator());
    }

    const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
    if (RtPtrChecking.Need) {
      auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
      MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
                                 "vector.memcheck");

      std::tie(std::ignore, MemRuntimeCheckCond) =
          addRuntimeChecks(MemCheckBlock->getTerminator(), L,
                           RtPtrChecking.getChecks(), MemCheckExp);
      assert(MemRuntimeCheckCond &&
             "no RT checks generated although RtPtrChecking "
             "claimed checks are required");
    }

    if (!MemCheckBlock && !SCEVCheckBlock)
      return;

    // Unhook the temporary block with the checks, update various places
    // accordingly.
    if (SCEVCheckBlock)
      SCEVCheckBlock->replaceAllUsesWith(Preheader);
    if (MemCheckBlock)
      MemCheckBlock->replaceAllUsesWith(Preheader);

    if (SCEVCheckBlock) {
      SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
      new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
      Preheader->getTerminator()->eraseFromParent();
    }
    if (MemCheckBlock) {
      MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
      new UnreachableInst(Preheader->getContext(), MemCheckBlock);
      Preheader->getTerminator()->eraseFromParent();
    }

    DT->changeImmediateDominator(LoopHeader, Preheader);
    if (MemCheckBlock) {
      DT->eraseNode(MemCheckBlock);
      LI->removeBlock(MemCheckBlock);
    }
    if (SCEVCheckBlock) {
      DT->eraseNode(SCEVCheckBlock);
      LI->removeBlock(SCEVCheckBlock);
    }
  }

  /// Remove the created SCEV & memory runtime check blocks & instructions, if
  /// unused.
  ~GeneratedRTChecks() {
    SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT);
    SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT);
    if (!SCEVCheckCond)
      SCEVCleaner.markResultUsed();

    if (!MemRuntimeCheckCond)
      MemCheckCleaner.markResultUsed();

    if (MemRuntimeCheckCond) {
      auto &SE = *MemCheckExp.getSE();
      // Memory runtime check generation creates compares that use expanded
      // values. Remove them before running the SCEVExpanderCleaners.
      for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
        if (MemCheckExp.isInsertedInstruction(&I))
          continue;
        SE.forgetValue(&I);
        SE.eraseValueFromMap(&I);
        I.eraseFromParent();
      }
    }
    MemCheckCleaner.cleanup();
    SCEVCleaner.cleanup();

    if (SCEVCheckCond)
      SCEVCheckBlock->eraseFromParent();
    if (MemRuntimeCheckCond)
      MemCheckBlock->eraseFromParent();
  }

  /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
  /// adjusts the branches to branch to the vector preheader or \p Bypass,
  /// depending on the generated condition.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
                             BasicBlock *LoopVectorPreHeader,
                             BasicBlock *LoopExitBlock) {
    if (!SCEVCheckCond)
      return nullptr;
    if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
      if (C->isZero())
        return nullptr;

    auto *Pred = LoopVectorPreHeader->getSinglePredecessor();

    BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
    // Create new preheader for vector loop.
    if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
      PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);

    SCEVCheckBlock->getTerminator()->eraseFromParent();
    SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
    Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
                                                SCEVCheckBlock);

    DT->addNewBlock(SCEVCheckBlock, Pred);
    DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);

    ReplaceInstWithInst(
        SCEVCheckBlock->getTerminator(),
        BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
    // Mark the check as used, to prevent it from being removed during cleanup.
    SCEVCheckCond = nullptr;
    return SCEVCheckBlock;
  }

  /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and
  /// adjusts the branches to branch to the vector preheader or \p Bypass,
  /// depending on the generated condition.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
                                   BasicBlock *LoopVectorPreHeader) {
    // Check if we generated code that checks in runtime if arrays overlap.
    if (!MemRuntimeCheckCond)
      return nullptr;

    auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
    Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
                                                MemCheckBlock);

    DT->addNewBlock(MemCheckBlock, Pred);
    DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
    MemCheckBlock->moveBefore(LoopVectorPreHeader);

    if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
      PL->addBasicBlockToLoop(MemCheckBlock, *LI);

    ReplaceInstWithInst(
        MemCheckBlock->getTerminator(),
        BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
    MemCheckBlock->getTerminator()->setDebugLoc(
        Pred->getTerminator()->getDebugLoc());

    // Mark the check as used, to prevent it from being removed during cleanup.
    MemRuntimeCheckCond = nullptr;
    return MemCheckBlock;
  }
};

// Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
// the vector length information is not provided, vectorization is not
// considered explicit. Interleave hints are not allowed either. These
// limitations will be relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
// vectorize' semantics. This pragma provides *auto-vectorization hints*
// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
// provides *explicit vectorization hints* (LV can bypass legal checks and
// assume that vectorization is legal). However, both hints are implemented
// using the same metadata (llvm.loop.vectorize, processed by
// LoopVectorizeHints). This will be fixed in the future when the native IR
// representation for pragma 'omp simd' is introduced.
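// Illustrative example of a loop this function accepts, assuming the hint
// survives to IR as llvm.loop.vectorize metadata:
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (int i = 0; i < n; ++i)    // outer loop, explicitly annotated
//     for (int j = 0; j < m; ++j)
//       A[i][j] += B[j][i];
// An unannotated outer loop, or one carrying an interleave hint > 1, is
// rejected below.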
static bool isExplicitVecOuterLoop(Loop *OuterLp,
                                   OptimizationRemarkEmitter *ORE) {
  assert(!OuterLp->isInnermost() && "This is not an outer loop");
  LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);

  // Only outer loops with an explicit vectorization hint are supported.
  // Unannotated outer loops are ignored.
  if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
    return false;

  Function *Fn = OuterLp->getHeader()->getParent();
  if (!Hints.allowVectorization(Fn, OuterLp,
                                true /*VectorizeOnlyWhenForced*/)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
    return false;
  }

  if (Hints.getInterleave() > 1) {
    // TODO: Interleave support is future work.
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
                         "outer loops.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  return true;
}

static void collectSupportedLoops(Loop &L, LoopInfo *LI,
                                  OptimizationRemarkEmitter *ORE,
                                  SmallVectorImpl<Loop *> &V) {
  // Collect inner loops and outer loops without irreducible control flow. For
  // now, only collect outer loops that have explicit vectorization hints. If
  // we are stress testing the VPlan H-CFG construction, we collect the
  // outermost loop of every loop nest.
  if (L.isInnermost() || VPlanBuildStressTest ||
      (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
    LoopBlocksRPO RPOT(&L);
    RPOT.perform(LI);
    if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
      V.push_back(&L);
      // TODO: Collect inner loops inside marked outer loops in case
      // vectorization fails for the outer loop. Do not invoke
      // 'containsIrreducibleCFG' again for inner loops when the outer loop is
      // already known to be reducible. We can use an inherited attribute for
      // that.
      return;
    }
  }
  for (Loop *InnerL : L)
    collectSupportedLoops(*InnerL, LI, ORE, V);
}

namespace {

/// The LoopVectorize Pass.
struct LoopVectorize : public FunctionPass {
  /// Pass identification, replacement for typeid.
  static char ID;

  LoopVectorizePass Impl;

  explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
                         bool VectorizeOnlyWhenForced = false)
      : FunctionPass(ID),
        Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
    initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
    auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();

    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return Impl
        .runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, GetLAA,
                 *ORE, PSI)
        .MadeAnyChange;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addRequired<InjectTLIMappingsLegacy>();

    // We currently do not preserve loopinfo/dominator analyses with outer loop
    // vectorization. Until this is addressed, mark these analyses as preserved
    // only for non-VPlan-native path.
    // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
    if (!EnableVPlanNativePath) {
      AU.addPreserved<LoopInfoWrapperPass>();
      AU.addPreserved<DominatorTreeWrapperPass>();
    }

    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
  }
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer,
// LoopVectorizationCostModel and LoopVectorizationPlanner.
//===----------------------------------------------------------------------===//

Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Else, the broadcast will be inside
  // the vector loop body.
  Instruction *Instr = dyn_cast<Instruction>(V);
  bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
                     (!Instr ||
                      DT->dominates(Instr->getParent(), LoopVectorPreHeader));
  // Place the code for broadcasting invariant variables in the new preheader.
  IRBuilder<>::InsertPointGuard Guard(Builder);
  if (SafeToHoist)
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // Broadcast the scalar into all locations in the vector.
  Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

  return Shuf;
}

void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
    const InductionDescriptor &II, Value *Step, Value *Start,
    Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
    VPTransformState &State) {
  assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
         "Expected either an induction phi-node or a truncate of it!");

  // Construct the initial value of the vector IV in the vector loop preheader.
  auto CurrIP = Builder.saveIP();
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
  if (isa<TruncInst>(EntryVal)) {
    assert(Start->getType()->isIntegerTy() &&
           "Truncation requires an integer type");
    auto *TruncType = cast<IntegerType>(EntryVal->getType());
    Step = Builder.CreateTrunc(Step, TruncType);
    Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
  }
  Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
  Value *SteppedStart =
      getStepVector(SplatStart, 0, Step, II.getInductionOpcode());

  // We create vector phi nodes for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (Step->getType()->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = II.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Multiply the vectorization factor by the step using integer or
  // floating-point arithmetic as appropriate.
  Type *StepType = Step->getType();
  if (Step->getType()->isFloatingPointTy())
    StepType = IntegerType::get(StepType->getContext(),
                                StepType->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(Builder, StepType, VF);
  if (Step->getType()->isFloatingPointTy())
    RuntimeVF = Builder.CreateSIToFP(RuntimeVF, Step->getType());
  Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);

  // Create a vector splat to use in the induction update.
  //
  // FIXME: If the step is non-constant, we create the vector splat with
  //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
  //        handle a constant vector splat.
  Value *SplatVF = isa<Constant>(Mul)
                       ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
                       : Builder.CreateVectorSplat(VF, Mul);
  Builder.restoreIP(CurrIP);

  // We may need to add the step a number of times, depending on the unroll
  // factor. The last of those goes into the PHI.
  PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
                                    &*LoopVectorBody->getFirstInsertionPt());
  VecInd->setDebugLoc(EntryVal->getDebugLoc());
  Instruction *LastInduction = VecInd;
  for (unsigned Part = 0; Part < UF; ++Part) {
    State.set(Def, LastInduction, Part);

    if (isa<TruncInst>(EntryVal))
      addMetadata(LastInduction, EntryVal);
    recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
                                          State, Part);

    LastInduction = cast<Instruction>(
        Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
    LastInduction->setDebugLoc(EntryVal->getDebugLoc());
  }

  // Move the last step to the end of the latch block. This ensures consistent
  // placement of all induction updates.
  auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
  auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
  auto *ICmp = cast<Instruction>(Br->getCondition());
  LastInduction->moveBefore(ICmp);
  LastInduction->setName("vec.ind.next");

  VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
  VecInd->addIncoming(LastInduction, LoopVectorLatch);
}
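// Illustrative IR produced by createVectorIntOrFpInductionPHI above for an
// i32 IV with step 1, VF = 4, UF = 2 (simplified):
//   %vec.ind      = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %pre ],
//                                 [ %vec.ind.next, %latch ]
//   %step.add     = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
//   %vec.ind.next = add <4 x i32> %step.add, <i32 4, i32 4, i32 4, i32 4>
// Part 0 uses %vec.ind and part 1 uses %step.add.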
bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
  return Cost->isScalarAfterVectorization(I, VF) ||
         Cost->isProfitableToScalarize(I, VF);
}

bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
  if (shouldScalarizeInstruction(IV))
    return true;
  auto isScalarInst = [&](User *U) -> bool {
    auto *I = cast<Instruction>(U);
    return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
  };
  return llvm::any_of(IV->users(), isScalarInst);
}

void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
    const InductionDescriptor &ID, const Instruction *EntryVal,
    Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
    unsigned Part, unsigned Lane) {
  assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
         "Expected either an induction phi-node or a truncate of it!");

  // This induction variable is not the phi from the original loop but the
  // newly-created IV based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
  // re-uses the same InductionDescriptor that the original IV uses, but we
  // don't have to do any recording in this case - that is done when the
  // original IV is processed.
  if (isa<TruncInst>(EntryVal))
    return;

  const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
  if (Casts.empty())
    return;
  // Only the first Cast instruction in the Casts vector is of interest.
  // The rest of the Casts (if they exist) have no uses outside the
  // induction update chain itself.
  if (Lane < UINT_MAX)
    State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
  else
    State.set(CastDef, VectorLoopVal, Part);
}

void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
                                                TruncInst *Trunc, VPValue *Def,
                                                VPValue *CastDef,
                                                VPTransformState &State) {
  assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
         "Primary induction variable must have an integer type");

  auto II = Legal->getInductionVars().find(IV);
  assert(II != Legal->getInductionVars().end() && "IV is not an induction");

  auto ID = II->second;
  assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");

  // The value from the original loop to which we are mapping the new induction
  // variable.
  Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;

  auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
  auto CreateStepValue = [&](const SCEV *Step) -> Value * {
    assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
           "Induction step should be loop invariant");
    if (PSE.getSE()->isSCEVable(IV->getType())) {
      SCEVExpander Exp(*PSE.getSE(), DL, "induction");
      return Exp.expandCodeFor(Step, Step->getType(),
                               LoopVectorPreHeader->getTerminator());
    }
    return cast<SCEVUnknown>(Step)->getValue();
  };

  // The scalar value to broadcast. This is derived from the canonical
  // induction variable. If a truncation type is given, truncate the canonical
  // induction variable and step. Otherwise, derive these values from the
  // induction descriptor.
  auto CreateScalarIV = [&](Value *&Step) -> Value * {
    Value *ScalarIV = Induction;
    if (IV != OldInduction) {
      ScalarIV = IV->getType()->isIntegerTy()
                     ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
                     : Builder.CreateCast(Instruction::SIToFP, Induction,
                                          IV->getType());
      ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
      ScalarIV->setName("offset.idx");
    }
    if (Trunc) {
      auto *TruncType = cast<IntegerType>(Trunc->getType());
      assert(Step->getType()->isIntegerTy() &&
             "Truncation requires an integer step");
      ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
      Step = Builder.CreateTrunc(Step, TruncType);
    }
    return ScalarIV;
  };

  // Create the vector values from the scalar IV, in the absence of creating a
  // vector IV.
  auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
    Value *Broadcasted = getBroadcastInstrs(ScalarIV);
    for (unsigned Part = 0; Part < UF; ++Part) {
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      Value *EntryPart =
          getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
                        ID.getInductionOpcode());
      State.set(Def, EntryPart, Part);
      if (Trunc)
        addMetadata(EntryPart, Trunc);
      recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
                                            State, Part);
    }
  };

  // Fast-math-flags propagate from the original induction instruction.
  IRBuilder<>::FastMathFlagGuard FMFG(Builder);
  if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
    Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());

  // Now do the actual transformations, and start with creating the step value.
  Value *Step = CreateStepValue(ID.getStep());
  if (VF.isZero() || VF.isScalar()) {
    Value *ScalarIV = CreateScalarIV(Step);
    CreateSplatIV(ScalarIV, Step);
    return;
  }

  // Determine if we want a scalar version of the induction variable. This is
  // true if the induction variable itself is not widened, or if it has at
  // least one user in the loop that is not widened.
  auto NeedsScalarIV = needsScalarInduction(EntryVal);
  if (!NeedsScalarIV) {
    createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
                                    State);
    return;
  }
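  // Summary of the remaining cases (illustrative): at this point at least one
  // user of the IV is scalar. If the IV itself should still be kept as a
  // vector, we emit both a vector IV and scalar steps; if every relevant use
  // is scalar, we emit scalar steps only, plus a splat of the scalar IV when
  // tail folding needs it to feed the block mask.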
  // Try to create a new independent vector induction variable. If we can't
  // create the phi node, we will splat the scalar induction variable in each
  // loop iteration.
  if (!shouldScalarizeInstruction(EntryVal)) {
    createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
                                    State);
    Value *ScalarIV = CreateScalarIV(Step);
    // Create scalar steps that can be used by instructions we will later
    // scalarize. Note that the addition of the scalar steps will not increase
    // the number of instructions in the loop in the common case prior to
    // InstCombine. We will be trading one vector extract for each scalar step.
    buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
    return;
  }

  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorized IV. Except when we tail-fold: then the splat IV feeds the
  // predicate used by the masked loads/stores.
  Value *ScalarIV = CreateScalarIV(Step);
  if (!Cost->isScalarEpilogueAllowed())
    CreateSplatIV(ScalarIV, Step);
  buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
}

Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
                                          Instruction::BinaryOps BinOp) {
  // Create and check the types.
  auto *ValVTy = cast<VectorType>(Val->getType());
  ElementCount VLen = ValVTy->getElementCount();

  Type *STy = Val->getType()->getScalarType();
  assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
         "Induction Step must be an integer or FP");
  assert(Step->getType() == STy && "Step has wrong type");

  SmallVector<Constant *, 8> Indices;

  // Create a vector of consecutive numbers from zero to VF.
  VectorType *InitVecValVTy = ValVTy;
  Type *InitVecValSTy = STy;
  if (STy->isFloatingPointTy()) {
    InitVecValSTy =
        IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
    InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
  }
  Value *InitVec = Builder.CreateStepVector(InitVecValVTy);

  // Add on StartIdx.
  Value *StartIdxSplat = Builder.CreateVectorSplat(
      VLen, ConstantInt::get(InitVecValSTy, StartIdx));
  InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);

  if (STy->isIntegerTy()) {
    Step = Builder.CreateVectorSplat(VLen, Step);
    assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    // flags, which can be found from the original scalar operations.
    Step = Builder.CreateMul(InitVec, Step);
    return Builder.CreateAdd(Val, Step, "induction");
  }

  // Floating-point induction.
  assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
         "Binary Opcode should be specified for FP induction");
  InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
  Step = Builder.CreateVectorSplat(VLen, Step);
  Value *MulOp = Builder.CreateFMul(InitVec, Step);
  return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
}
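// Illustrative result of the integer path of getStepVector() above, for
// getStepVector(%val, /*StartIdx=*/2, /*Step=*/3, Add) with VF = 4
// (simplified):
//   stepvector                ; <0, 1, 2, 3>
//   + splat(2)                ; <2, 3, 4, 5>
//   * splat(3)                ; <6, 9, 12, 15>
//   %induction = add %val, <i32 6, i32 9, i32 12, i32 15>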
void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
                                           Instruction *EntryVal,
                                           const InductionDescriptor &ID,
                                           VPValue *Def, VPValue *CastDef,
                                           VPTransformState &State) {
  // We shouldn't have to build scalar steps if we aren't vectorizing.
  assert(VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same integer type.
  Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
  assert(ScalarIVTy == Step->getType() &&
         "Val and Step should have the same type");

  // We build scalar steps for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (ScalarIVTy->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = ID.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Determine the number of scalars we need to generate for each unroll
  // iteration. If EntryVal is uniform, we only need to generate the first
  // lane. Otherwise, we generate all VF values.
  bool IsUniform =
      Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF);
  unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue();
  // Compute the scalar steps and save the results in State.
  Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
                                     ScalarIVTy->getScalarSizeInBits());
  Type *VecIVTy = nullptr;
  Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
  if (!IsUniform && VF.isScalable()) {
    VecIVTy = VectorType::get(ScalarIVTy, VF);
    UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF));
    SplatStep = Builder.CreateVectorSplat(VF, Step);
    SplatIV = Builder.CreateVectorSplat(VF, ScalarIV);
  }

  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *StartIdx0 =
        createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF);

    if (!IsUniform && VF.isScalable()) {
      auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0);
      auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
      if (ScalarIVTy->isFloatingPointTy())
        InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
      auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
      auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
      State.set(Def, Add, Part);
      recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
                                            Part);
      // It's useful to record the lane values too for the known minimum number
      // of elements so we do those below. This improves the code quality when
      // trying to extract the first element, for example.
    }

    if (ScalarIVTy->isFloatingPointTy())
      StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);

    for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
      Value *StartIdx = Builder.CreateBinOp(
          AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
      // The step returned by `createStepForVF` is a runtime-evaluated value
      // when VF is scalable. Otherwise, it should be folded into a Constant.
2635 assert((VF.isScalable() || isa<Constant>(StartIdx)) && 2636 "Expected StartIdx to be folded to a constant when VF is not " 2637 "scalable"); 2638 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2639 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2640 State.set(Def, Add, VPIteration(Part, Lane)); 2641 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2642 Part, Lane); 2643 } 2644 } 2645 } 2646 2647 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2648 const VPIteration &Instance, 2649 VPTransformState &State) { 2650 Value *ScalarInst = State.get(Def, Instance); 2651 Value *VectorValue = State.get(Def, Instance.Part); 2652 VectorValue = Builder.CreateInsertElement( 2653 VectorValue, ScalarInst, 2654 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2655 State.set(Def, VectorValue, Instance.Part); 2656 } 2657 2658 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2659 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2660 return Builder.CreateVectorReverse(Vec, "reverse"); 2661 } 2662 2663 // Return whether we allow using masked interleave-groups (for dealing with 2664 // strided loads/stores that reside in predicated blocks, or for dealing 2665 // with gaps). 2666 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2667 // If an override option has been passed in for interleaved accesses, use it. 2668 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2669 return EnableMaskedInterleavedMemAccesses; 2670 2671 return TTI.enableMaskedInterleavedAccessVectorization(); 2672 } 2673 2674 // Try to vectorize the interleave group that \p Instr belongs to. 2675 // 2676 // E.g. Translate following interleaved load group (factor = 3): 2677 // for (i = 0; i < N; i+=3) { 2678 // R = Pic[i]; // Member of index 0 2679 // G = Pic[i+1]; // Member of index 1 2680 // B = Pic[i+2]; // Member of index 2 2681 // ... // do something to R, G, B 2682 // } 2683 // To: 2684 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2685 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2686 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2687 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2688 // 2689 // Or translate following interleaved store group (factor = 3): 2690 // for (i = 0; i < N; i+=3) { 2691 // ... do something to R, G, B 2692 // Pic[i] = R; // Member of index 0 2693 // Pic[i+1] = G; // Member of index 1 2694 // Pic[i+2] = B; // Member of index 2 2695 // } 2696 // To: 2697 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2698 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2699 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2700 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2701 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2702 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2703 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2704 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2705 VPValue *BlockInMask) { 2706 Instruction *Instr = Group->getInsertPos(); 2707 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2708 2709 // Prepare for the vector type of the interleaved load/store. 
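// For example, with a fixed VF of 4 and an interleave factor of 3 over i32
// elements, the wide vector type computed below is <12 x i32>, matching the
// R,G,B example above.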
2710 Type *ScalarTy = getLoadStoreType(Instr); 2711 unsigned InterleaveFactor = Group->getFactor(); 2712 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2713 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2714 2715 // Prepare for the new pointers. 2716 SmallVector<Value *, 2> AddrParts; 2717 unsigned Index = Group->getIndex(Instr); 2718 2719 // TODO: extend the masked interleaved-group support to reversed access. 2720 assert((!BlockInMask || !Group->isReverse()) && 2721 "Reversed masked interleave-group not supported."); 2722 2723 // If the group is reverse, adjust the index to refer to the last vector lane 2724 // instead of the first. We adjust the index from the first vector lane, 2725 // rather than directly getting the pointer for lane VF - 1, because the 2726 // pointer operand of the interleaved access is supposed to be uniform. For 2727 // uniform instructions, we're only required to generate a value for the 2728 // first vector lane in each unroll iteration. 2729 if (Group->isReverse()) 2730 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2731 2732 for (unsigned Part = 0; Part < UF; Part++) { 2733 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2734 setDebugLocFromInst(AddrPart); 2735 2736 // Notice current instruction could be any index. Need to adjust the address 2737 // to the member of index 0. 2738 // 2739 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2740 // b = A[i]; // Member of index 0 2741 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2742 // 2743 // E.g. A[i+1] = a; // Member of index 1 2744 // A[i] = b; // Member of index 0 2745 // A[i+2] = c; // Member of index 2 (Current instruction) 2746 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2747 2748 bool InBounds = false; 2749 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2750 InBounds = gep->isInBounds(); 2751 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2752 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2753 2754 // Cast to the vector pointer type. 2755 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2756 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2757 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2758 } 2759 2760 setDebugLocFromInst(Instr); 2761 Value *PoisonVec = PoisonValue::get(VecTy); 2762 2763 Value *MaskForGaps = nullptr; 2764 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2765 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2766 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2767 } 2768 2769 // Vectorize the interleaved load group. 2770 if (isa<LoadInst>(Instr)) { 2771 // For each unroll part, create a wide load for the group. 2772 SmallVector<Value *, 2> NewLoads; 2773 for (unsigned Part = 0; Part < UF; Part++) { 2774 Instruction *NewLoad; 2775 if (BlockInMask || MaskForGaps) { 2776 assert(useMaskedInterleavedAccesses(*TTI) && 2777 "masked interleaved groups are not allowed."); 2778 Value *GroupMask = MaskForGaps; 2779 if (BlockInMask) { 2780 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2781 Value *ShuffledMask = Builder.CreateShuffleVector( 2782 BlockInMaskPart, 2783 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2784 "interleaved.mask"); 2785 GroupMask = MaskForGaps 2786 ? 
Builder.CreateBinOp(Instruction::And, ShuffledMask, 2787 MaskForGaps) 2788 : ShuffledMask; 2789 } 2790 NewLoad = 2791 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), 2792 GroupMask, PoisonVec, "wide.masked.vec"); 2793 } 2794 else 2795 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2796 Group->getAlign(), "wide.vec"); 2797 Group->addMetadata(NewLoad); 2798 NewLoads.push_back(NewLoad); 2799 } 2800 2801 // For each member in the group, shuffle out the appropriate data from the 2802 // wide loads. 2803 unsigned J = 0; 2804 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2805 Instruction *Member = Group->getMember(I); 2806 2807 // Skip the gaps in the group. 2808 if (!Member) 2809 continue; 2810 2811 auto StrideMask = 2812 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2813 for (unsigned Part = 0; Part < UF; Part++) { 2814 Value *StridedVec = Builder.CreateShuffleVector( 2815 NewLoads[Part], StrideMask, "strided.vec"); 2816 2817 // If this member has different type, cast the result type. 2818 if (Member->getType() != ScalarTy) { 2819 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2820 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2821 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2822 } 2823 2824 if (Group->isReverse()) 2825 StridedVec = reverseVector(StridedVec); 2826 2827 State.set(VPDefs[J], StridedVec, Part); 2828 } 2829 ++J; 2830 } 2831 return; 2832 } 2833 2834 // The sub vector type for current instruction. 2835 auto *SubVT = VectorType::get(ScalarTy, VF); 2836 2837 // Vectorize the interleaved store group. 2838 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2839 assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) && 2840 "masked interleaved groups are not allowed."); 2841 assert((!MaskForGaps || !VF.isScalable()) && 2842 "masking gaps for scalable vectors is not yet supported."); 2843 for (unsigned Part = 0; Part < UF; Part++) { 2844 // Collect the stored vector from each member. 2845 SmallVector<Value *, 4> StoredVecs; 2846 for (unsigned i = 0; i < InterleaveFactor; i++) { 2847 assert((Group->getMember(i) || MaskForGaps) && 2848 "Fail to get a member from an interleaved store group"); 2849 Instruction *Member = Group->getMember(i); 2850 2851 // Skip the gaps in the group. 2852 if (!Member) { 2853 Value *Undef = PoisonValue::get(SubVT); 2854 StoredVecs.push_back(Undef); 2855 continue; 2856 } 2857 2858 Value *StoredVec = State.get(StoredValues[i], Part); 2859 2860 if (Group->isReverse()) 2861 StoredVec = reverseVector(StoredVec); 2862 2863 // If this member has different type, cast it to a unified type. 2864 2865 if (StoredVec->getType() != SubVT) 2866 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2867 2868 StoredVecs.push_back(StoredVec); 2869 } 2870 2871 // Concatenate all vectors into a wide vector. 2872 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2873 2874 // Interleave the elements in the wide vector. 
2875 Value *IVec = Builder.CreateShuffleVector( 2876 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2877 "interleaved.vec"); 2878 2879 Instruction *NewStoreInstr; 2880 if (BlockInMask || MaskForGaps) { 2881 Value *GroupMask = MaskForGaps; 2882 if (BlockInMask) { 2883 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2884 Value *ShuffledMask = Builder.CreateShuffleVector( 2885 BlockInMaskPart, 2886 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2887 "interleaved.mask"); 2888 GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And, 2889 ShuffledMask, MaskForGaps) 2890 : ShuffledMask; 2891 } 2892 NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part], 2893 Group->getAlign(), GroupMask); 2894 } else 2895 NewStoreInstr = 2896 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2897 2898 Group->addMetadata(NewStoreInstr); 2899 } 2900 } 2901 2902 void InnerLoopVectorizer::vectorizeMemoryInstruction( 2903 Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr, 2904 VPValue *StoredValue, VPValue *BlockInMask) { 2905 // Attempt to issue a wide load. 2906 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2907 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2908 2909 assert((LI || SI) && "Invalid Load/Store instruction"); 2910 assert((!SI || StoredValue) && "No stored value provided for widened store"); 2911 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 2912 2913 LoopVectorizationCostModel::InstWidening Decision = 2914 Cost->getWideningDecision(Instr, VF); 2915 assert((Decision == LoopVectorizationCostModel::CM_Widen || 2916 Decision == LoopVectorizationCostModel::CM_Widen_Reverse || 2917 Decision == LoopVectorizationCostModel::CM_GatherScatter) && 2918 "CM decision is not to widen the memory instruction"); 2919 2920 Type *ScalarDataTy = getLoadStoreType(Instr); 2921 2922 auto *DataTy = VectorType::get(ScalarDataTy, VF); 2923 const Align Alignment = getLoadStoreAlignment(Instr); 2924 2925 // Determine if the pointer operand of the access is either consecutive or 2926 // reverse consecutive. 2927 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2928 bool ConsecutiveStride = 2929 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2930 bool CreateGatherScatter = 2931 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2932 2933 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2934 // gather/scatter. Otherwise Decision should have been to Scalarize. 2935 assert((ConsecutiveStride || CreateGatherScatter) && 2936 "The instruction should be scalarized"); 2937 (void)ConsecutiveStride; 2938 2939 VectorParts BlockInMaskParts(UF); 2940 bool isMaskRequired = BlockInMask; 2941 if (isMaskRequired) 2942 for (unsigned Part = 0; Part < UF; ++Part) 2943 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 2944 2945 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2946 // Calculate the pointer for the specific unroll-part. 2947 GetElementPtrInst *PartPtr = nullptr; 2948 2949 bool InBounds = false; 2950 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2951 InBounds = gep->isInBounds(); 2952 if (Reverse) { 2953 // If the address is consecutive but reversed, then the 2954 // wide store needs to start at the last vector element. 
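// For example, with a fixed VF of 4 and Part == 1, the two GEPs below apply
// offsets of -4 and -3, so the part pointer ends up at Ptr - 7 and the wide
// access covers Ptr[-7..-4]; the accompanying reverse shuffles then restore
// the descending order in which the scalar loop would have visited those
// elements.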
2955 // RunTimeVF = VScale * VF.getKnownMinValue() 2956 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 2957 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF); 2958 // NumElt = -Part * RunTimeVF 2959 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 2960 // LastLane = 1 - RunTimeVF 2961 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 2962 PartPtr = 2963 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 2964 PartPtr->setIsInBounds(InBounds); 2965 PartPtr = cast<GetElementPtrInst>( 2966 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 2967 PartPtr->setIsInBounds(InBounds); 2968 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2969 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 2970 } else { 2971 Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF); 2972 PartPtr = cast<GetElementPtrInst>( 2973 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 2974 PartPtr->setIsInBounds(InBounds); 2975 } 2976 2977 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2978 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2979 }; 2980 2981 // Handle Stores: 2982 if (SI) { 2983 setDebugLocFromInst(SI); 2984 2985 for (unsigned Part = 0; Part < UF; ++Part) { 2986 Instruction *NewSI = nullptr; 2987 Value *StoredVal = State.get(StoredValue, Part); 2988 if (CreateGatherScatter) { 2989 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2990 Value *VectorGep = State.get(Addr, Part); 2991 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2992 MaskPart); 2993 } else { 2994 if (Reverse) { 2995 // If we store to reverse consecutive memory locations, then we need 2996 // to reverse the order of elements in the stored value. 2997 StoredVal = reverseVector(StoredVal); 2998 // We don't want to update the value in the map as it might be used in 2999 // another expression. So don't call resetVectorValue(StoredVal). 3000 } 3001 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 3002 if (isMaskRequired) 3003 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 3004 BlockInMaskParts[Part]); 3005 else 3006 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 3007 } 3008 addMetadata(NewSI, SI); 3009 } 3010 return; 3011 } 3012 3013 // Handle loads. 3014 assert(LI && "Must have a load instruction"); 3015 setDebugLocFromInst(LI); 3016 for (unsigned Part = 0; Part < UF; ++Part) { 3017 Value *NewLI; 3018 if (CreateGatherScatter) { 3019 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 3020 Value *VectorGep = State.get(Addr, Part); 3021 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, 3022 nullptr, "wide.masked.gather"); 3023 addMetadata(NewLI, LI); 3024 } else { 3025 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 3026 if (isMaskRequired) 3027 NewLI = Builder.CreateMaskedLoad( 3028 DataTy, VecPtr, Alignment, BlockInMaskParts[Part], 3029 PoisonValue::get(DataTy), "wide.masked.load"); 3030 else 3031 NewLI = 3032 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 3033 3034 // Add metadata to the load, but setVectorValue to the reverse shuffle. 
3035 addMetadata(NewLI, LI); 3036 if (Reverse) 3037 NewLI = reverseVector(NewLI); 3038 } 3039 3040 State.set(Def, NewLI, Part); 3041 } 3042 } 3043 3044 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def, 3045 VPUser &User, 3046 const VPIteration &Instance, 3047 bool IfPredicateInstr, 3048 VPTransformState &State) { 3049 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 3050 3051 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 3052 // the first lane and part. 3053 if (isa<NoAliasScopeDeclInst>(Instr)) 3054 if (!Instance.isFirstIteration()) 3055 return; 3056 3057 setDebugLocFromInst(Instr); 3058 3059 // Does this instruction return a value? 3060 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 3061 3062 Instruction *Cloned = Instr->clone(); 3063 if (!IsVoidRetTy) 3064 Cloned->setName(Instr->getName() + ".cloned"); 3065 3066 State.Builder.SetInsertPoint(Builder.GetInsertBlock(), 3067 Builder.GetInsertPoint()); 3068 // Replace the operands of the cloned instructions with their scalar 3069 // equivalents in the new loop. 3070 for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) { 3071 auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op)); 3072 auto InputInstance = Instance; 3073 if (!Operand || !OrigLoop->contains(Operand) || 3074 (Cost->isUniformAfterVectorization(Operand, State.VF))) 3075 InputInstance.Lane = VPLane::getFirstLane(); 3076 auto *NewOp = State.get(User.getOperand(op), InputInstance); 3077 Cloned->setOperand(op, NewOp); 3078 } 3079 addNewMetadata(Cloned, Instr); 3080 3081 // Place the cloned scalar in the new loop. 3082 Builder.Insert(Cloned); 3083 3084 State.set(Def, Cloned, Instance); 3085 3086 // If we just cloned a new assumption, add it to the assumption cache. 3087 if (auto *II = dyn_cast<AssumeInst>(Cloned)) 3088 AC->registerAssumption(II); 3089 3090 // End if-block. 3091 if (IfPredicateInstr) 3092 PredicatedInstructions.push_back(Cloned); 3093 } 3094 3095 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 3096 Value *End, Value *Step, 3097 Instruction *DL) { 3098 BasicBlock *Header = L->getHeader(); 3099 BasicBlock *Latch = L->getLoopLatch(); 3100 // As we're just creating this loop, it's possible no latch exists 3101 // yet. If so, use the header as this will be a single-block loop. 3102 if (!Latch) 3103 Latch = Header; 3104 3105 IRBuilder<> B(&*Header->getFirstInsertionPt()); 3106 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 3107 setDebugLocFromInst(OldInst, &B); 3108 auto *Induction = B.CreatePHI(Start->getType(), 2, "index"); 3109 3110 B.SetInsertPoint(Latch->getTerminator()); 3111 setDebugLocFromInst(OldInst, &B); 3112 3113 // Create i+1 and fill the PHINode. 3114 // 3115 // If the tail is not folded, we know that End - Start >= Step (either 3116 // statically or through the minimum iteration checks). We also know that both 3117 // Start % Step == 0 and End % Step == 0. We exit the vector loop if %IV + 3118 // %Step == %End. Hence we must exit the loop before %IV + %Step unsigned 3119 // overflows and we can mark the induction increment as NUW. 3120 Value *Next = B.CreateAdd(Induction, Step, "index.next", 3121 /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false); 3122 Induction->addIncoming(Start, L->getLoopPreheader()); 3123 Induction->addIncoming(Next, Latch); 3124 // Create the compare.
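// A sketch of the resulting latch, assuming a fixed VF * UF of 8 and no tail
// folding (block and value names are illustrative):
//   %index.next = add nuw i64 %index, 8
//   %cmp = icmp eq i64 %index.next, %n.vec
//   br i1 %cmp, label %middle.block, label %vector.body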
3125 Value *ICmp = B.CreateICmpEQ(Next, End); 3126 B.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header); 3127 3128 // Now we have two terminators. Remove the old one from the block. 3129 Latch->getTerminator()->eraseFromParent(); 3130 3131 return Induction; 3132 } 3133 3134 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 3135 if (TripCount) 3136 return TripCount; 3137 3138 assert(L && "Create Trip Count for null loop."); 3139 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3140 // Find the loop boundaries. 3141 ScalarEvolution *SE = PSE.getSE(); 3142 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 3143 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 3144 "Invalid loop count"); 3145 3146 Type *IdxTy = Legal->getWidestInductionType(); 3147 assert(IdxTy && "No type for induction"); 3148 3149 // The exit count might have the type of i64 while the phi is i32. This can 3150 // happen if we have an induction variable that is sign extended before the 3151 // compare. The only way that we get a backedge taken count is that the 3152 // induction variable was signed and as such will not overflow. In such a case 3153 // truncation is legal. 3154 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 3155 IdxTy->getPrimitiveSizeInBits()) 3156 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3157 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3158 3159 // Get the total trip count from the count by adding 1. 3160 const SCEV *ExitCount = SE->getAddExpr( 3161 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3162 3163 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3164 3165 // Expand the trip count and place the new instructions in the preheader. 3166 // Notice that the pre-header does not change, only the loop body. 3167 SCEVExpander Exp(*SE, DL, "induction"); 3168 3169 // Count holds the overall loop count (N). 3170 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3171 L->getLoopPreheader()->getTerminator()); 3172 3173 if (TripCount->getType()->isPointerTy()) 3174 TripCount = 3175 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3176 L->getLoopPreheader()->getTerminator()); 3177 3178 return TripCount; 3179 } 3180 3181 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3182 if (VectorTripCount) 3183 return VectorTripCount; 3184 3185 Value *TC = getOrCreateTripCount(L); 3186 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3187 3188 Type *Ty = TC->getType(); 3189 // This is where we can make the step a runtime constant. 3190 Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF); 3191 3192 // If the tail is to be folded by masking, round the number of iterations N 3193 // up to a multiple of Step instead of rounding down. This is done by first 3194 // adding Step-1 and then rounding down. Note that it's ok if this addition 3195 // overflows: the vector induction variable will eventually wrap to zero given 3196 // that it starts at zero and its Step is a power of two; the loop will then 3197 // exit, with the last early-exit vector comparison also producing all-true. 
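// For example, assuming VF * UF == 8 and an original trip count of 13: the
// count is rounded up to 13 + 7 == 20, the remainder below is 20 urem 8 == 4,
// and the vector trip count becomes 16, so the masked vector loop runs two
// iterations that together cover all 13 original iterations.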
3198 if (Cost->foldTailByMasking()) { 3199 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 3200 "VF*UF must be a power of 2 when folding tail by masking"); 3201 assert(!VF.isScalable() && 3202 "Tail folding not yet supported for scalable vectors"); 3203 TC = Builder.CreateAdd( 3204 TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up"); 3205 } 3206 3207 // Now we need to generate the expression for the part of the loop that the 3208 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3209 // iterations are not required for correctness, or N - Step, otherwise. Step 3210 // is equal to the vectorization factor (number of SIMD elements) times the 3211 // unroll factor (number of SIMD instructions). 3212 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3213 3214 // There are cases where we *must* run at least one iteration in the remainder 3215 // loop. See the cost model for when this can happen. If the step evenly 3216 // divides the trip count, we set the remainder to be equal to the step. If 3217 // the step does not evenly divide the trip count, no adjustment is necessary 3218 // since there will already be scalar iterations. Note that the minimum 3219 // iterations check ensures that N >= Step. 3220 if (Cost->requiresScalarEpilogue(VF)) { 3221 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3222 R = Builder.CreateSelect(IsZero, Step, R); 3223 } 3224 3225 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3226 3227 return VectorTripCount; 3228 } 3229 3230 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 3231 const DataLayout &DL) { 3232 // Verify that V is a vector type with same number of elements as DstVTy. 3233 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 3234 unsigned VF = DstFVTy->getNumElements(); 3235 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 3236 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 3237 Type *SrcElemTy = SrcVecTy->getElementType(); 3238 Type *DstElemTy = DstFVTy->getElementType(); 3239 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 3240 "Vector elements must have same size"); 3241 3242 // Do a direct cast if element types are castable. 3243 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 3244 return Builder.CreateBitOrPointerCast(V, DstFVTy); 3245 } 3246 // V cannot be directly casted to desired vector type. 3247 // May happen when V is a floating point vector but DstVTy is a vector of 3248 // pointers or vice-versa. Handle this using a two-step bitcast using an 3249 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 3250 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 3251 "Only one type should be a pointer type"); 3252 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 3253 "Only one type should be a floating point type"); 3254 Type *IntTy = 3255 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 3256 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 3257 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 3258 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 3259 } 3260 3261 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3262 BasicBlock *Bypass) { 3263 Value *Count = getOrCreateTripCount(L); 3264 // Reuse existing vector loop preheader for TC checks. 3265 // Note that new preheader block is generated for vector loop. 
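// A sketch of the check emitted below, assuming a fixed VF * UF of 8, no tail
// folding and no required scalar epilogue (names are illustrative):
//   %min.iters.check = icmp ult i64 %trip.count, 8
//   br i1 %min.iters.check, label %scalar.ph, label %vector.ph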
3266 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 3267 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 3268 3269 // Generate code to check if the loop's trip count is less than VF * UF, or 3270 // equal to it in case a scalar epilogue is required; this implies that the 3271 // vector trip count is zero. This check also covers the case where adding one 3272 // to the backedge-taken count overflowed leading to an incorrect trip count 3273 // of zero. In this case we will also jump to the scalar loop. 3274 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE 3275 : ICmpInst::ICMP_ULT; 3276 3277 // If tail is to be folded, vector loop takes care of all iterations. 3278 Value *CheckMinIters = Builder.getFalse(); 3279 if (!Cost->foldTailByMasking()) { 3280 Value *Step = 3281 createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF); 3282 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 3283 } 3284 // Create new preheader for vector loop. 3285 LoopVectorPreHeader = 3286 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 3287 "vector.ph"); 3288 3289 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 3290 DT->getNode(Bypass)->getIDom()) && 3291 "TC check is expected to dominate Bypass"); 3292 3293 // Update dominator for Bypass & LoopExit (if needed). 3294 DT->changeImmediateDominator(Bypass, TCCheckBlock); 3295 if (!Cost->requiresScalarEpilogue(VF)) 3296 // If there is an epilogue which must run, there's no edge from the 3297 // middle block to exit blocks and thus no need to update the immediate 3298 // dominator of the exit blocks. 3299 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 3300 3301 ReplaceInstWithInst( 3302 TCCheckBlock->getTerminator(), 3303 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3304 LoopBypassBlocks.push_back(TCCheckBlock); 3305 } 3306 3307 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 3308 3309 BasicBlock *const SCEVCheckBlock = 3310 RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock); 3311 if (!SCEVCheckBlock) 3312 return nullptr; 3313 3314 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3315 (OptForSizeBasedOnProfile && 3316 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3317 "Cannot SCEV check stride or overflow when optimizing for size"); 3318 3319 3320 // Update dominator only if this is first RT check. 3321 if (LoopBypassBlocks.empty()) { 3322 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3323 if (!Cost->requiresScalarEpilogue(VF)) 3324 // If there is an epilogue which must run, there's no edge from the 3325 // middle block to exit blocks and thus no need to update the immediate 3326 // dominator of the exit blocks. 3327 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3328 } 3329 3330 LoopBypassBlocks.push_back(SCEVCheckBlock); 3331 AddedSafetyChecks = true; 3332 return SCEVCheckBlock; 3333 } 3334 3335 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, 3336 BasicBlock *Bypass) { 3337 // VPlan-native path does not do any analysis for runtime checks currently. 3338 if (EnableVPlanNativePath) 3339 return nullptr; 3340 3341 BasicBlock *const MemCheckBlock = 3342 RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader); 3343 3344 // Check if we generated code that checks in runtime if arrays overlap. We put 3345 // the checks into a separate block to make the more common case of few 3346 // elements faster. 
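// A rough sketch of such checks for two pointer groups %a and %b, where the
// bounds are the first and one-past-the-last accessed addresses (names are
// illustrative):
//   %bound0 = icmp ult i8* %a.start, %b.end
//   %bound1 = icmp ult i8* %b.start, %a.end
//   %found.conflict = and i1 %bound0, %bound1
//   br i1 %found.conflict, label %scalar.ph, label %vector.ph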
3347 if (!MemCheckBlock) 3348 return nullptr; 3349 3350 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3351 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3352 "Cannot emit memory checks when optimizing for size, unless forced " 3353 "to vectorize."); 3354 ORE->emit([&]() { 3355 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3356 L->getStartLoc(), L->getHeader()) 3357 << "Code-size may be reduced by not forcing " 3358 "vectorization, or by source-code modifications " 3359 "eliminating the need for runtime checks " 3360 "(e.g., adding 'restrict')."; 3361 }); 3362 } 3363 3364 LoopBypassBlocks.push_back(MemCheckBlock); 3365 3366 AddedSafetyChecks = true; 3367 3368 // We currently don't use LoopVersioning for the actual loop cloning but we 3369 // still use it to add the noalias metadata. 3370 LVer = std::make_unique<LoopVersioning>( 3371 *Legal->getLAI(), 3372 Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, 3373 DT, PSE.getSE()); 3374 LVer->prepareNoAliasMetadata(); 3375 return MemCheckBlock; 3376 } 3377 3378 Value *InnerLoopVectorizer::emitTransformedIndex( 3379 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 3380 const InductionDescriptor &ID) const { 3381 3382 SCEVExpander Exp(*SE, DL, "induction"); 3383 auto Step = ID.getStep(); 3384 auto StartValue = ID.getStartValue(); 3385 assert(Index->getType()->getScalarType() == Step->getType() && 3386 "Index scalar type does not match StepValue type"); 3387 3388 // Note: the IR at this point is broken. We cannot use SE to create any new 3389 // SCEV and then expand it, hoping that SCEV's simplification will give us 3390 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 3391 // lead to various SCEV crashes. So all we can do is to use builder and rely 3392 // on InstCombine for future simplifications. Here we handle some trivial 3393 // cases only. 3394 auto CreateAdd = [&B](Value *X, Value *Y) { 3395 assert(X->getType() == Y->getType() && "Types don't match!"); 3396 if (auto *CX = dyn_cast<ConstantInt>(X)) 3397 if (CX->isZero()) 3398 return Y; 3399 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3400 if (CY->isZero()) 3401 return X; 3402 return B.CreateAdd(X, Y); 3403 }; 3404 3405 // We allow X to be a vector type, in which case Y will potentially be 3406 // splatted into a vector with the same element count. 3407 auto CreateMul = [&B](Value *X, Value *Y) { 3408 assert(X->getType()->getScalarType() == Y->getType() && 3409 "Types don't match!"); 3410 if (auto *CX = dyn_cast<ConstantInt>(X)) 3411 if (CX->isOne()) 3412 return Y; 3413 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3414 if (CY->isOne()) 3415 return X; 3416 VectorType *XVTy = dyn_cast<VectorType>(X->getType()); 3417 if (XVTy && !isa<VectorType>(Y->getType())) 3418 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); 3419 return B.CreateMul(X, Y); 3420 }; 3421 3422 // Get a suitable insert point for SCEV expansion. For blocks in the vector 3423 // loop, choose the end of the vector loop header (=LoopVectorBody), because 3424 // the DomTree is not kept up-to-date for additional blocks generated in the 3425 // vector loop. By using the header as insertion point, we guarantee that the 3426 // expanded instructions dominate all their uses. 
3427 auto GetInsertPoint = [this, &B]() { 3428 BasicBlock *InsertBB = B.GetInsertPoint()->getParent(); 3429 if (InsertBB != LoopVectorBody && 3430 LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB)) 3431 return LoopVectorBody->getTerminator(); 3432 return &*B.GetInsertPoint(); 3433 }; 3434 3435 switch (ID.getKind()) { 3436 case InductionDescriptor::IK_IntInduction: { 3437 assert(!isa<VectorType>(Index->getType()) && 3438 "Vector indices not supported for integer inductions yet"); 3439 assert(Index->getType() == StartValue->getType() && 3440 "Index type does not match StartValue type"); 3441 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 3442 return B.CreateSub(StartValue, Index); 3443 auto *Offset = CreateMul( 3444 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())); 3445 return CreateAdd(StartValue, Offset); 3446 } 3447 case InductionDescriptor::IK_PtrInduction: { 3448 assert(isa<SCEVConstant>(Step) && 3449 "Expected constant step for pointer induction"); 3450 return B.CreateGEP( 3451 ID.getElementType(), StartValue, 3452 CreateMul(Index, 3453 Exp.expandCodeFor(Step, Index->getType()->getScalarType(), 3454 GetInsertPoint()))); 3455 } 3456 case InductionDescriptor::IK_FpInduction: { 3457 assert(!isa<VectorType>(Index->getType()) && 3458 "Vector indices not supported for FP inductions yet"); 3459 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 3460 auto InductionBinOp = ID.getInductionBinOp(); 3461 assert(InductionBinOp && 3462 (InductionBinOp->getOpcode() == Instruction::FAdd || 3463 InductionBinOp->getOpcode() == Instruction::FSub) && 3464 "Original bin op should be defined for FP induction"); 3465 3466 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 3467 Value *MulExp = B.CreateFMul(StepValue, Index); 3468 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 3469 "induction"); 3470 } 3471 case InductionDescriptor::IK_NoInduction: 3472 return nullptr; 3473 } 3474 llvm_unreachable("invalid enum"); 3475 } 3476 3477 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3478 LoopScalarBody = OrigLoop->getHeader(); 3479 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3480 assert(LoopVectorPreHeader && "Invalid loop structure"); 3481 LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr 3482 assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) && 3483 "multiple exit loop without required epilogue?"); 3484 3485 LoopMiddleBlock = 3486 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3487 LI, nullptr, Twine(Prefix) + "middle.block"); 3488 LoopScalarPreHeader = 3489 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3490 nullptr, Twine(Prefix) + "scalar.ph"); 3491 3492 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3493 3494 // Set up the middle block terminator. Two cases: 3495 // 1) If we know that we must execute the scalar epilogue, emit an 3496 // unconditional branch. 3497 // 2) Otherwise, we must have a single unique exit block (due to how we 3498 // implement the multiple exit case). In this case, set up a conditional 3499 // branch from the middle block to the loop scalar preheader, and the 3500 // exit block. completeLoopSkeleton will update the condition to use an 3501 // iteration check, if required to decide whether to execute the remainder. 3502 BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3503 BranchInst::Create(LoopScalarPreHeader) : 3504 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, 3505 Builder.getTrue()); 3506 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3507 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3508 3509 // We intentionally don't let SplitBlock update LoopInfo since 3510 // LoopVectorBody should belong to a different loop than LoopVectorPreHeader. 3511 // LoopVectorBody is explicitly added to the correct place a few lines later. 3512 LoopVectorBody = 3513 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3514 nullptr, nullptr, Twine(Prefix) + "vector.body"); 3515 3516 // Update dominator for loop exit. 3517 if (!Cost->requiresScalarEpilogue(VF)) 3518 // If there is an epilogue which must run, there's no edge from the 3519 // middle block to exit blocks and thus no need to update the immediate 3520 // dominator of the exit blocks. 3521 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3522 3523 // Create and register the new vector loop. 3524 Loop *Lp = LI->AllocateLoop(); 3525 Loop *ParentLoop = OrigLoop->getParentLoop(); 3526 3527 // Insert the new loop into the loop nest and register the new basic blocks 3528 // before calling any utilities such as SCEV that require valid LoopInfo. 3529 if (ParentLoop) { 3530 ParentLoop->addChildLoop(Lp); 3531 } else { 3532 LI->addTopLevelLoop(Lp); 3533 } 3534 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3535 return Lp; 3536 } 3537 3538 void InnerLoopVectorizer::createInductionResumeValues( 3539 Loop *L, Value *VectorTripCount, 3540 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3541 assert(VectorTripCount && L && "Expected valid arguments"); 3542 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3543 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3544 "Inconsistent information about additional bypass."); 3545 // We are going to resume the execution of the scalar loop. 3546 // Go over all of the induction variables that we found and fix the 3547 // PHIs that are left in the scalar version of the loop. 3548 // The starting values of PHI nodes depend on the counter of the last 3549 // iteration in the vectorized loop. 3550 // If we come from a bypass edge then we need to start from the original 3551 // start value. 3552 for (auto &InductionEntry : Legal->getInductionVars()) { 3553 PHINode *OrigPhi = InductionEntry.first; 3554 InductionDescriptor II = InductionEntry.second; 3555 3556 // Create phi nodes to merge from the backedge-taken check block. 3557 PHINode *BCResumeVal = 3558 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3559 LoopScalarPreHeader->getTerminator()); 3560 // Copy original phi DL over to the new one. 3561 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3562 Value *&EndValue = IVEndValues[OrigPhi]; 3563 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3564 if (OrigPhi == OldInduction) { 3565 // We know what the end value is. 3566 EndValue = VectorTripCount; 3567 } else { 3568 IRBuilder<> B(L->getLoopPreheader()->getTerminator()); 3569 3570 // Fast-math-flags propagate from the original induction instruction.
3571 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3572 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3573 3574 Type *StepType = II.getStep()->getType(); 3575 Instruction::CastOps CastOp = 3576 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3577 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3578 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3579 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3580 EndValue->setName("ind.end"); 3581 3582 // Compute the end value for the additional bypass (if applicable). 3583 if (AdditionalBypass.first) { 3584 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3585 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3586 StepType, true); 3587 CRD = 3588 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3589 EndValueFromAdditionalBypass = 3590 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3591 EndValueFromAdditionalBypass->setName("ind.end"); 3592 } 3593 } 3594 // The new PHI merges the original incoming value, in case of a bypass, 3595 // or the value at the end of the vectorized loop. 3596 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3597 3598 // Fix the scalar body counter (PHI node). 3599 // The old induction's phi node in the scalar body needs the truncated 3600 // value. 3601 for (BasicBlock *BB : LoopBypassBlocks) 3602 BCResumeVal->addIncoming(II.getStartValue(), BB); 3603 3604 if (AdditionalBypass.first) 3605 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3606 EndValueFromAdditionalBypass); 3607 3608 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3609 } 3610 } 3611 3612 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3613 MDNode *OrigLoopID) { 3614 assert(L && "Expected valid loop."); 3615 3616 // The trip counts should be cached by now. 3617 Value *Count = getOrCreateTripCount(L); 3618 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3619 3620 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3621 3622 // Add a check in the middle block to see if we have completed 3623 // all of the iterations in the first vector loop. Three cases: 3624 // 1) If we require a scalar epilogue, there is no conditional branch as 3625 // we unconditionally branch to the scalar preheader. Do nothing. 3626 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. 3627 // Thus if tail is to be folded, we know we don't need to run the 3628 // remainder and we can use the previous value for the condition (true). 3629 // 3) Otherwise, construct a runtime check. 3630 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { 3631 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3632 Count, VectorTripCount, "cmp.n", 3633 LoopMiddleBlock->getTerminator()); 3634 3635 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3636 // of the corresponding compare because they may have ended up with 3637 // different line numbers and we want to avoid awkward line stepping while 3638 // debugging. Eg. if the compare has got a line number inside the loop. 3639 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3640 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3641 } 3642 3643 // Get ready to start creating new instructions into the vectorized body. 
3644 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3645 "Inconsistent vector loop preheader"); 3646 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3647 3648 Optional<MDNode *> VectorizedLoopID = 3649 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3650 LLVMLoopVectorizeFollowupVectorized}); 3651 if (VectorizedLoopID.hasValue()) { 3652 L->setLoopID(VectorizedLoopID.getValue()); 3653 3654 // Do not setAlreadyVectorized if loop attributes have been defined 3655 // explicitly. 3656 return LoopVectorPreHeader; 3657 } 3658 3659 // Keep all loop hints from the original loop on the vector loop (we'll 3660 // replace the vectorizer-specific hints below). 3661 if (MDNode *LID = OrigLoop->getLoopID()) 3662 L->setLoopID(LID); 3663 3664 LoopVectorizeHints Hints(L, true, *ORE); 3665 Hints.setAlreadyVectorized(); 3666 3667 #ifdef EXPENSIVE_CHECKS 3668 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3669 LI->verify(*DT); 3670 #endif 3671 3672 return LoopVectorPreHeader; 3673 } 3674 3675 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3676 /* 3677 In this function we generate a new loop. The new loop will contain 3678 the vectorized instructions while the old loop will continue to run the 3679 scalar remainder. 3680 3681 [ ] <-- loop iteration number check. 3682 / | 3683 / v 3684 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3685 | / | 3686 | / v 3687 || [ ] <-- vector pre header. 3688 |/ | 3689 | v 3690 | [ ] \ 3691 | [ ]_| <-- vector loop. 3692 | | 3693 | v 3694 \ -[ ] <--- middle-block. 3695 \/ | 3696 /\ v 3697 | ->[ ] <--- new preheader. 3698 | | 3699 (opt) v <-- edge from middle to exit iff epilogue is not required. 3700 | [ ] \ 3701 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). 3702 \ | 3703 \ v 3704 >[ ] <-- exit block(s). 3705 ... 3706 */ 3707 3708 // Get the metadata of the original loop before it gets modified. 3709 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3710 3711 // Workaround! Compute the trip count of the original loop and cache it 3712 // before we start modifying the CFG. This code has a systemic problem 3713 // wherein it tries to run analysis over partially constructed IR; this is 3714 // wrong, and not simply for SCEV. The trip count of the original loop 3715 // simply happens to be prone to hitting this in practice. In theory, we 3716 // can hit the same issue for any SCEV, or ValueTracking query done during 3717 // mutation. See PR49900. 3718 getOrCreateTripCount(OrigLoop); 3719 3720 // Create an empty vector loop, and prepare basic blocks for the runtime 3721 // checks. 3722 Loop *Lp = createVectorLoopSkeleton(""); 3723 3724 // Now, compare the new count to zero. If it is zero skip the vector loop and 3725 // jump to the scalar loop. This check also covers the case where the 3726 // backedge-taken count is uint##_max: adding one to it will overflow leading 3727 // to an incorrect trip count of zero. In this (rare) case we will also jump 3728 // to the scalar loop. 3729 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3730 3731 // Generate the code to check any assumptions that we've made for SCEV 3732 // expressions. 3733 emitSCEVChecks(Lp, LoopScalarPreHeader); 3734 3735 // Generate the code that checks in runtime if arrays overlap. We put the 3736 // checks into a separate block to make the more common case of few elements 3737 // faster. 
3738 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3739 3740 // Some loops have a single integer induction variable, while other loops 3741 // don't. One example is C++ iterators, which often have multiple pointer 3742 // induction variables. In the code below we also support a case where we 3743 // don't have a single induction variable. 3744 // 3745 // We try hard to obtain an induction variable from the original loop. 3746 // However, if we don't find one that: 3747 // - is an integer 3748 // - counts from zero, stepping by one 3749 // - is the size of the widest induction variable type 3750 // then we create a new one. 3751 OldInduction = Legal->getPrimaryInduction(); 3752 Type *IdxTy = Legal->getWidestInductionType(); 3753 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3754 // The loop step is equal to the vectorization factor (num of SIMD elements) 3755 // times the unroll factor (num of SIMD instructions). 3756 Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt()); 3757 Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF); 3758 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3759 Induction = 3760 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3761 getDebugLocFromInstOrOperands(OldInduction)); 3762 3763 // Emit phis for the new starting index of the scalar loop. 3764 createInductionResumeValues(Lp, CountRoundDown); 3765 3766 return completeLoopSkeleton(Lp, OrigLoopID); 3767 } 3768 3769 // Fix up external users of the induction variable. At this point, we are 3770 // in LCSSA form, with all external PHIs that use the IV having one input value, 3771 // coming from the remainder loop. We need those PHIs to also have a correct 3772 // value for the IV when arriving directly from the middle block. 3773 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3774 const InductionDescriptor &II, 3775 Value *CountRoundDown, Value *EndValue, 3776 BasicBlock *MiddleBlock) { 3777 // There are two kinds of external IV usages - those that use the value 3778 // computed in the last iteration (the PHI) and those that use the penultimate 3779 // value (the value that feeds into the phi from the loop latch). 3780 // We allow both, but they obviously have different values. 3781 3782 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3783 3784 DenseMap<Value *, Value *> MissingVals; 3785 3786 // An external user of the last iteration's value should see the value that 3787 // the remainder loop uses to initialize its own IV. 3788 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3789 for (User *U : PostInc->users()) { 3790 Instruction *UI = cast<Instruction>(U); 3791 if (!OrigLoop->contains(UI)) { 3792 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3793 MissingVals[UI] = EndValue; 3794 } 3795 } 3796 3797 // An external user of the penultimate value needs to see EndValue - Step. 3798 // The simplest way to get this is to recompute it from the constituent SCEVs, 3799 // that is Start + (Step * (CRD - 1)). 3800 for (User *U : OrigPhi->users()) { 3801 auto *UI = cast<Instruction>(U); 3802 if (!OrigLoop->contains(UI)) { 3803 const DataLayout &DL = 3804 OrigLoop->getHeader()->getModule()->getDataLayout(); 3805 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3806 3807 IRBuilder<> B(MiddleBlock->getTerminator()); 3808 3809 // Fast-math-flags propagate from the original induction instruction.
3810 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3811 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3812 3813 Value *CountMinusOne = B.CreateSub( 3814 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3815 Value *CMO = 3816 !II.getStep()->getType()->isIntegerTy() 3817 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3818 II.getStep()->getType()) 3819 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3820 CMO->setName("cast.cmo"); 3821 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); 3822 Escape->setName("ind.escape"); 3823 MissingVals[UI] = Escape; 3824 } 3825 } 3826 3827 for (auto &I : MissingVals) { 3828 PHINode *PHI = cast<PHINode>(I.first); 3829 // One corner case we have to handle is two IVs "chasing" each-other, 3830 // that is %IV2 = phi [...], [ %IV1, %latch ] 3831 // In this case, if IV1 has an external use, we need to avoid adding both 3832 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3833 // don't already have an incoming value for the middle block. 3834 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3835 PHI->addIncoming(I.second, MiddleBlock); 3836 } 3837 } 3838 3839 namespace { 3840 3841 struct CSEDenseMapInfo { 3842 static bool canHandle(const Instruction *I) { 3843 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3844 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3845 } 3846 3847 static inline Instruction *getEmptyKey() { 3848 return DenseMapInfo<Instruction *>::getEmptyKey(); 3849 } 3850 3851 static inline Instruction *getTombstoneKey() { 3852 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3853 } 3854 3855 static unsigned getHashValue(const Instruction *I) { 3856 assert(canHandle(I) && "Unknown instruction!"); 3857 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3858 I->value_op_end())); 3859 } 3860 3861 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3862 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3863 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3864 return LHS == RHS; 3865 return LHS->isIdenticalTo(RHS); 3866 } 3867 }; 3868 3869 } // end anonymous namespace 3870 3871 ///Perform cse of induction variable instructions. 3872 static void cse(BasicBlock *BB) { 3873 // Perform simple cse. 3874 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3875 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3876 Instruction *In = &*I++; 3877 3878 if (!CSEDenseMapInfo::canHandle(In)) 3879 continue; 3880 3881 // Check if we can replace this instruction with any of the 3882 // visited instructions. 3883 if (Instruction *V = CSEMap.lookup(In)) { 3884 In->replaceAllUsesWith(V); 3885 In->eraseFromParent(); 3886 continue; 3887 } 3888 3889 CSEMap[In] = In; 3890 } 3891 } 3892 3893 InstructionCost 3894 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3895 bool &NeedToScalarize) const { 3896 Function *F = CI->getCalledFunction(); 3897 Type *ScalarRetTy = CI->getType(); 3898 SmallVector<Type *, 4> Tys, ScalarTys; 3899 for (auto &ArgOp : CI->arg_operands()) 3900 ScalarTys.push_back(ArgOp->getType()); 3901 3902 // Estimate cost of scalarized vector call. The source operands are assumed 3903 // to be vectors, so we need to extract individual elements from there, 3904 // execute VF scalar calls, and then gather the result into the vector return 3905 // value. 
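// That is, the scalarized estimate computed below is roughly
//   VF * (cost of one scalar call) + (cost of extracting the VF argument
//   elements and inserting the VF results),
// and it is kept only if no cheaper vector variant of the call is found.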
3906 InstructionCost ScalarCallCost = 3907 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3908 if (VF.isScalar()) 3909 return ScalarCallCost; 3910 3911 // Compute corresponding vector type for return value and arguments. 3912 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3913 for (Type *ScalarTy : ScalarTys) 3914 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3915 3916 // Compute costs of unpacking argument values for the scalar calls and 3917 // packing the return values to a vector. 3918 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3919 3920 InstructionCost Cost = 3921 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3922 3923 // If we can't emit a vector call for this function, then the currently found 3924 // cost is the cost we need to return. 3925 NeedToScalarize = true; 3926 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3927 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3928 3929 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3930 return Cost; 3931 3932 // If the corresponding vector cost is cheaper, return its cost. 3933 InstructionCost VectorCallCost = 3934 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3935 if (VectorCallCost < Cost) { 3936 NeedToScalarize = false; 3937 Cost = VectorCallCost; 3938 } 3939 return Cost; 3940 } 3941 3942 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3943 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3944 return Elt; 3945 return VectorType::get(Elt, VF); 3946 } 3947 3948 InstructionCost 3949 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3950 ElementCount VF) const { 3951 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3952 assert(ID && "Expected intrinsic call!"); 3953 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3954 FastMathFlags FMF; 3955 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3956 FMF = FPMO->getFastMathFlags(); 3957 3958 SmallVector<const Value *> Arguments(CI->args()); 3959 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3960 SmallVector<Type *> ParamTys; 3961 std::transform(FTy->param_begin(), FTy->param_end(), 3962 std::back_inserter(ParamTys), 3963 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3964 3965 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3966 dyn_cast<IntrinsicInst>(CI)); 3967 return TTI.getIntrinsicInstrCost(CostAttrs, 3968 TargetTransformInfo::TCK_RecipThroughput); 3969 } 3970 3971 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3972 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3973 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3974 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3975 } 3976 3977 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3978 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3979 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3980 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3981 } 3982 3983 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3984 // For every instruction `I` in MinBWs, truncate the operands, create a 3985 // truncated version of `I` and reextend its result. InstCombine runs 3986 // later and will remove any ext/trunc pairs. 
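// A sketch of the rewrite, assuming MinBWs records that 8 bits suffice for an
// i32 add (value names are illustrative):
//   %a = add <4 x i32> %x, %y
// becomes
//   %x.tr = trunc <4 x i32> %x to <4 x i8>
//   %y.tr = trunc <4 x i32> %y to <4 x i8>
//   %a.tr = add <4 x i8> %x.tr, %y.tr
//   %a    = zext <4 x i8> %a.tr to <4 x i32>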
3987 SmallPtrSet<Value *, 4> Erased; 3988 for (const auto &KV : Cost->getMinimalBitwidths()) { 3989 // If the value wasn't vectorized, we must maintain the original scalar 3990 // type. The absence of the value from State indicates that it 3991 // wasn't vectorized. 3992 // FIXME: Should not rely on getVPValue at this point. 3993 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3994 if (!State.hasAnyVectorValue(Def)) 3995 continue; 3996 for (unsigned Part = 0; Part < UF; ++Part) { 3997 Value *I = State.get(Def, Part); 3998 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3999 continue; 4000 Type *OriginalTy = I->getType(); 4001 Type *ScalarTruncatedTy = 4002 IntegerType::get(OriginalTy->getContext(), KV.second); 4003 auto *TruncatedTy = VectorType::get( 4004 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); 4005 if (TruncatedTy == OriginalTy) 4006 continue; 4007 4008 IRBuilder<> B(cast<Instruction>(I)); 4009 auto ShrinkOperand = [&](Value *V) -> Value * { 4010 if (auto *ZI = dyn_cast<ZExtInst>(V)) 4011 if (ZI->getSrcTy() == TruncatedTy) 4012 return ZI->getOperand(0); 4013 return B.CreateZExtOrTrunc(V, TruncatedTy); 4014 }; 4015 4016 // The actual instruction modification depends on the instruction type, 4017 // unfortunately. 4018 Value *NewI = nullptr; 4019 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 4020 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 4021 ShrinkOperand(BO->getOperand(1))); 4022 4023 // Any wrapping introduced by shrinking this operation shouldn't be 4024 // considered undefined behavior. So, we can't unconditionally copy 4025 // arithmetic wrapping flags to NewI. 4026 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 4027 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 4028 NewI = 4029 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 4030 ShrinkOperand(CI->getOperand(1))); 4031 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 4032 NewI = B.CreateSelect(SI->getCondition(), 4033 ShrinkOperand(SI->getTrueValue()), 4034 ShrinkOperand(SI->getFalseValue())); 4035 } else if (auto *CI = dyn_cast<CastInst>(I)) { 4036 switch (CI->getOpcode()) { 4037 default: 4038 llvm_unreachable("Unhandled cast!"); 4039 case Instruction::Trunc: 4040 NewI = ShrinkOperand(CI->getOperand(0)); 4041 break; 4042 case Instruction::SExt: 4043 NewI = B.CreateSExtOrTrunc( 4044 CI->getOperand(0), 4045 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4046 break; 4047 case Instruction::ZExt: 4048 NewI = B.CreateZExtOrTrunc( 4049 CI->getOperand(0), 4050 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4051 break; 4052 } 4053 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 4054 auto Elements0 = 4055 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); 4056 auto *O0 = B.CreateZExtOrTrunc( 4057 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 4058 auto Elements1 = 4059 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); 4060 auto *O1 = B.CreateZExtOrTrunc( 4061 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 4062 4063 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 4064 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 4065 // Don't do anything with the operands, just extend the result. 
4066 continue; 4067 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 4068 auto Elements = 4069 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); 4070 auto *O0 = B.CreateZExtOrTrunc( 4071 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4072 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 4073 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 4074 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 4075 auto Elements = 4076 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); 4077 auto *O0 = B.CreateZExtOrTrunc( 4078 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4079 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 4080 } else { 4081 // If we don't know what to do, be conservative and don't do anything. 4082 continue; 4083 } 4084 4085 // Lastly, extend the result. 4086 NewI->takeName(cast<Instruction>(I)); 4087 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 4088 I->replaceAllUsesWith(Res); 4089 cast<Instruction>(I)->eraseFromParent(); 4090 Erased.insert(I); 4091 State.reset(Def, Res, Part); 4092 } 4093 } 4094 4095 // We'll have created a bunch of ZExts that are now parentless. Clean up. 4096 for (const auto &KV : Cost->getMinimalBitwidths()) { 4097 // If the value wasn't vectorized, we must maintain the original scalar 4098 // type. The absence of the value from State indicates that it 4099 // wasn't vectorized. 4100 // FIXME: Should not rely on getVPValue at this point. 4101 VPValue *Def = State.Plan->getVPValue(KV.first, true); 4102 if (!State.hasAnyVectorValue(Def)) 4103 continue; 4104 for (unsigned Part = 0; Part < UF; ++Part) { 4105 Value *I = State.get(Def, Part); 4106 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 4107 if (Inst && Inst->use_empty()) { 4108 Value *NewI = Inst->getOperand(0); 4109 Inst->eraseFromParent(); 4110 State.reset(Def, NewI, Part); 4111 } 4112 } 4113 } 4114 } 4115 4116 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { 4117 // Insert truncates and extends for any truncated instructions as hints to 4118 // InstCombine. 4119 if (VF.isVector()) 4120 truncateToMinimalBitwidths(State); 4121 4122 // Fix widened non-induction PHIs by setting up the PHI operands. 4123 if (OrigPHIsToFix.size()) { 4124 assert(EnableVPlanNativePath && 4125 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 4126 fixNonInductionPHIs(State); 4127 } 4128 4129 // At this point every instruction in the original loop is widened to a 4130 // vector form. Now we need to fix the recurrences in the loop. These PHI 4131 // nodes are currently empty because we did not want to introduce cycles. 4132 // This is the second stage of vectorizing recurrences. 4133 fixCrossIterationPHIs(State); 4134 4135 // Forget the original basic block. 4136 PSE.getSE()->forgetLoop(OrigLoop); 4137 4138 // If we inserted an edge from the middle block to the unique exit block, 4139 // update uses outside the loop (phis) to account for the newly inserted 4140 // edge. 4141 if (!Cost->requiresScalarEpilogue(VF)) { 4142 // Fix-up external users of the induction variables. 4143 for (auto &Entry : Legal->getInductionVars()) 4144 fixupIVUsers(Entry.first, Entry.second, 4145 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 4146 IVEndValues[Entry.first], LoopMiddleBlock); 4147 4148 fixLCSSAPHIs(State); 4149 } 4150 4151 for (Instruction *PI : PredicatedInstructions) 4152 sinkScalarOperands(&*PI); 4153 4154 // Remove redundant induction instructions. 
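// For instance (illustrative), widening the induction variables often leaves
// several identical address computations behind, e.g. two copies of
//   %gep = getelementptr inbounds i32, i32* %base, i64 %offset
// in the vector body; the cse() call below folds such duplicates into one.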
4155 cse(LoopVectorBody);
4156
4157 // Set/update profile weights for the vector and remainder loops as original
4158 // loop iterations are now distributed among them. Note that the original loop
4159 // represented by LoopScalarBody becomes the remainder loop after vectorization.
4160 //
4161 // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
4162 // end up getting a slightly inaccurate result but that should be OK since
4163 // profile is not inherently precise anyway. Note also that possible bypass of
4164 // vector code caused by legality checks is ignored, assigning all the weight
4165 // to the vector loop, optimistically.
4166 //
4167 // For scalable vectorization we can't know at compile time how many iterations
4168 // of the loop are handled in one vector iteration, so instead assume a pessimistic
4169 // vscale of '1'.
4170 setProfileInfoAfterUnrolling(
4171 LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4172 LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4173 }
4174
4175 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4176 // In order to support recurrences we need to be able to vectorize Phi nodes.
4177 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4178 // stage #2: We now need to fix the recurrences by adding incoming edges to
4179 // the currently empty PHI nodes. At this point every instruction in the
4180 // original loop is widened to a vector form so we can use them to construct
4181 // the incoming edges.
4182 VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
4183 for (VPRecipeBase &R : Header->phis()) {
4184 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
4185 fixReduction(ReductionPhi, State);
4186 else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
4187 fixFirstOrderRecurrence(FOR, State);
4188 }
4189 }
4190
4191 void InnerLoopVectorizer::fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR,
4192 VPTransformState &State) {
4193 // This is the second phase of vectorizing first-order recurrences. An
4194 // overview of the transformation is described below. Suppose we have the
4195 // following loop.
4196 //
4197 // for (int i = 0; i < n; ++i)
4198 // b[i] = a[i] - a[i - 1];
4199 //
4200 // There is a first-order recurrence on "a". For this loop, the shorthand
4201 // scalar IR looks like:
4202 //
4203 // scalar.ph:
4204 // s_init = a[-1]
4205 // br scalar.body
4206 //
4207 // scalar.body:
4208 // i = phi [0, scalar.ph], [i+1, scalar.body]
4209 // s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4210 // s2 = a[i]
4211 // b[i] = s2 - s1
4212 // br cond, scalar.body, ...
4213 //
4214 // In this example, s1 is a recurrence because its value depends on the
4215 // previous iteration. In the first phase of vectorization, we created a
4216 // vector phi v1 for s1. We now complete the vectorization and produce the
4217 // shorthand vector IR shown below (for VF = 4, UF = 1).
4218 // 4219 // vector.ph: 4220 // v_init = vector(..., ..., ..., a[-1]) 4221 // br vector.body 4222 // 4223 // vector.body 4224 // i = phi [0, vector.ph], [i+4, vector.body] 4225 // v1 = phi [v_init, vector.ph], [v2, vector.body] 4226 // v2 = a[i, i+1, i+2, i+3]; 4227 // v3 = vector(v1(3), v2(0, 1, 2)) 4228 // b[i, i+1, i+2, i+3] = v2 - v3 4229 // br cond, vector.body, middle.block 4230 // 4231 // middle.block: 4232 // x = v2(3) 4233 // br scalar.ph 4234 // 4235 // scalar.ph: 4236 // s_init = phi [x, middle.block], [a[-1], otherwise] 4237 // br scalar.body 4238 // 4239 // After execution completes the vector loop, we extract the next value of 4240 // the recurrence (x) to use as the initial value in the scalar loop. 4241 4242 // Extract the last vector element in the middle block. This will be the 4243 // initial value for the recurrence when jumping to the scalar loop. 4244 VPValue *PreviousDef = PhiR->getBackedgeValue(); 4245 Value *Incoming = State.get(PreviousDef, UF - 1); 4246 auto *ExtractForScalar = Incoming; 4247 auto *IdxTy = Builder.getInt32Ty(); 4248 if (VF.isVector()) { 4249 auto *One = ConstantInt::get(IdxTy, 1); 4250 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4251 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4252 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 4253 ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, 4254 "vector.recur.extract"); 4255 } 4256 // Extract the second last element in the middle block if the 4257 // Phi is used outside the loop. We need to extract the phi itself 4258 // and not the last element (the phi update in the current iteration). This 4259 // will be the value when jumping to the exit block from the LoopMiddleBlock, 4260 // when the scalar loop is not run at all. 4261 Value *ExtractForPhiUsedOutsideLoop = nullptr; 4262 if (VF.isVector()) { 4263 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4264 auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2)); 4265 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 4266 Incoming, Idx, "vector.recur.extract.for.phi"); 4267 } else if (UF > 1) 4268 // When loop is unrolled without vectorizing, initialize 4269 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value 4270 // of `Incoming`. This is analogous to the vectorized case above: extracting 4271 // the second last element when VF > 1. 4272 ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2); 4273 4274 // Fix the initial value of the original recurrence in the scalar loop. 4275 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 4276 PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue()); 4277 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 4278 auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue(); 4279 for (auto *BB : predecessors(LoopScalarPreHeader)) { 4280 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 4281 Start->addIncoming(Incoming, BB); 4282 } 4283 4284 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start); 4285 Phi->setName("scalar.recur"); 4286 4287 // Finally, fix users of the recurrence outside the loop. The users will need 4288 // either the last value of the scalar recurrence or the last value of the 4289 // vector recurrence we extracted in the middle block. Since the loop is in 4290 // LCSSA form, we just need to find all the phi nodes for the original scalar 4291 // recurrence in the exit block, and then add an edge for the middle block. 
4292 // Note that LCSSA does not imply single entry when the original scalar loop
4293 // had multiple exiting edges (as we always run the last iteration in the
4294 // scalar epilogue); in that case, there is no edge from middle to exit and
4295 // thus no phis which need to be updated.
4296 if (!Cost->requiresScalarEpilogue(VF))
4297 for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4298 if (any_of(LCSSAPhi.incoming_values(),
4299 [Phi](Value *V) { return V == Phi; }))
4300 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4301 }
4302
4303 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
4304 VPTransformState &State) {
4305 PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
4306 // Get its reduction variable descriptor.
4307 assert(Legal->isReductionVariable(OrigPhi) &&
4308 "Unable to find the reduction variable");
4309 const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
4310
4311 RecurKind RK = RdxDesc.getRecurrenceKind();
4312 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4313 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4314 setDebugLocFromInst(ReductionStartValue);
4315
4316 VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
4317 // This is the vector-clone of the value that leaves the loop.
4318 Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4319
4320 // Wrap flags are in general invalid after vectorization, clear them.
4321 clearReductionWrapFlags(RdxDesc, State);
4322
4323 // Before each round, move the insertion point right between
4324 // the PHIs and the values we are going to write.
4325 // This allows us to write both PHINodes and the extractelement
4326 // instructions.
4327 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4328
4329 setDebugLocFromInst(LoopExitInst);
4330
4331 Type *PhiTy = OrigPhi->getType();
4332 // If tail is folded by masking, the vector value to leave the loop should be
4333 // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4334 // instead of the former. For an inloop reduction the reduction will already
4335 // be predicated, and does not need to be handled here.
4336 if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
4337 for (unsigned Part = 0; Part < UF; ++Part) {
4338 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4339 Value *Sel = nullptr;
4340 for (User *U : VecLoopExitInst->users()) {
4341 if (isa<SelectInst>(U)) {
4342 assert(!Sel && "Reduction exit feeding two selects");
4343 Sel = U;
4344 } else
4345 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4346 }
4347 assert(Sel && "Reduction exit feeds no select");
4348 State.reset(LoopExitInstDef, Sel, Part);
4349
4350 // If the target can create a predicated operator for the reduction at no
4351 // extra cost in the loop (for example a predicated vadd), it can be
4352 // cheaper for the select to remain in the loop than be sunk out of it,
4353 // and so use the select value for the phi instead of the old
4354 // LoopExitValue.
4355 if (PreferPredicatedReductionSelect || 4356 TTI->preferPredicatedReductionSelect( 4357 RdxDesc.getOpcode(), PhiTy, 4358 TargetTransformInfo::ReductionFlags())) { 4359 auto *VecRdxPhi = 4360 cast<PHINode>(State.get(PhiR->getVPSingleValue(), Part)); 4361 VecRdxPhi->setIncomingValueForBlock( 4362 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 4363 } 4364 } 4365 } 4366 4367 // If the vector reduction can be performed in a smaller type, we truncate 4368 // then extend the loop exit value to enable InstCombine to evaluate the 4369 // entire expression in the smaller type. 4370 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 4371 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); 4372 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4373 Builder.SetInsertPoint( 4374 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4375 VectorParts RdxParts(UF); 4376 for (unsigned Part = 0; Part < UF; ++Part) { 4377 RdxParts[Part] = State.get(LoopExitInstDef, Part); 4378 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4379 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 4380 : Builder.CreateZExt(Trunc, VecTy); 4381 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 4382 UI != RdxParts[Part]->user_end();) 4383 if (*UI != Trunc) { 4384 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 4385 RdxParts[Part] = Extnd; 4386 } else { 4387 ++UI; 4388 } 4389 } 4390 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4391 for (unsigned Part = 0; Part < UF; ++Part) { 4392 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4393 State.reset(LoopExitInstDef, RdxParts[Part], Part); 4394 } 4395 } 4396 4397 // Reduce all of the unrolled parts into a single vector. 4398 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 4399 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 4400 4401 // The middle block terminator has already been assigned a DebugLoc here (the 4402 // OrigLoop's single latch terminator). We want the whole middle block to 4403 // appear to execute on this line because: (a) it is all compiler generated, 4404 // (b) these instructions are always executed after evaluating the latch 4405 // conditional branch, and (c) other passes may add new predecessors which 4406 // terminate on this line. This is the easiest way to ensure we don't 4407 // accidentally cause an extra step back into the loop while debugging. 4408 setDebugLocFromInst(LoopMiddleBlock->getTerminator()); 4409 if (PhiR->isOrdered()) 4410 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); 4411 else { 4412 // Floating-point operations should have some FMF to enable the reduction. 4413 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 4414 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 4415 for (unsigned Part = 1; Part < UF; ++Part) { 4416 Value *RdxPart = State.get(LoopExitInstDef, Part); 4417 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 4418 ReducedPartRdx = Builder.CreateBinOp( 4419 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 4420 } else { 4421 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4422 } 4423 } 4424 } 4425 4426 // Create the reduction after the loop. Note that inloop reductions create the 4427 // target reduction in the loop using a Reduction recipe. 
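// As a sketch, for an integer add reduction with VF = 4 the middle block ends
// up with something like
//   %rdx = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %part.rdx)
// followed, if the reduction was narrowed above, by a sext/zext of %rdx back
// to the original phi type.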
4428 if (VF.isVector() && !PhiR->isInLoop()) {
4429 ReducedPartRdx =
4430 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx);
4431 // If the reduction can be performed in a smaller type, we need to extend
4432 // the reduction to the wider type before we branch to the original loop.
4433 if (PhiTy != RdxDesc.getRecurrenceType())
4434 ReducedPartRdx = RdxDesc.isSigned()
4435 ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4436 : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4437 }
4438
4439 // Create a phi node that merges control-flow from the backedge-taken check
4440 // block and the middle block.
4441 PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4442 LoopScalarPreHeader->getTerminator());
4443 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4444 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4445 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4446
4447 // Now, we need to fix the users of the reduction variable
4448 // inside and outside of the scalar remainder loop.
4449
4450 // We know that the loop is in LCSSA form. We need to update the PHI nodes
4451 // in the exit blocks. See comment on analogous loop in
4452 // fixFirstOrderRecurrence for a more complete explanation of the logic.
4453 if (!Cost->requiresScalarEpilogue(VF))
4454 for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4455 if (any_of(LCSSAPhi.incoming_values(),
4456 [LoopExitInst](Value *V) { return V == LoopExitInst; }))
4457 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4458
4459 // Fix the scalar loop reduction variable with the incoming reduction sum
4460 // from the vector body and from the backedge value.
4461 int IncomingEdgeBlockIdx =
4462 OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4463 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4464 // Pick the other block.
4465 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4466 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4467 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4468 }
4469
4470 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
4471 VPTransformState &State) {
4472 RecurKind RK = RdxDesc.getRecurrenceKind();
4473 if (RK != RecurKind::Add && RK != RecurKind::Mul)
4474 return;
4475
4476 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4477 assert(LoopExitInstr && "null loop exit instruction");
4478 SmallVector<Instruction *, 8> Worklist;
4479 SmallPtrSet<Instruction *, 8> Visited;
4480 Worklist.push_back(LoopExitInstr);
4481 Visited.insert(LoopExitInstr);
4482
4483 while (!Worklist.empty()) {
4484 Instruction *Cur = Worklist.pop_back_val();
4485 if (isa<OverflowingBinaryOperator>(Cur))
4486 for (unsigned Part = 0; Part < UF; ++Part) {
4487 // FIXME: Should not rely on getVPValue at this point.
4488 Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
4489 cast<Instruction>(V)->dropPoisonGeneratingFlags();
4490 }
4491
4492 for (User *U : Cur->users()) {
4493 Instruction *UI = cast<Instruction>(U);
4494 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4495 Visited.insert(UI).second)
4496 Worklist.push_back(UI);
4497 }
4498 }
4499 }
4500
4501 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4502 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4503 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4504 // Some phis were already hand-updated by the reduction and recurrence
4505 // code above, leave them alone.
4506 continue; 4507 4508 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4509 // Non-instruction incoming values will have only one value. 4510 4511 VPLane Lane = VPLane::getFirstLane(); 4512 if (isa<Instruction>(IncomingValue) && 4513 !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue), 4514 VF)) 4515 Lane = VPLane::getLastLaneForVF(VF); 4516 4517 // Can be a loop invariant incoming value or the last scalar value to be 4518 // extracted from the vectorized loop. 4519 // FIXME: Should not rely on getVPValue at this point. 4520 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4521 Value *lastIncomingValue = 4522 OrigLoop->isLoopInvariant(IncomingValue) 4523 ? IncomingValue 4524 : State.get(State.Plan->getVPValue(IncomingValue, true), 4525 VPIteration(UF - 1, Lane)); 4526 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4527 } 4528 } 4529 4530 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4531 // The basic block and loop containing the predicated instruction. 4532 auto *PredBB = PredInst->getParent(); 4533 auto *VectorLoop = LI->getLoopFor(PredBB); 4534 4535 // Initialize a worklist with the operands of the predicated instruction. 4536 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4537 4538 // Holds instructions that we need to analyze again. An instruction may be 4539 // reanalyzed if we don't yet know if we can sink it or not. 4540 SmallVector<Instruction *, 8> InstsToReanalyze; 4541 4542 // Returns true if a given use occurs in the predicated block. Phi nodes use 4543 // their operands in their corresponding predecessor blocks. 4544 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4545 auto *I = cast<Instruction>(U.getUser()); 4546 BasicBlock *BB = I->getParent(); 4547 if (auto *Phi = dyn_cast<PHINode>(I)) 4548 BB = Phi->getIncomingBlock( 4549 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4550 return BB == PredBB; 4551 }; 4552 4553 // Iteratively sink the scalarized operands of the predicated instruction 4554 // into the block we created for it. When an instruction is sunk, it's 4555 // operands are then added to the worklist. The algorithm ends after one pass 4556 // through the worklist doesn't sink a single instruction. 4557 bool Changed; 4558 do { 4559 // Add the instructions that need to be reanalyzed to the worklist, and 4560 // reset the changed indicator. 4561 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4562 InstsToReanalyze.clear(); 4563 Changed = false; 4564 4565 while (!Worklist.empty()) { 4566 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4567 4568 // We can't sink an instruction if it is a phi node, is not in the loop, 4569 // or may have side effects. 4570 if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) || 4571 I->mayHaveSideEffects()) 4572 continue; 4573 4574 // If the instruction is already in PredBB, check if we can sink its 4575 // operands. In that case, VPlan's sinkScalarOperands() succeeded in 4576 // sinking the scalar instruction I, hence it appears in PredBB; but it 4577 // may have failed to sink I's operands (recursively), which we try 4578 // (again) here. 4579 if (I->getParent() == PredBB) { 4580 Worklist.insert(I->op_begin(), I->op_end()); 4581 continue; 4582 } 4583 4584 // It's legal to sink the instruction if all its uses occur in the 4585 // predicated block. Otherwise, there's nothing to do yet, and we may 4586 // need to reanalyze the instruction. 
4587 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4588 InstsToReanalyze.push_back(I); 4589 continue; 4590 } 4591 4592 // Move the instruction to the beginning of the predicated block, and add 4593 // it's operands to the worklist. 4594 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4595 Worklist.insert(I->op_begin(), I->op_end()); 4596 4597 // The sinking may have enabled other instructions to be sunk, so we will 4598 // need to iterate. 4599 Changed = true; 4600 } 4601 } while (Changed); 4602 } 4603 4604 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { 4605 for (PHINode *OrigPhi : OrigPHIsToFix) { 4606 VPWidenPHIRecipe *VPPhi = 4607 cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi)); 4608 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0)); 4609 // Make sure the builder has a valid insert point. 4610 Builder.SetInsertPoint(NewPhi); 4611 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4612 VPValue *Inc = VPPhi->getIncomingValue(i); 4613 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4614 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4615 } 4616 } 4617 } 4618 4619 bool InnerLoopVectorizer::useOrderedReductions(RecurrenceDescriptor &RdxDesc) { 4620 return Cost->useOrderedReductions(RdxDesc); 4621 } 4622 4623 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, 4624 VPUser &Operands, unsigned UF, 4625 ElementCount VF, bool IsPtrLoopInvariant, 4626 SmallBitVector &IsIndexLoopInvariant, 4627 VPTransformState &State) { 4628 // Construct a vector GEP by widening the operands of the scalar GEP as 4629 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 4630 // results in a vector of pointers when at least one operand of the GEP 4631 // is vector-typed. Thus, to keep the representation compact, we only use 4632 // vector-typed operands for loop-varying values. 4633 4634 if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 4635 // If we are vectorizing, but the GEP has only loop-invariant operands, 4636 // the GEP we build (by only using vector-typed operands for 4637 // loop-varying values) would be a scalar pointer. Thus, to ensure we 4638 // produce a vector of pointers, we need to either arbitrarily pick an 4639 // operand to broadcast, or broadcast a clone of the original GEP. 4640 // Here, we broadcast a clone of the original. 4641 // 4642 // TODO: If at some point we decide to scalarize instructions having 4643 // loop-invariant operands, this special case will no longer be 4644 // required. We would add the scalarization decision to 4645 // collectLoopScalars() and teach getVectorValue() to broadcast 4646 // the lane-zero scalar value. 4647 auto *Clone = Builder.Insert(GEP->clone()); 4648 for (unsigned Part = 0; Part < UF; ++Part) { 4649 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 4650 State.set(VPDef, EntryPart, Part); 4651 addMetadata(EntryPart, GEP); 4652 } 4653 } else { 4654 // If the GEP has at least one loop-varying operand, we are sure to 4655 // produce a vector of pointers. But if we are only unrolling, we want 4656 // to produce a scalar GEP for each unroll part. Thus, the GEP we 4657 // produce with the code below will be scalar (if VF == 1) or vector 4658 // (otherwise). Note that for the unroll-only case, we still maintain 4659 // values in the vector mapping with initVector, as we do for other 4660 // instructions. 4661 for (unsigned Part = 0; Part < UF; ++Part) { 4662 // The pointer operand of the new GEP. 
If it's loop-invariant, we 4663 // won't broadcast it. 4664 auto *Ptr = IsPtrLoopInvariant 4665 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 4666 : State.get(Operands.getOperand(0), Part); 4667 4668 // Collect all the indices for the new GEP. If any index is 4669 // loop-invariant, we won't broadcast it. 4670 SmallVector<Value *, 4> Indices; 4671 for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) { 4672 VPValue *Operand = Operands.getOperand(I); 4673 if (IsIndexLoopInvariant[I - 1]) 4674 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 4675 else 4676 Indices.push_back(State.get(Operand, Part)); 4677 } 4678 4679 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 4680 // but it should be a vector, otherwise. 4681 auto *NewGEP = 4682 GEP->isInBounds() 4683 ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, 4684 Indices) 4685 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); 4686 assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) && 4687 "NewGEP is not a pointer vector"); 4688 State.set(VPDef, NewGEP, Part); 4689 addMetadata(NewGEP, GEP); 4690 } 4691 } 4692 } 4693 4694 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4695 VPWidenPHIRecipe *PhiR, 4696 VPTransformState &State) { 4697 PHINode *P = cast<PHINode>(PN); 4698 if (EnableVPlanNativePath) { 4699 // Currently we enter here in the VPlan-native path for non-induction 4700 // PHIs where all control flow is uniform. We simply widen these PHIs. 4701 // Create a vector phi with no operands - the vector phi operands will be 4702 // set at the end of vector code generation. 4703 Type *VecTy = (State.VF.isScalar()) 4704 ? PN->getType() 4705 : VectorType::get(PN->getType(), State.VF); 4706 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4707 State.set(PhiR, VecPhi, 0); 4708 OrigPHIsToFix.push_back(P); 4709 4710 return; 4711 } 4712 4713 assert(PN->getParent() == OrigLoop->getHeader() && 4714 "Non-header phis should have been handled elsewhere"); 4715 4716 // In order to support recurrences we need to be able to vectorize Phi nodes. 4717 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4718 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4719 // this value when we vectorize all of the instructions that use the PHI. 4720 4721 assert(!Legal->isReductionVariable(P) && 4722 "reductions should be handled elsewhere"); 4723 4724 setDebugLocFromInst(P); 4725 4726 // This PHINode must be an induction variable. 4727 // Make sure that we know about it. 4728 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4729 4730 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4731 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4732 4733 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4734 // which can be found from the original scalar operations. 4735 switch (II.getKind()) { 4736 case InductionDescriptor::IK_NoInduction: 4737 llvm_unreachable("Unknown induction"); 4738 case InductionDescriptor::IK_IntInduction: 4739 case InductionDescriptor::IK_FpInduction: 4740 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4741 case InductionDescriptor::IK_PtrInduction: { 4742 // Handle the pointer induction variable case. 
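// Illustrative C shorthand of a pointer induction:
//   for (int i = 0; i < n; ++i) { sum += *p; ++p; }
// Here p advances by a fixed step each iteration. The scalar-after-
// vectorization path below emits one transformed GEP per demanded lane,
// while the wide path builds a pointer phi plus per-part vector GEPs.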
4743 assert(P->getType()->isPointerTy() && "Unexpected type."); 4744 4745 if (Cost->isScalarAfterVectorization(P, State.VF)) { 4746 // This is the normalized GEP that starts counting at zero. 4747 Value *PtrInd = 4748 Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); 4749 // Determine the number of scalars we need to generate for each unroll 4750 // iteration. If the instruction is uniform, we only need to generate the 4751 // first lane. Otherwise, we generate all VF values. 4752 bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF); 4753 unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue(); 4754 4755 bool NeedsVectorIndex = !IsUniform && VF.isScalable(); 4756 Value *UnitStepVec = nullptr, *PtrIndSplat = nullptr; 4757 if (NeedsVectorIndex) { 4758 Type *VecIVTy = VectorType::get(PtrInd->getType(), VF); 4759 UnitStepVec = Builder.CreateStepVector(VecIVTy); 4760 PtrIndSplat = Builder.CreateVectorSplat(VF, PtrInd); 4761 } 4762 4763 for (unsigned Part = 0; Part < UF; ++Part) { 4764 Value *PartStart = createStepForVF( 4765 Builder, ConstantInt::get(PtrInd->getType(), Part), VF); 4766 4767 if (NeedsVectorIndex) { 4768 Value *PartStartSplat = Builder.CreateVectorSplat(VF, PartStart); 4769 Value *Indices = Builder.CreateAdd(PartStartSplat, UnitStepVec); 4770 Value *GlobalIndices = Builder.CreateAdd(PtrIndSplat, Indices); 4771 Value *SclrGep = 4772 emitTransformedIndex(Builder, GlobalIndices, PSE.getSE(), DL, II); 4773 SclrGep->setName("next.gep"); 4774 State.set(PhiR, SclrGep, Part); 4775 // We've cached the whole vector, which means we can support the 4776 // extraction of any lane. 4777 continue; 4778 } 4779 4780 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4781 Value *Idx = Builder.CreateAdd( 4782 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 4783 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4784 Value *SclrGep = 4785 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 4786 SclrGep->setName("next.gep"); 4787 State.set(PhiR, SclrGep, VPIteration(Part, Lane)); 4788 } 4789 } 4790 return; 4791 } 4792 assert(isa<SCEVConstant>(II.getStep()) && 4793 "Induction step not a SCEV constant!"); 4794 Type *PhiType = II.getStep()->getType(); 4795 4796 // Build a pointer phi 4797 Value *ScalarStartValue = II.getStartValue(); 4798 Type *ScStValueType = ScalarStartValue->getType(); 4799 PHINode *NewPointerPhi = 4800 PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); 4801 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4802 4803 // A pointer induction, performed by using a gep 4804 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4805 Instruction *InductionLoc = LoopLatch->getTerminator(); 4806 const SCEV *ScalarStep = II.getStep(); 4807 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4808 Value *ScalarStepValue = 4809 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4810 Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF); 4811 Value *NumUnrolledElems = 4812 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 4813 Value *InductionGEP = GetElementPtrInst::Create( 4814 II.getElementType(), NewPointerPhi, 4815 Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 4816 InductionLoc); 4817 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4818 4819 // Create UF many actual address geps that use the pointer 4820 // phi as base and a vectorized version of the step value 4821 // (<step*0, ..., step*N>) as offset. 
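// For example (fixed-width shorthand, VF = 4, UF = 2, element step S), the
// two parts use offset vectors
//   part 0: <0*S, 1*S, 2*S, 3*S>
//   part 1: <4*S, 5*S, 6*S, 7*S>
// relative to the pointer phi; for scalable VFs the lane offsets come from a
// stepvector plus a runtime-VF based start offset instead of constants.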
4822 for (unsigned Part = 0; Part < State.UF; ++Part) { 4823 Type *VecPhiType = VectorType::get(PhiType, State.VF); 4824 Value *StartOffsetScalar = 4825 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 4826 Value *StartOffset = 4827 Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 4828 // Create a vector of consecutive numbers from zero to VF. 4829 StartOffset = 4830 Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType)); 4831 4832 Value *GEP = Builder.CreateGEP( 4833 II.getElementType(), NewPointerPhi, 4834 Builder.CreateMul( 4835 StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue), 4836 "vector.gep")); 4837 State.set(PhiR, GEP, Part); 4838 } 4839 } 4840 } 4841 } 4842 4843 /// A helper function for checking whether an integer division-related 4844 /// instruction may divide by zero (in which case it must be predicated if 4845 /// executed conditionally in the scalar code). 4846 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4847 /// Non-zero divisors that are non compile-time constants will not be 4848 /// converted into multiplication, so we will still end up scalarizing 4849 /// the division, but can do so w/o predication. 4850 static bool mayDivideByZero(Instruction &I) { 4851 assert((I.getOpcode() == Instruction::UDiv || 4852 I.getOpcode() == Instruction::SDiv || 4853 I.getOpcode() == Instruction::URem || 4854 I.getOpcode() == Instruction::SRem) && 4855 "Unexpected instruction"); 4856 Value *Divisor = I.getOperand(1); 4857 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4858 return !CInt || CInt->isZero(); 4859 } 4860 4861 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def, 4862 VPUser &User, 4863 VPTransformState &State) { 4864 switch (I.getOpcode()) { 4865 case Instruction::Call: 4866 case Instruction::Br: 4867 case Instruction::PHI: 4868 case Instruction::GetElementPtr: 4869 case Instruction::Select: 4870 llvm_unreachable("This instruction is handled by a different recipe."); 4871 case Instruction::UDiv: 4872 case Instruction::SDiv: 4873 case Instruction::SRem: 4874 case Instruction::URem: 4875 case Instruction::Add: 4876 case Instruction::FAdd: 4877 case Instruction::Sub: 4878 case Instruction::FSub: 4879 case Instruction::FNeg: 4880 case Instruction::Mul: 4881 case Instruction::FMul: 4882 case Instruction::FDiv: 4883 case Instruction::FRem: 4884 case Instruction::Shl: 4885 case Instruction::LShr: 4886 case Instruction::AShr: 4887 case Instruction::And: 4888 case Instruction::Or: 4889 case Instruction::Xor: { 4890 // Just widen unops and binops. 4891 setDebugLocFromInst(&I); 4892 4893 for (unsigned Part = 0; Part < UF; ++Part) { 4894 SmallVector<Value *, 2> Ops; 4895 for (VPValue *VPOp : User.operands()) 4896 Ops.push_back(State.get(VPOp, Part)); 4897 4898 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 4899 4900 if (auto *VecOp = dyn_cast<Instruction>(V)) 4901 VecOp->copyIRFlags(&I); 4902 4903 // Use this vector value for all users of the original instruction. 4904 State.set(Def, V, Part); 4905 addMetadata(V, &I); 4906 } 4907 4908 break; 4909 } 4910 case Instruction::ICmp: 4911 case Instruction::FCmp: { 4912 // Widen compares. Generate vector compares. 
4913 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4914 auto *Cmp = cast<CmpInst>(&I); 4915 setDebugLocFromInst(Cmp); 4916 for (unsigned Part = 0; Part < UF; ++Part) { 4917 Value *A = State.get(User.getOperand(0), Part); 4918 Value *B = State.get(User.getOperand(1), Part); 4919 Value *C = nullptr; 4920 if (FCmp) { 4921 // Propagate fast math flags. 4922 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4923 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4924 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4925 } else { 4926 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4927 } 4928 State.set(Def, C, Part); 4929 addMetadata(C, &I); 4930 } 4931 4932 break; 4933 } 4934 4935 case Instruction::ZExt: 4936 case Instruction::SExt: 4937 case Instruction::FPToUI: 4938 case Instruction::FPToSI: 4939 case Instruction::FPExt: 4940 case Instruction::PtrToInt: 4941 case Instruction::IntToPtr: 4942 case Instruction::SIToFP: 4943 case Instruction::UIToFP: 4944 case Instruction::Trunc: 4945 case Instruction::FPTrunc: 4946 case Instruction::BitCast: { 4947 auto *CI = cast<CastInst>(&I); 4948 setDebugLocFromInst(CI); 4949 4950 /// Vectorize casts. 4951 Type *DestTy = 4952 (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF); 4953 4954 for (unsigned Part = 0; Part < UF; ++Part) { 4955 Value *A = State.get(User.getOperand(0), Part); 4956 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4957 State.set(Def, Cast, Part); 4958 addMetadata(Cast, &I); 4959 } 4960 break; 4961 } 4962 default: 4963 // This instruction is not vectorized by simple widening. 4964 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4965 llvm_unreachable("Unhandled instruction!"); 4966 } // end of switch. 4967 } 4968 4969 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4970 VPUser &ArgOperands, 4971 VPTransformState &State) { 4972 assert(!isa<DbgInfoIntrinsic>(I) && 4973 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4974 setDebugLocFromInst(&I); 4975 4976 Module *M = I.getParent()->getParent()->getParent(); 4977 auto *CI = cast<CallInst>(&I); 4978 4979 SmallVector<Type *, 4> Tys; 4980 for (Value *ArgOperand : CI->arg_operands()) 4981 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4982 4983 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4984 4985 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4986 // version of the instruction. 4987 // Is it beneficial to perform intrinsic call compared to lib call? 4988 bool NeedToScalarize = false; 4989 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4990 InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; 4991 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4992 assert((UseVectorIntrinsic || !NeedToScalarize) && 4993 "Instruction should be scalarized elsewhere."); 4994 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4995 "Either the intrinsic cost or vector call cost must be valid"); 4996 4997 for (unsigned Part = 0; Part < UF; ++Part) { 4998 SmallVector<Type *, 2> TysForDecl = {CI->getType()}; 4999 SmallVector<Value *, 4> Args; 5000 for (auto &I : enumerate(ArgOperands.operands())) { 5001 // Some intrinsics have a scalar argument - don't replace it with a 5002 // vector. 
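// (E.g., for llvm.powi the i32 exponent operand stays scalar, so the widened
// call looks roughly like
//   %r = call <4 x float> @llvm.powi(<4 x float> %x, i32 %e)   ; shorthand
// rather than taking a <4 x i32> exponent. Which operand positions behave
// this way is what hasVectorInstrinsicScalarOpd reports below.)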
5003 Value *Arg; 5004 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 5005 Arg = State.get(I.value(), Part); 5006 else { 5007 Arg = State.get(I.value(), VPIteration(0, 0)); 5008 if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) 5009 TysForDecl.push_back(Arg->getType()); 5010 } 5011 Args.push_back(Arg); 5012 } 5013 5014 Function *VectorF; 5015 if (UseVectorIntrinsic) { 5016 // Use vector version of the intrinsic. 5017 if (VF.isVector()) 5018 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 5019 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 5020 assert(VectorF && "Can't retrieve vector intrinsic."); 5021 } else { 5022 // Use vector version of the function call. 5023 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 5024 #ifndef NDEBUG 5025 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 5026 "Can't create vector function."); 5027 #endif 5028 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 5029 } 5030 SmallVector<OperandBundleDef, 1> OpBundles; 5031 CI->getOperandBundlesAsDefs(OpBundles); 5032 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 5033 5034 if (isa<FPMathOperator>(V)) 5035 V->copyFastMathFlags(CI); 5036 5037 State.set(Def, V, Part); 5038 addMetadata(V, &I); 5039 } 5040 } 5041 5042 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef, 5043 VPUser &Operands, 5044 bool InvariantCond, 5045 VPTransformState &State) { 5046 setDebugLocFromInst(&I); 5047 5048 // The condition can be loop invariant but still defined inside the 5049 // loop. This means that we can't just use the original 'cond' value. 5050 // We have to take the 'vectorized' value and pick the first lane. 5051 // Instcombine will make this a no-op. 5052 auto *InvarCond = InvariantCond 5053 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 5054 : nullptr; 5055 5056 for (unsigned Part = 0; Part < UF; ++Part) { 5057 Value *Cond = 5058 InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part); 5059 Value *Op0 = State.get(Operands.getOperand(1), Part); 5060 Value *Op1 = State.get(Operands.getOperand(2), Part); 5061 Value *Sel = Builder.CreateSelect(Cond, Op0, Op1); 5062 State.set(VPDef, Sel, Part); 5063 addMetadata(Sel, &I); 5064 } 5065 } 5066 5067 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 5068 // We should not collect Scalars more than once per VF. Right now, this 5069 // function is called from collectUniformsAndScalars(), which already does 5070 // this check. Collecting Scalars for VF=1 does not make any sense. 5071 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 5072 "This function should not be visited twice for the same VF"); 5073 5074 SmallSetVector<Instruction *, 8> Worklist; 5075 5076 // These sets are used to seed the analysis with pointers used by memory 5077 // accesses that will remain scalar. 5078 SmallSetVector<Instruction *, 8> ScalarPtrs; 5079 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 5080 auto *Latch = TheLoop->getLoopLatch(); 5081 5082 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 5083 // The pointer operands of loads and stores will be scalar as long as the 5084 // memory access is not a gather or scatter operation. The value operand of a 5085 // store will remain scalar if the store is scalarized. 
5086 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 5087 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 5088 assert(WideningDecision != CM_Unknown && 5089 "Widening decision should be ready at this moment"); 5090 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 5091 if (Ptr == Store->getValueOperand()) 5092 return WideningDecision == CM_Scalarize; 5093 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 5094 "Ptr is neither a value or pointer operand"); 5095 return WideningDecision != CM_GatherScatter; 5096 }; 5097 5098 // A helper that returns true if the given value is a bitcast or 5099 // getelementptr instruction contained in the loop. 5100 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 5101 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 5102 isa<GetElementPtrInst>(V)) && 5103 !TheLoop->isLoopInvariant(V); 5104 }; 5105 5106 auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) { 5107 if (!isa<PHINode>(Ptr) || 5108 !Legal->getInductionVars().count(cast<PHINode>(Ptr))) 5109 return false; 5110 auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)]; 5111 if (Induction.getKind() != InductionDescriptor::IK_PtrInduction) 5112 return false; 5113 return isScalarUse(MemAccess, Ptr); 5114 }; 5115 5116 // A helper that evaluates a memory access's use of a pointer. If the 5117 // pointer is actually the pointer induction of a loop, it is being 5118 // inserted into Worklist. If the use will be a scalar use, and the 5119 // pointer is only used by memory accesses, we place the pointer in 5120 // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs. 5121 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 5122 if (isScalarPtrInduction(MemAccess, Ptr)) { 5123 Worklist.insert(cast<Instruction>(Ptr)); 5124 LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr 5125 << "\n"); 5126 5127 Instruction *Update = cast<Instruction>( 5128 cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch)); 5129 ScalarPtrs.insert(Update); 5130 return; 5131 } 5132 // We only care about bitcast and getelementptr instructions contained in 5133 // the loop. 5134 if (!isLoopVaryingBitCastOrGEP(Ptr)) 5135 return; 5136 5137 // If the pointer has already been identified as scalar (e.g., if it was 5138 // also identified as uniform), there's nothing to do. 5139 auto *I = cast<Instruction>(Ptr); 5140 if (Worklist.count(I)) 5141 return; 5142 5143 // If the use of the pointer will be a scalar use, and all users of the 5144 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 5145 // place the pointer in PossibleNonScalarPtrs. 5146 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 5147 return isa<LoadInst>(U) || isa<StoreInst>(U); 5148 })) 5149 ScalarPtrs.insert(I); 5150 else 5151 PossibleNonScalarPtrs.insert(I); 5152 }; 5153 5154 // We seed the scalars analysis with three classes of instructions: (1) 5155 // instructions marked uniform-after-vectorization and (2) bitcast, 5156 // getelementptr and (pointer) phi instructions used by memory accesses 5157 // requiring a scalar use. 5158 // 5159 // (1) Add to the worklist all instructions that have been identified as 5160 // uniform-after-vectorization. 5161 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 5162 5163 // (2) Add to the worklist all bitcast and getelementptr instructions used by 5164 // memory accesses requiring a scalar use. 
The pointer operands of loads and 5165 // stores will be scalar as long as the memory accesses is not a gather or 5166 // scatter operation. The value operand of a store will remain scalar if the 5167 // store is scalarized. 5168 for (auto *BB : TheLoop->blocks()) 5169 for (auto &I : *BB) { 5170 if (auto *Load = dyn_cast<LoadInst>(&I)) { 5171 evaluatePtrUse(Load, Load->getPointerOperand()); 5172 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 5173 evaluatePtrUse(Store, Store->getPointerOperand()); 5174 evaluatePtrUse(Store, Store->getValueOperand()); 5175 } 5176 } 5177 for (auto *I : ScalarPtrs) 5178 if (!PossibleNonScalarPtrs.count(I)) { 5179 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 5180 Worklist.insert(I); 5181 } 5182 5183 // Insert the forced scalars. 5184 // FIXME: Currently widenPHIInstruction() often creates a dead vector 5185 // induction variable when the PHI user is scalarized. 5186 auto ForcedScalar = ForcedScalars.find(VF); 5187 if (ForcedScalar != ForcedScalars.end()) 5188 for (auto *I : ForcedScalar->second) 5189 Worklist.insert(I); 5190 5191 // Expand the worklist by looking through any bitcasts and getelementptr 5192 // instructions we've already identified as scalar. This is similar to the 5193 // expansion step in collectLoopUniforms(); however, here we're only 5194 // expanding to include additional bitcasts and getelementptr instructions. 5195 unsigned Idx = 0; 5196 while (Idx != Worklist.size()) { 5197 Instruction *Dst = Worklist[Idx++]; 5198 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 5199 continue; 5200 auto *Src = cast<Instruction>(Dst->getOperand(0)); 5201 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 5202 auto *J = cast<Instruction>(U); 5203 return !TheLoop->contains(J) || Worklist.count(J) || 5204 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 5205 isScalarUse(J, Src)); 5206 })) { 5207 Worklist.insert(Src); 5208 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 5209 } 5210 } 5211 5212 // An induction variable will remain scalar if all users of the induction 5213 // variable and induction variable update remain scalar. 5214 for (auto &Induction : Legal->getInductionVars()) { 5215 auto *Ind = Induction.first; 5216 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5217 5218 // If tail-folding is applied, the primary induction variable will be used 5219 // to feed a vector compare. 5220 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 5221 continue; 5222 5223 // Determine if all users of the induction variable are scalar after 5224 // vectorization. 5225 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5226 auto *I = cast<Instruction>(U); 5227 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 5228 }); 5229 if (!ScalarInd) 5230 continue; 5231 5232 // Determine if all users of the induction variable update instruction are 5233 // scalar after vectorization. 5234 auto ScalarIndUpdate = 5235 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5236 auto *I = cast<Instruction>(U); 5237 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 5238 }); 5239 if (!ScalarIndUpdate) 5240 continue; 5241 5242 // The induction variable and its update instruction will remain scalar. 
5243 Worklist.insert(Ind); 5244 Worklist.insert(IndUpdate); 5245 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 5246 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 5247 << "\n"); 5248 } 5249 5250 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 5251 } 5252 5253 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const { 5254 if (!blockNeedsPredication(I->getParent())) 5255 return false; 5256 switch(I->getOpcode()) { 5257 default: 5258 break; 5259 case Instruction::Load: 5260 case Instruction::Store: { 5261 if (!Legal->isMaskRequired(I)) 5262 return false; 5263 auto *Ptr = getLoadStorePointerOperand(I); 5264 auto *Ty = getLoadStoreType(I); 5265 const Align Alignment = getLoadStoreAlignment(I); 5266 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 5267 TTI.isLegalMaskedGather(Ty, Alignment)) 5268 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 5269 TTI.isLegalMaskedScatter(Ty, Alignment)); 5270 } 5271 case Instruction::UDiv: 5272 case Instruction::SDiv: 5273 case Instruction::SRem: 5274 case Instruction::URem: 5275 return mayDivideByZero(*I); 5276 } 5277 return false; 5278 } 5279 5280 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 5281 Instruction *I, ElementCount VF) { 5282 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 5283 assert(getWideningDecision(I, VF) == CM_Unknown && 5284 "Decision should not be set yet."); 5285 auto *Group = getInterleavedAccessGroup(I); 5286 assert(Group && "Must have a group."); 5287 5288 // If the instruction's allocated size doesn't equal it's type size, it 5289 // requires padding and will be scalarized. 5290 auto &DL = I->getModule()->getDataLayout(); 5291 auto *ScalarTy = getLoadStoreType(I); 5292 if (hasIrregularType(ScalarTy, DL)) 5293 return false; 5294 5295 // Check if masking is required. 5296 // A Group may need masking for one of two reasons: it resides in a block that 5297 // needs predication, or it was decided to use masking to deal with gaps 5298 // (either a gap at the end of a load-access that may result in a speculative 5299 // load, or any gaps in a store-access). 5300 bool PredicatedAccessRequiresMasking = 5301 Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); 5302 bool LoadAccessWithGapsRequiresEpilogMasking = 5303 isa<LoadInst>(I) && Group->requiresScalarEpilogue() && 5304 !isScalarEpilogueAllowed(); 5305 bool StoreAccessWithGapsRequiresMasking = 5306 isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()); 5307 if (!PredicatedAccessRequiresMasking && 5308 !LoadAccessWithGapsRequiresEpilogMasking && 5309 !StoreAccessWithGapsRequiresMasking) 5310 return true; 5311 5312 // If masked interleaving is required, we expect that the user/target had 5313 // enabled it, because otherwise it either wouldn't have been created or 5314 // it should have been invalidated by the CostModel. 5315 assert(useMaskedInterleavedAccesses(TTI) && 5316 "Masked interleave-groups for predicated accesses are not enabled."); 5317 5318 auto *Ty = getLoadStoreType(I); 5319 const Align Alignment = getLoadStoreAlignment(I); 5320 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 5321 : TTI.isLegalMaskedStore(Ty, Alignment); 5322 } 5323 5324 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 5325 Instruction *I, ElementCount VF) { 5326 // Get and ensure we have a valid memory instruction. 
5327 LoadInst *LI = dyn_cast<LoadInst>(I); 5328 StoreInst *SI = dyn_cast<StoreInst>(I); 5329 assert((LI || SI) && "Invalid memory instruction"); 5330 5331 auto *Ptr = getLoadStorePointerOperand(I); 5332 5333 // In order to be widened, the pointer should be consecutive, first of all. 5334 if (!Legal->isConsecutivePtr(Ptr)) 5335 return false; 5336 5337 // If the instruction is a store located in a predicated block, it will be 5338 // scalarized. 5339 if (isScalarWithPredication(I)) 5340 return false; 5341 5342 // If the instruction's allocated size doesn't equal it's type size, it 5343 // requires padding and will be scalarized. 5344 auto &DL = I->getModule()->getDataLayout(); 5345 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 5346 if (hasIrregularType(ScalarTy, DL)) 5347 return false; 5348 5349 return true; 5350 } 5351 5352 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 5353 // We should not collect Uniforms more than once per VF. Right now, 5354 // this function is called from collectUniformsAndScalars(), which 5355 // already does this check. Collecting Uniforms for VF=1 does not make any 5356 // sense. 5357 5358 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 5359 "This function should not be visited twice for the same VF"); 5360 5361 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 5362 // not analyze again. Uniforms.count(VF) will return 1. 5363 Uniforms[VF].clear(); 5364 5365 // We now know that the loop is vectorizable! 5366 // Collect instructions inside the loop that will remain uniform after 5367 // vectorization. 5368 5369 // Global values, params and instructions outside of current loop are out of 5370 // scope. 5371 auto isOutOfScope = [&](Value *V) -> bool { 5372 Instruction *I = dyn_cast<Instruction>(V); 5373 return (!I || !TheLoop->contains(I)); 5374 }; 5375 5376 SetVector<Instruction *> Worklist; 5377 BasicBlock *Latch = TheLoop->getLoopLatch(); 5378 5379 // Instructions that are scalar with predication must not be considered 5380 // uniform after vectorization, because that would create an erroneous 5381 // replicating region where only a single instance out of VF should be formed. 5382 // TODO: optimize such seldom cases if found important, see PR40816. 5383 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 5384 if (isOutOfScope(I)) { 5385 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 5386 << *I << "\n"); 5387 return; 5388 } 5389 if (isScalarWithPredication(I)) { 5390 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 5391 << *I << "\n"); 5392 return; 5393 } 5394 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 5395 Worklist.insert(I); 5396 }; 5397 5398 // Start with the conditional branch. If the branch condition is an 5399 // instruction contained in the loop that is only used by the branch, it is 5400 // uniform. 5401 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 5402 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 5403 addToWorklistIfAllowed(Cmp); 5404 5405 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 5406 InstWidening WideningDecision = getWideningDecision(I, VF); 5407 assert(WideningDecision != CM_Unknown && 5408 "Widening decision should be ready at this moment"); 5409 5410 // A uniform memory op is itself uniform. We exclude uniform stores 5411 // here as they demand the last lane, not the first one. 
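// For example, a load whose address is loop-invariant, e.g.
//   %v = load i32, i32* %p   ; %p does not vary inside the loop
// yields the same value for every lane, so only lane 0 needs to execute it and
// the load itself is uniform. A store to an invariant address is excluded here
// because the value that must survive is the one from the last lane.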
5412 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 5413 assert(WideningDecision == CM_Scalarize); 5414 return true; 5415 } 5416 5417 return (WideningDecision == CM_Widen || 5418 WideningDecision == CM_Widen_Reverse || 5419 WideningDecision == CM_Interleave); 5420 }; 5421 5422 5423 // Returns true if Ptr is the pointer operand of a memory access instruction 5424 // I, and I is known to not require scalarization. 5425 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 5426 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 5427 }; 5428 5429 // Holds a list of values which are known to have at least one uniform use. 5430 // Note that there may be other uses which aren't uniform. A "uniform use" 5431 // here is something which only demands lane 0 of the unrolled iterations; 5432 // it does not imply that all lanes produce the same value (e.g. this is not 5433 // the usual meaning of uniform) 5434 SetVector<Value *> HasUniformUse; 5435 5436 // Scan the loop for instructions which are either a) known to have only 5437 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 5438 for (auto *BB : TheLoop->blocks()) 5439 for (auto &I : *BB) { 5440 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) { 5441 switch (II->getIntrinsicID()) { 5442 case Intrinsic::sideeffect: 5443 case Intrinsic::experimental_noalias_scope_decl: 5444 case Intrinsic::assume: 5445 case Intrinsic::lifetime_start: 5446 case Intrinsic::lifetime_end: 5447 if (TheLoop->hasLoopInvariantOperands(&I)) 5448 addToWorklistIfAllowed(&I); 5449 break; 5450 default: 5451 break; 5452 } 5453 } 5454 5455 // ExtractValue instructions must be uniform, because the operands are 5456 // known to be loop-invariant. 5457 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) { 5458 assert(isOutOfScope(EVI->getAggregateOperand()) && 5459 "Expected aggregate value to be loop invariant"); 5460 addToWorklistIfAllowed(EVI); 5461 continue; 5462 } 5463 5464 // If there's no pointer operand, there's nothing to do. 5465 auto *Ptr = getLoadStorePointerOperand(&I); 5466 if (!Ptr) 5467 continue; 5468 5469 // A uniform memory op is itself uniform. We exclude uniform stores 5470 // here as they demand the last lane, not the first one. 5471 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 5472 addToWorklistIfAllowed(&I); 5473 5474 if (isUniformDecision(&I, VF)) { 5475 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 5476 HasUniformUse.insert(Ptr); 5477 } 5478 } 5479 5480 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 5481 // demanding) users. Since loops are assumed to be in LCSSA form, this 5482 // disallows uses outside the loop as well. 5483 for (auto *V : HasUniformUse) { 5484 if (isOutOfScope(V)) 5485 continue; 5486 auto *I = cast<Instruction>(V); 5487 auto UsersAreMemAccesses = 5488 llvm::all_of(I->users(), [&](User *U) -> bool { 5489 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 5490 }); 5491 if (UsersAreMemAccesses) 5492 addToWorklistIfAllowed(I); 5493 } 5494 5495 // Expand Worklist in topological order: whenever a new instruction 5496 // is added , its users should be already inside Worklist. It ensures 5497 // a uniform instruction will only be used by uniform instructions. 5498 unsigned idx = 0; 5499 while (idx != Worklist.size()) { 5500 Instruction *I = Worklist[idx++]; 5501 5502 for (auto OV : I->operand_values()) { 5503 // isOutOfScope operands cannot be uniform instructions. 
5504 if (isOutOfScope(OV)) 5505 continue; 5506 // First order recurrence Phi's should typically be considered 5507 // non-uniform. 5508 auto *OP = dyn_cast<PHINode>(OV); 5509 if (OP && Legal->isFirstOrderRecurrence(OP)) 5510 continue; 5511 // If all the users of the operand are uniform, then add the 5512 // operand into the uniform worklist. 5513 auto *OI = cast<Instruction>(OV); 5514 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 5515 auto *J = cast<Instruction>(U); 5516 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 5517 })) 5518 addToWorklistIfAllowed(OI); 5519 } 5520 } 5521 5522 // For an instruction to be added into Worklist above, all its users inside 5523 // the loop should also be in Worklist. However, this condition cannot be 5524 // true for phi nodes that form a cyclic dependence. We must process phi 5525 // nodes separately. An induction variable will remain uniform if all users 5526 // of the induction variable and induction variable update remain uniform. 5527 // The code below handles both pointer and non-pointer induction variables. 5528 for (auto &Induction : Legal->getInductionVars()) { 5529 auto *Ind = Induction.first; 5530 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5531 5532 // Determine if all users of the induction variable are uniform after 5533 // vectorization. 5534 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5535 auto *I = cast<Instruction>(U); 5536 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 5537 isVectorizedMemAccessUse(I, Ind); 5538 }); 5539 if (!UniformInd) 5540 continue; 5541 5542 // Determine if all users of the induction variable update instruction are 5543 // uniform after vectorization. 5544 auto UniformIndUpdate = 5545 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5546 auto *I = cast<Instruction>(U); 5547 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 5548 isVectorizedMemAccessUse(I, IndUpdate); 5549 }); 5550 if (!UniformIndUpdate) 5551 continue; 5552 5553 // The induction variable and its update instruction will remain uniform. 5554 addToWorklistIfAllowed(Ind); 5555 addToWorklistIfAllowed(IndUpdate); 5556 } 5557 5558 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 5559 } 5560 5561 bool LoopVectorizationCostModel::runtimeChecksRequired() { 5562 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 5563 5564 if (Legal->getRuntimePointerChecking()->Need) { 5565 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 5566 "runtime pointer checks needed. Enable vectorization of this " 5567 "loop with '#pragma clang loop vectorize(enable)' when " 5568 "compiling with -Os/-Oz", 5569 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5570 return true; 5571 } 5572 5573 if (!PSE.getUnionPredicate().getPredicates().empty()) { 5574 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 5575 "runtime SCEV checks needed. Enable vectorization of this " 5576 "loop with '#pragma clang loop vectorize(enable)' when " 5577 "compiling with -Os/-Oz", 5578 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5579 return true; 5580 } 5581 5582 // FIXME: Avoid specializing for stride==1 instead of bailing out. 5583 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 5584 reportVectorizationFailure("Runtime stride check for small trip count", 5585 "runtime stride == 1 checks needed. 
Enable vectorization of " 5586 "this loop without such check by compiling with -Os/-Oz", 5587 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5588 return true; 5589 } 5590 5591 return false; 5592 } 5593 5594 ElementCount 5595 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 5596 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) 5597 return ElementCount::getScalable(0); 5598 5599 if (Hints->isScalableVectorizationDisabled()) { 5600 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 5601 "ScalableVectorizationDisabled", ORE, TheLoop); 5602 return ElementCount::getScalable(0); 5603 } 5604 5605 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n"); 5606 5607 auto MaxScalableVF = ElementCount::getScalable( 5608 std::numeric_limits<ElementCount::ScalarTy>::max()); 5609 5610 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 5611 // FIXME: While for scalable vectors this is currently sufficient, this should 5612 // be replaced by a more detailed mechanism that filters out specific VFs, 5613 // instead of invalidating vectorization for a whole set of VFs based on the 5614 // MaxVF. 5615 5616 // Disable scalable vectorization if the loop contains unsupported reductions. 5617 if (!canVectorizeReductions(MaxScalableVF)) { 5618 reportVectorizationInfo( 5619 "Scalable vectorization not supported for the reduction " 5620 "operations found in this loop.", 5621 "ScalableVFUnfeasible", ORE, TheLoop); 5622 return ElementCount::getScalable(0); 5623 } 5624 5625 // Disable scalable vectorization if the loop contains any instructions 5626 // with element types not supported for scalable vectors. 5627 if (any_of(ElementTypesInLoop, [&](Type *Ty) { 5628 return !Ty->isVoidTy() && 5629 !this->TTI.isElementTypeLegalForScalableVector(Ty); 5630 })) { 5631 reportVectorizationInfo("Scalable vectorization is not supported " 5632 "for all element types found in this loop.", 5633 "ScalableVFUnfeasible", ORE, TheLoop); 5634 return ElementCount::getScalable(0); 5635 } 5636 5637 if (Legal->isSafeForAnyVectorWidth()) 5638 return MaxScalableVF; 5639 5640 // Limit MaxScalableVF by the maximum safe dependence distance. 5641 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 5642 if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) { 5643 unsigned VScaleMax = TheFunction->getFnAttribute(Attribute::VScaleRange) 5644 .getVScaleRangeArgs() 5645 .second; 5646 if (VScaleMax > 0) 5647 MaxVScale = VScaleMax; 5648 } 5649 MaxScalableVF = ElementCount::getScalable( 5650 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); 5651 if (!MaxScalableVF) 5652 reportVectorizationInfo( 5653 "Max legal vector width too small, scalable vectorization " 5654 "unfeasible.", 5655 "ScalableVFUnfeasible", ORE, TheLoop); 5656 5657 return MaxScalableVF; 5658 } 5659 5660 FixedScalableVFPair 5661 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount, 5662 ElementCount UserVF) { 5663 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5664 unsigned SmallestType, WidestType; 5665 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5666 5667 // Get the maximum safe dependence distance in bits computed by LAA. 5668 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 5669 // the memory accesses that is most restrictive (involved in the smallest 5670 // dependence distance). 
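// Worked example (illustrative numbers): if LAA reports a maximum safe vector
// width of 1024 bits and the widest type in the loop is i32, then
// MaxSafeElements = PowerOf2Floor(1024 / 32) = 32, and the fixed and scalable
// maxima computed below are both derived from that element count.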
5671 unsigned MaxSafeElements = 5672 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 5673 5674 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 5675 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 5676 5677 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 5678 << ".\n"); 5679 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 5680 << ".\n"); 5681 5682 // First analyze the UserVF, fall back if the UserVF should be ignored. 5683 if (UserVF) { 5684 auto MaxSafeUserVF = 5685 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; 5686 5687 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { 5688 // If `VF=vscale x N` is safe, then so is `VF=N` 5689 if (UserVF.isScalable()) 5690 return FixedScalableVFPair( 5691 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); 5692 else 5693 return UserVF; 5694 } 5695 5696 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 5697 5698 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 5699 // is better to ignore the hint and let the compiler choose a suitable VF. 5700 if (!UserVF.isScalable()) { 5701 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5702 << " is unsafe, clamping to max safe VF=" 5703 << MaxSafeFixedVF << ".\n"); 5704 ORE->emit([&]() { 5705 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5706 TheLoop->getStartLoc(), 5707 TheLoop->getHeader()) 5708 << "User-specified vectorization factor " 5709 << ore::NV("UserVectorizationFactor", UserVF) 5710 << " is unsafe, clamping to maximum safe vectorization factor " 5711 << ore::NV("VectorizationFactor", MaxSafeFixedVF); 5712 }); 5713 return MaxSafeFixedVF; 5714 } 5715 5716 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) { 5717 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5718 << " is ignored because scalable vectors are not " 5719 "available.\n"); 5720 ORE->emit([&]() { 5721 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5722 TheLoop->getStartLoc(), 5723 TheLoop->getHeader()) 5724 << "User-specified vectorization factor " 5725 << ore::NV("UserVectorizationFactor", UserVF) 5726 << " is ignored because the target does not support scalable " 5727 "vectors. The compiler will pick a more suitable value."; 5728 }); 5729 } else { 5730 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5731 << " is unsafe. Ignoring scalable UserVF.\n"); 5732 ORE->emit([&]() { 5733 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5734 TheLoop->getStartLoc(), 5735 TheLoop->getHeader()) 5736 << "User-specified vectorization factor " 5737 << ore::NV("UserVectorizationFactor", UserVF) 5738 << " is unsafe. 
Ignoring the hint to let the compiler pick a " 5739 "more suitable value."; 5740 }); 5741 } 5742 } 5743 5744 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 5745 << " / " << WidestType << " bits.\n"); 5746 5747 FixedScalableVFPair Result(ElementCount::getFixed(1), 5748 ElementCount::getScalable(0)); 5749 if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, 5750 WidestType, MaxSafeFixedVF)) 5751 Result.FixedVF = MaxVF; 5752 5753 if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, 5754 WidestType, MaxSafeScalableVF)) 5755 if (MaxVF.isScalable()) { 5756 Result.ScalableVF = MaxVF; 5757 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF 5758 << "\n"); 5759 } 5760 5761 return Result; 5762 } 5763 5764 FixedScalableVFPair 5765 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { 5766 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 5767 // TODO: It may by useful to do since it's still likely to be dynamically 5768 // uniform if the target can skip. 5769 reportVectorizationFailure( 5770 "Not inserting runtime ptr check for divergent target", 5771 "runtime pointer checks needed. Not enabled for divergent target", 5772 "CantVersionLoopWithDivergentTarget", ORE, TheLoop); 5773 return FixedScalableVFPair::getNone(); 5774 } 5775 5776 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5777 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 5778 if (TC == 1) { 5779 reportVectorizationFailure("Single iteration (non) loop", 5780 "loop trip count is one, irrelevant for vectorization", 5781 "SingleIterationLoop", ORE, TheLoop); 5782 return FixedScalableVFPair::getNone(); 5783 } 5784 5785 switch (ScalarEpilogueStatus) { 5786 case CM_ScalarEpilogueAllowed: 5787 return computeFeasibleMaxVF(TC, UserVF); 5788 case CM_ScalarEpilogueNotAllowedUsePredicate: 5789 LLVM_FALLTHROUGH; 5790 case CM_ScalarEpilogueNotNeededUsePredicate: 5791 LLVM_DEBUG( 5792 dbgs() << "LV: vector predicate hint/switch found.\n" 5793 << "LV: Not allowing scalar epilogue, creating predicated " 5794 << "vector loop.\n"); 5795 break; 5796 case CM_ScalarEpilogueNotAllowedLowTripLoop: 5797 // fallthrough as a special case of OptForSize 5798 case CM_ScalarEpilogueNotAllowedOptSize: 5799 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize) 5800 LLVM_DEBUG( 5801 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 5802 else 5803 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip " 5804 << "count.\n"); 5805 5806 // Bail if runtime checks are required, which are not good when optimising 5807 // for size. 5808 if (runtimeChecksRequired()) 5809 return FixedScalableVFPair::getNone(); 5810 5811 break; 5812 } 5813 5814 // The only loops we can vectorize without a scalar epilogue, are loops with 5815 // a bottom-test and a single exiting block. We'd have to handle the fact 5816 // that not every instruction executes on the last iteration. This will 5817 // require a lane mask which varies through the vector loop body. (TODO) 5818 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { 5819 // If there was a tail-folding hint/switch, but we can't fold the tail by 5820 // masking, fallback to a vectorization with a scalar epilogue. 
5821 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5822 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5823 "scalar epilogue instead.\n");
5824 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5825 return computeFeasibleMaxVF(TC, UserVF);
5826 }
5827 return FixedScalableVFPair::getNone();
5828 }
5829
5830 // Now try the tail folding.
5831
5832 // Invalidate interleave groups that require an epilogue if we can't mask
5833 // the interleave-group.
5834 if (!useMaskedInterleavedAccesses(TTI)) {
5835 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5836 "No decisions should have been taken at this point");
5837 // Note: There is no need to invalidate any cost modeling decisions here, as
5838 // none were taken so far.
5839 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5840 }
5841
5842 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF);
5843 // Avoid tail folding if the trip count is known to be a multiple of any VF
5844 // we chose.
5845 // FIXME: The condition below pessimises the case for fixed-width vectors,
5846 // when scalable VFs are also candidates for vectorization.
5847 if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5848 ElementCount MaxFixedVF = MaxFactors.FixedVF;
5849 assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5850 "MaxFixedVF must be a power of 2");
5851 unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5852 : MaxFixedVF.getFixedValue();
5853 ScalarEvolution *SE = PSE.getSE();
5854 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5855 const SCEV *ExitCount = SE->getAddExpr(
5856 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5857 const SCEV *Rem = SE->getURemExpr(
5858 SE->applyLoopGuards(ExitCount, TheLoop),
5859 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5860 if (Rem->isZero()) {
5861 // Accept MaxFixedVF if we do not have a tail.
5862 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5863 return MaxFactors;
5864 }
5865 }
5866
5867 // For scalable vectors, don't use tail folding as this is currently not yet
5868 // supported. The code is likely to have ended up here if the trip count is
5869 // low, in which case it makes sense not to use scalable vectors.
5870 if (MaxFactors.ScalableVF.isVector())
5871 MaxFactors.ScalableVF = ElementCount::getScalable(0);
5872
5873 // If we don't know the precise trip count, or if the trip count that we
5874 // found modulo the vectorization factor is not zero, try to fold the tail
5875 // by masking.
5876 // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5877 if (Legal->prepareToFoldTailByMasking()) {
5878 FoldTailByMasking = true;
5879 return MaxFactors;
5880 }
5881
5882 // If there was a tail-folding hint/switch, but we can't fold the tail by
5883 // masking, fallback to a vectorization with a scalar epilogue.
5884 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5885 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5886 "scalar epilogue instead.\n"); 5887 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5888 return MaxFactors; 5889 } 5890 5891 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5892 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5893 return FixedScalableVFPair::getNone(); 5894 } 5895 5896 if (TC == 0) { 5897 reportVectorizationFailure( 5898 "Unable to calculate the loop count due to complex control flow", 5899 "unable to calculate the loop count due to complex control flow", 5900 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5901 return FixedScalableVFPair::getNone(); 5902 } 5903 5904 reportVectorizationFailure( 5905 "Cannot optimize for size and vectorize at the same time.", 5906 "cannot optimize for size and vectorize at the same time. " 5907 "Enable vectorization of this loop with '#pragma clang loop " 5908 "vectorize(enable)' when compiling with -Os/-Oz", 5909 "NoTailLoopWithOptForSize", ORE, TheLoop); 5910 return FixedScalableVFPair::getNone(); 5911 } 5912 5913 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( 5914 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, 5915 const ElementCount &MaxSafeVF) { 5916 bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); 5917 TypeSize WidestRegister = TTI.getRegisterBitWidth( 5918 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector 5919 : TargetTransformInfo::RGK_FixedWidthVector); 5920 5921 // Convenience function to return the minimum of two ElementCounts. 5922 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) { 5923 assert((LHS.isScalable() == RHS.isScalable()) && 5924 "Scalable flags must match"); 5925 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS; 5926 }; 5927 5928 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 5929 // Note that both WidestRegister and WidestType may not be a powers of 2. 5930 auto MaxVectorElementCount = ElementCount::get( 5931 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType), 5932 ComputeScalableMaxVF); 5933 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF); 5934 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5935 << (MaxVectorElementCount * WidestType) << " bits.\n"); 5936 5937 if (!MaxVectorElementCount) { 5938 LLVM_DEBUG(dbgs() << "LV: The target has no " 5939 << (ComputeScalableMaxVF ? "scalable" : "fixed") 5940 << " vector registers.\n"); 5941 return ElementCount::getFixed(1); 5942 } 5943 5944 const auto TripCountEC = ElementCount::getFixed(ConstTripCount); 5945 if (ConstTripCount && 5946 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) && 5947 isPowerOf2_32(ConstTripCount)) { 5948 // We need to clamp the VF to be the ConstTripCount. There is no point in 5949 // choosing a higher viable VF as done in the loop below. If 5950 // MaxVectorElementCount is scalable, we only fall back on a fixed VF when 5951 // the TC is less than or equal to the known number of lanes. 
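// For example (illustrative numbers): with 256-bit vector registers and a
// widest type of i32, MaxVectorElementCount is 8; a known trip count of 4
// (a power of two no larger than 8) clamps the returned VF to 4, so a single
// vector iteration covers the whole trip count with no remainder.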
5952 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 5953 << ConstTripCount << "\n"); 5954 return TripCountEC; 5955 } 5956 5957 ElementCount MaxVF = MaxVectorElementCount; 5958 if (TTI.shouldMaximizeVectorBandwidth() || 5959 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 5960 auto MaxVectorElementCountMaxBW = ElementCount::get( 5961 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType), 5962 ComputeScalableMaxVF); 5963 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF); 5964 5965 // Collect all viable vectorization factors larger than the default MaxVF 5966 // (i.e. MaxVectorElementCount). 5967 SmallVector<ElementCount, 8> VFs; 5968 for (ElementCount VS = MaxVectorElementCount * 2; 5969 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2) 5970 VFs.push_back(VS); 5971 5972 // For each VF calculate its register usage. 5973 auto RUs = calculateRegisterUsage(VFs); 5974 5975 // Select the largest VF which doesn't require more registers than existing 5976 // ones. 5977 for (int i = RUs.size() - 1; i >= 0; --i) { 5978 bool Selected = true; 5979 for (auto &pair : RUs[i].MaxLocalUsers) { 5980 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5981 if (pair.second > TargetNumRegisters) 5982 Selected = false; 5983 } 5984 if (Selected) { 5985 MaxVF = VFs[i]; 5986 break; 5987 } 5988 } 5989 if (ElementCount MinVF = 5990 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { 5991 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 5992 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5993 << ") with target's minimum: " << MinVF << '\n'); 5994 MaxVF = MinVF; 5995 } 5996 } 5997 } 5998 return MaxVF; 5999 } 6000 6001 bool LoopVectorizationCostModel::isMoreProfitable( 6002 const VectorizationFactor &A, const VectorizationFactor &B) const { 6003 InstructionCost CostA = A.Cost; 6004 InstructionCost CostB = B.Cost; 6005 6006 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 6007 6008 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 6009 MaxTripCount) { 6010 // If we are folding the tail and the trip count is a known (possibly small) 6011 // constant, the trip count will be rounded up to an integer number of 6012 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 6013 // which we compare directly. When not folding the tail, the total cost will 6014 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 6015 // approximated with the per-lane cost below instead of using the tripcount 6016 // as here. 6017 auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 6018 auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 6019 return RTCostA < RTCostB; 6020 } 6021 6022 // When set to preferred, for now assume vscale may be larger than 1, so 6023 // that scalable vectorization is slightly favorable over fixed-width 6024 // vectorization. 
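// For example (illustrative numbers): comparing A = {vscale x 4, cost 10}
// against B = {4, cost 10}, the check below evaluates 10 * 4 <= 10 * 4, which
// holds, so the scalable factor wins a tie that the strict '<' used for the
// general case further down would lose; any vscale greater than 1 then only
// improves on that estimate.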
6025 if (Hints->isScalableVectorizationPreferred()) 6026 if (A.Width.isScalable() && !B.Width.isScalable()) 6027 return (CostA * B.Width.getKnownMinValue()) <= 6028 (CostB * A.Width.getKnownMinValue()); 6029 6030 // To avoid the need for FP division: 6031 // (CostA / A.Width) < (CostB / B.Width) 6032 // <=> (CostA * B.Width) < (CostB * A.Width) 6033 return (CostA * B.Width.getKnownMinValue()) < 6034 (CostB * A.Width.getKnownMinValue()); 6035 } 6036 6037 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 6038 const ElementCountSet &VFCandidates) { 6039 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 6040 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 6041 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 6042 assert(VFCandidates.count(ElementCount::getFixed(1)) && 6043 "Expected Scalar VF to be a candidate"); 6044 6045 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); 6046 VectorizationFactor ChosenFactor = ScalarCost; 6047 6048 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 6049 if (ForceVectorization && VFCandidates.size() > 1) { 6050 // Ignore scalar width, because the user explicitly wants vectorization. 6051 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 6052 // evaluation. 6053 ChosenFactor.Cost = InstructionCost::getMax(); 6054 } 6055 6056 SmallVector<InstructionVFPair> InvalidCosts; 6057 for (const auto &i : VFCandidates) { 6058 // The cost for scalar VF=1 is already calculated, so ignore it. 6059 if (i.isScalar()) 6060 continue; 6061 6062 VectorizationCostTy C = expectedCost(i, &InvalidCosts); 6063 VectorizationFactor Candidate(i, C.first); 6064 LLVM_DEBUG( 6065 dbgs() << "LV: Vector loop of width " << i << " costs: " 6066 << (Candidate.Cost / Candidate.Width.getKnownMinValue()) 6067 << (i.isScalable() ? " (assuming a minimum vscale of 1)" : "") 6068 << ".\n"); 6069 6070 if (!C.second && !ForceVectorization) { 6071 LLVM_DEBUG( 6072 dbgs() << "LV: Not considering vector loop of width " << i 6073 << " because it will not generate any vector instructions.\n"); 6074 continue; 6075 } 6076 6077 // If profitable add it to ProfitableVF list. 6078 if (isMoreProfitable(Candidate, ScalarCost)) 6079 ProfitableVFs.push_back(Candidate); 6080 6081 if (isMoreProfitable(Candidate, ChosenFactor)) 6082 ChosenFactor = Candidate; 6083 } 6084 6085 // Emit a report of VFs with invalid costs in the loop. 6086 if (!InvalidCosts.empty()) { 6087 // Group the remarks per instruction, keeping the instruction order from 6088 // InvalidCosts. 6089 std::map<Instruction *, unsigned> Numbering; 6090 unsigned I = 0; 6091 for (auto &Pair : InvalidCosts) 6092 if (!Numbering.count(Pair.first)) 6093 Numbering[Pair.first] = I++; 6094 6095 // Sort the list, first on instruction(number) then on VF. 
6096 llvm::sort(InvalidCosts, 6097 [&Numbering](InstructionVFPair &A, InstructionVFPair &B) { 6098 if (Numbering[A.first] != Numbering[B.first]) 6099 return Numbering[A.first] < Numbering[B.first]; 6100 ElementCountComparator ECC; 6101 return ECC(A.second, B.second); 6102 }); 6103 6104 // For a list of ordered instruction-vf pairs: 6105 // [(load, vf1), (load, vf2), (store, vf1)] 6106 // Group the instructions together to emit separate remarks for: 6107 // load (vf1, vf2) 6108 // store (vf1) 6109 auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts); 6110 auto Subset = ArrayRef<InstructionVFPair>(); 6111 do { 6112 if (Subset.empty()) 6113 Subset = Tail.take_front(1); 6114 6115 Instruction *I = Subset.front().first; 6116 6117 // If the next instruction is different, or if there are no other pairs, 6118 // emit a remark for the collated subset. e.g. 6119 // [(load, vf1), (load, vf2))] 6120 // to emit: 6121 // remark: invalid costs for 'load' at VF=(vf, vf2) 6122 if (Subset == Tail || Tail[Subset.size()].first != I) { 6123 std::string OutString; 6124 raw_string_ostream OS(OutString); 6125 assert(!Subset.empty() && "Unexpected empty range"); 6126 OS << "Instruction with invalid costs prevented vectorization at VF=("; 6127 for (auto &Pair : Subset) 6128 OS << (Pair.second == Subset.front().second ? "" : ", ") 6129 << Pair.second; 6130 OS << "):"; 6131 if (auto *CI = dyn_cast<CallInst>(I)) 6132 OS << " call to " << CI->getCalledFunction()->getName(); 6133 else 6134 OS << " " << I->getOpcodeName(); 6135 OS.flush(); 6136 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); 6137 Tail = Tail.drop_front(Subset.size()); 6138 Subset = {}; 6139 } else 6140 // Grow the subset by one element 6141 Subset = Tail.take_front(Subset.size() + 1); 6142 } while (!Tail.empty()); 6143 } 6144 6145 if (!EnableCondStoresVectorization && NumPredStores) { 6146 reportVectorizationFailure("There are conditional stores.", 6147 "store that is conditionally executed prevents vectorization", 6148 "ConditionalStore", ORE, TheLoop); 6149 ChosenFactor = ScalarCost; 6150 } 6151 6152 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 6153 ChosenFactor.Cost >= ScalarCost.Cost) dbgs() 6154 << "LV: Vectorization seems to be not beneficial, " 6155 << "but was forced by a user.\n"); 6156 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 6157 return ChosenFactor; 6158 } 6159 6160 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 6161 const Loop &L, ElementCount VF) const { 6162 // Cross iteration phis such as reductions need special handling and are 6163 // currently unsupported. 6164 if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) { 6165 return Legal->isFirstOrderRecurrence(&Phi) || 6166 Legal->isReductionVariable(&Phi); 6167 })) 6168 return false; 6169 6170 // Phis with uses outside of the loop require special handling and are 6171 // currently unsupported. 6172 for (auto &Entry : Legal->getInductionVars()) { 6173 // Look for uses of the value of the induction at the last iteration. 6174 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 6175 for (User *U : PostInc->users()) 6176 if (!L.contains(cast<Instruction>(U))) 6177 return false; 6178 // Look for uses of penultimate value of the induction. 6179 for (User *U : Entry.first->users()) 6180 if (!L.contains(cast<Instruction>(U))) 6181 return false; 6182 } 6183 6184 // Induction variables that are widened require special handling that is 6185 // currently not supported. 
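// For example, an induction whose value is used directly in vectorized
// arithmetic (say, a[i] = i * c) has to be widened into a vector of per-lane
// values; the check below only accepts inductions that are scalar after
// vectorization or profitable to scalarize, and otherwise rejects the loop as
// an epilogue-vectorization candidate.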
6186 if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
6187 return !(this->isScalarAfterVectorization(Entry.first, VF) ||
6188 this->isProfitableToScalarize(Entry.first, VF));
6189 }))
6190 return false;
6191
6192 // Epilogue vectorization code has not been audited to ensure it handles
6193 // non-latch exits properly. It may be fine, but it needs to be audited and
6194 // tested.
6195 if (L.getExitingBlock() != L.getLoopLatch())
6196 return false;
6197
6198 return true;
6199 }
6200
6201 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
6202 const ElementCount VF) const {
6203 // FIXME: We need a much better cost-model to take different parameters such
6204 // as register pressure, code size increase and cost of extra branches into
6205 // account. For now we apply a very crude heuristic and only consider loops
6206 // with vectorization factors larger than a certain value.
6207 // We also consider epilogue vectorization unprofitable for targets that don't
6208 // consider interleaving beneficial (e.g. MVE).
6209 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
6210 return false;
6211 if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
6212 return true;
6213 return false;
6214 }
6215
6216 VectorizationFactor
6217 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
6218 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
6219 VectorizationFactor Result = VectorizationFactor::Disabled();
6220 if (!EnableEpilogueVectorization) {
6221 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
6222 return Result;
6223 }
6224
6225 if (!isScalarEpilogueAllowed()) {
6226 LLVM_DEBUG(
6227 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
6228 "allowed.\n";);
6229 return Result;
6230 }
6231
6232 // FIXME: This can be fixed for scalable vectors later, because at this stage
6233 // the LoopVectorizer will only consider vectorizing a loop with scalable
6234 // vectors when the loop has a hint to enable vectorization for a given VF.
6235 if (MainLoopVF.isScalable()) {
6236 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
6237 "yet supported.\n");
6238 return Result;
6239 }
6240
6241 // Not really a cost consideration, but check for unsupported cases here to
6242 // simplify the logic.
6243 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { 6244 LLVM_DEBUG( 6245 dbgs() << "LEV: Unable to vectorize epilogue because the loop is " 6246 "not a supported candidate.\n";); 6247 return Result; 6248 } 6249 6250 if (EpilogueVectorizationForceVF > 1) { 6251 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";); 6252 if (LVP.hasPlanWithVFs( 6253 {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)})) 6254 return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0}; 6255 else { 6256 LLVM_DEBUG( 6257 dbgs() 6258 << "LEV: Epilogue vectorization forced factor is not viable.\n";); 6259 return Result; 6260 } 6261 } 6262 6263 if (TheLoop->getHeader()->getParent()->hasOptSize() || 6264 TheLoop->getHeader()->getParent()->hasMinSize()) { 6265 LLVM_DEBUG( 6266 dbgs() 6267 << "LEV: Epilogue vectorization skipped due to opt for size.\n";); 6268 return Result; 6269 } 6270 6271 if (!isEpilogueVectorizationProfitable(MainLoopVF)) 6272 return Result; 6273 6274 for (auto &NextVF : ProfitableVFs) 6275 if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) && 6276 (Result.Width.getFixedValue() == 1 || 6277 isMoreProfitable(NextVF, Result)) && 6278 LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width})) 6279 Result = NextVF; 6280 6281 if (Result != VectorizationFactor::Disabled()) 6282 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 6283 << Result.Width.getFixedValue() << "\n";); 6284 return Result; 6285 } 6286 6287 std::pair<unsigned, unsigned> 6288 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 6289 unsigned MinWidth = -1U; 6290 unsigned MaxWidth = 8; 6291 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 6292 for (Type *T : ElementTypesInLoop) { 6293 MinWidth = std::min<unsigned>( 6294 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 6295 MaxWidth = std::max<unsigned>( 6296 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 6297 } 6298 return {MinWidth, MaxWidth}; 6299 } 6300 6301 void LoopVectorizationCostModel::collectElementTypesForWidening() { 6302 ElementTypesInLoop.clear(); 6303 // For each block. 6304 for (BasicBlock *BB : TheLoop->blocks()) { 6305 // For each instruction in the loop. 6306 for (Instruction &I : BB->instructionsWithoutDebug()) { 6307 Type *T = I.getType(); 6308 6309 // Skip ignored values. 6310 if (ValuesToIgnore.count(&I)) 6311 continue; 6312 6313 // Only examine Loads, Stores and PHINodes. 6314 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 6315 continue; 6316 6317 // Examine PHI nodes that are reduction variables. Update the type to 6318 // account for the recurrence type. 6319 if (auto *PN = dyn_cast<PHINode>(&I)) { 6320 if (!Legal->isReductionVariable(PN)) 6321 continue; 6322 const RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[PN]; 6323 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 6324 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 6325 RdxDesc.getRecurrenceType(), 6326 TargetTransformInfo::ReductionFlags())) 6327 continue; 6328 T = RdxDesc.getRecurrenceType(); 6329 } 6330 6331 // Examine the stored values. 6332 if (auto *ST = dyn_cast<StoreInst>(&I)) 6333 T = ST->getValueOperand()->getType(); 6334 6335 // Ignore loaded pointer types and stored pointer types that are not 6336 // vectorizable. 6337 // 6338 // FIXME: The check here attempts to predict whether a load or store will 6339 // be vectorized. We only know this for certain after a VF has 6340 // been selected. 
Here, we assume that if an access can be 6341 // vectorized, it will be. We should also look at extending this 6342 // optimization to non-pointer types. 6343 // 6344 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 6345 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 6346 continue; 6347 6348 ElementTypesInLoop.insert(T); 6349 } 6350 } 6351 } 6352 6353 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 6354 unsigned LoopCost) { 6355 // -- The interleave heuristics -- 6356 // We interleave the loop in order to expose ILP and reduce the loop overhead. 6357 // There are many micro-architectural considerations that we can't predict 6358 // at this level. For example, frontend pressure (on decode or fetch) due to 6359 // code size, or the number and capabilities of the execution ports. 6360 // 6361 // We use the following heuristics to select the interleave count: 6362 // 1. If the code has reductions, then we interleave to break the cross 6363 // iteration dependency. 6364 // 2. If the loop is really small, then we interleave to reduce the loop 6365 // overhead. 6366 // 3. We don't interleave if we think that we will spill registers to memory 6367 // due to the increased register pressure. 6368 6369 if (!isScalarEpilogueAllowed()) 6370 return 1; 6371 6372 // We used the distance for the interleave count. 6373 if (Legal->getMaxSafeDepDistBytes() != -1U) 6374 return 1; 6375 6376 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 6377 const bool HasReductions = !Legal->getReductionVars().empty(); 6378 // Do not interleave loops with a relatively small known or estimated trip 6379 // count. But we will interleave when InterleaveSmallLoopScalarReduction is 6380 // enabled, and the code has scalar reductions(HasReductions && VF = 1), 6381 // because with the above conditions interleaving can expose ILP and break 6382 // cross iteration dependences for reductions. 6383 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 6384 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 6385 return 1; 6386 6387 RegisterUsage R = calculateRegisterUsage({VF})[0]; 6388 // We divide by these constants so assume that we have at least one 6389 // instruction that uses at least one register. 6390 for (auto& pair : R.MaxLocalUsers) { 6391 pair.second = std::max(pair.second, 1U); 6392 } 6393 6394 // We calculate the interleave count using the following formula. 6395 // Subtract the number of loop invariants from the number of available 6396 // registers. These registers are used by all of the interleaved instances. 6397 // Next, divide the remaining registers by the number of registers that is 6398 // required by the loop, in order to estimate how many parallel instances 6399 // fit without causing spills. All of this is rounded down if necessary to be 6400 // a power of two. We want power of two interleave count to simplify any 6401 // addressing operations or alignment considerations. 6402 // We also want power of two interleave counts to ensure that the induction 6403 // variable of the vector loop wraps to zero, when tail is folded by masking; 6404 // this currently happens when OptForSize, in which case IC is set to 1 above. 
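// Worked example (illustrative numbers): with 32 vector registers, 2 of them
// held by loop-invariant values and at most 7 values of that register class
// live at once inside the loop, the estimate is
//   PowerOf2Floor((32 - 2) / 7) = PowerOf2Floor(4) = 4
// interleaved instances; the induction-variable heuristic below subtracts one
// more register and one local user before dividing.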
6405 unsigned IC = UINT_MAX; 6406 6407 for (auto& pair : R.MaxLocalUsers) { 6408 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 6409 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 6410 << " registers of " 6411 << TTI.getRegisterClassName(pair.first) << " register class\n"); 6412 if (VF.isScalar()) { 6413 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 6414 TargetNumRegisters = ForceTargetNumScalarRegs; 6415 } else { 6416 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 6417 TargetNumRegisters = ForceTargetNumVectorRegs; 6418 } 6419 unsigned MaxLocalUsers = pair.second; 6420 unsigned LoopInvariantRegs = 0; 6421 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 6422 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 6423 6424 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 6425 // Don't count the induction variable as interleaved. 6426 if (EnableIndVarRegisterHeur) { 6427 TmpIC = 6428 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 6429 std::max(1U, (MaxLocalUsers - 1))); 6430 } 6431 6432 IC = std::min(IC, TmpIC); 6433 } 6434 6435 // Clamp the interleave ranges to reasonable counts. 6436 unsigned MaxInterleaveCount = 6437 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 6438 6439 // Check if the user has overridden the max. 6440 if (VF.isScalar()) { 6441 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 6442 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 6443 } else { 6444 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 6445 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 6446 } 6447 6448 // If trip count is known or estimated compile time constant, limit the 6449 // interleave count to be less than the trip count divided by VF, provided it 6450 // is at least 1. 6451 // 6452 // For scalable vectors we can't know if interleaving is beneficial. It may 6453 // not be beneficial for small loops if none of the lanes in the second vector 6454 // iterations is enabled. However, for larger loops, there is likely to be a 6455 // similar benefit as for fixed-width vectors. For now, we choose to leave 6456 // the InterleaveCount as if vscale is '1', although if some information about 6457 // the vector is known (e.g. min vector size), we can make a better decision. 6458 if (BestKnownTC) { 6459 MaxInterleaveCount = 6460 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 6461 // Make sure MaxInterleaveCount is greater than 0. 6462 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 6463 } 6464 6465 assert(MaxInterleaveCount > 0 && 6466 "Maximum interleave count must be greater than 0"); 6467 6468 // Clamp the calculated IC to be between the 1 and the max interleave count 6469 // that the target and trip count allows. 6470 if (IC > MaxInterleaveCount) 6471 IC = MaxInterleaveCount; 6472 else 6473 // Make sure IC is greater than 0. 6474 IC = std::max(1u, IC); 6475 6476 assert(IC > 0 && "Interleave count must be greater than 0."); 6477 6478 // If we did not calculate the cost for VF (because the user selected the VF) 6479 // then we calculate the cost of VF here. 
6480 if (LoopCost == 0) { 6481 InstructionCost C = expectedCost(VF).first; 6482 assert(C.isValid() && "Expected to have chosen a VF with valid cost"); 6483 LoopCost = *C.getValue(); 6484 } 6485 6486 assert(LoopCost && "Non-zero loop cost expected"); 6487 6488 // Interleave if we vectorized this loop and there is a reduction that could 6489 // benefit from interleaving. 6490 if (VF.isVector() && HasReductions) { 6491 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6492 return IC; 6493 } 6494 6495 // Note that if we've already vectorized the loop we will have done the 6496 // runtime check and so interleaving won't require further checks. 6497 bool InterleavingRequiresRuntimePointerCheck = 6498 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 6499 6500 // We want to interleave small loops in order to reduce the loop overhead and 6501 // potentially expose ILP opportunities. 6502 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 6503 << "LV: IC is " << IC << '\n' 6504 << "LV: VF is " << VF << '\n'); 6505 const bool AggressivelyInterleaveReductions = 6506 TTI.enableAggressiveInterleaving(HasReductions); 6507 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 6508 // We assume that the cost overhead is 1 and we use the cost model 6509 // to estimate the cost of the loop and interleave until the cost of the 6510 // loop overhead is about 5% of the cost of the loop. 6511 unsigned SmallIC = 6512 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 6513 6514 // Interleave until store/load ports (estimated by max interleave count) are 6515 // saturated. 6516 unsigned NumStores = Legal->getNumStores(); 6517 unsigned NumLoads = Legal->getNumLoads(); 6518 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 6519 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 6520 6521 // If we have a scalar reduction (vector reductions are already dealt with 6522 // by this point), we can increase the critical path length if the loop 6523 // we're interleaving is inside another loop. For tree-wise reductions 6524 // set the limit to 2, and for ordered reductions it's best to disable 6525 // interleaving entirely. 6526 if (HasReductions && TheLoop->getLoopDepth() > 1) { 6527 bool HasOrderedReductions = 6528 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 6529 const RecurrenceDescriptor &RdxDesc = Reduction.second; 6530 return RdxDesc.isOrdered(); 6531 }); 6532 if (HasOrderedReductions) { 6533 LLVM_DEBUG( 6534 dbgs() << "LV: Not interleaving scalar ordered reductions.\n"); 6535 return 1; 6536 } 6537 6538 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 6539 SmallIC = std::min(SmallIC, F); 6540 StoresIC = std::min(StoresIC, F); 6541 LoadsIC = std::min(LoadsIC, F); 6542 } 6543 6544 if (EnableLoadStoreRuntimeInterleave && 6545 std::max(StoresIC, LoadsIC) > SmallIC) { 6546 LLVM_DEBUG( 6547 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 6548 return std::max(StoresIC, LoadsIC); 6549 } 6550 6551 // If there are scalar reductions and TTI has enabled aggressive 6552 // interleaving for reductions, we will interleave to expose ILP. 6553 if (InterleaveSmallLoopScalarReduction && VF.isScalar() && 6554 AggressivelyInterleaveReductions) { 6555 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6556 // Interleave no less than SmallIC but not as aggressive as the normal IC 6557 // to satisfy the rare situation when resources are too limited. 
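// For example (illustrative numbers): with IC = 8 and SmallIC = 2, the
// statement below returns max(8 / 2, 2) = 4, i.e. more than the small-loop
// heuristic alone allows but only half of what the register-pressure estimate
// would permit.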
6558 return std::max(IC / 2, SmallIC); 6559 } else { 6560 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 6561 return SmallIC; 6562 } 6563 } 6564 6565 // Interleave if this is a large loop (small loops are already dealt with by 6566 // this point) that could benefit from interleaving. 6567 if (AggressivelyInterleaveReductions) { 6568 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 6569 return IC; 6570 } 6571 6572 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 6573 return 1; 6574 } 6575 6576 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 6577 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) { 6578 // This function calculates the register usage by measuring the highest number 6579 // of values that are alive at a single location. Obviously, this is a very 6580 // rough estimation. We scan the loop in a topological order in order and 6581 // assign a number to each instruction. We use RPO to ensure that defs are 6582 // met before their users. We assume that each instruction that has in-loop 6583 // users starts an interval. We record every time that an in-loop value is 6584 // used, so we have a list of the first and last occurrences of each 6585 // instruction. Next, we transpose this data structure into a multi map that 6586 // holds the list of intervals that *end* at a specific location. This multi 6587 // map allows us to perform a linear search. We scan the instructions linearly 6588 // and record each time that a new interval starts, by placing it in a set. 6589 // If we find this value in the multi-map then we remove it from the set. 6590 // The max register usage is the maximum size of the set. 6591 // We also search for instructions that are defined outside the loop, but are 6592 // used inside the loop. We need this number separately from the max-interval 6593 // usage number because when we unroll, loop-invariant values do not take 6594 // more register. 6595 LoopBlocksDFS DFS(TheLoop); 6596 DFS.perform(LI); 6597 6598 RegisterUsage RU; 6599 6600 // Each 'key' in the map opens a new interval. The values 6601 // of the map are the index of the 'last seen' usage of the 6602 // instruction that is the key. 6603 using IntervalMap = DenseMap<Instruction *, unsigned>; 6604 6605 // Maps instruction to its index. 6606 SmallVector<Instruction *, 64> IdxToInstr; 6607 // Marks the end of each interval. 6608 IntervalMap EndPoint; 6609 // Saves the list of instruction indices that are used in the loop. 6610 SmallPtrSet<Instruction *, 8> Ends; 6611 // Saves the list of values that are used in the loop but are 6612 // defined outside the loop, such as arguments and constants. 6613 SmallPtrSet<Value *, 8> LoopInvariants; 6614 6615 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6616 for (Instruction &I : BB->instructionsWithoutDebug()) { 6617 IdxToInstr.push_back(&I); 6618 6619 // Save the end location of each USE. 6620 for (Value *U : I.operands()) { 6621 auto *Instr = dyn_cast<Instruction>(U); 6622 6623 // Ignore non-instruction values such as arguments, constants, etc. 6624 if (!Instr) 6625 continue; 6626 6627 // If this instruction is outside the loop then record it and continue. 6628 if (!TheLoop->contains(Instr)) { 6629 LoopInvariants.insert(Instr); 6630 continue; 6631 } 6632 6633 // Overwrite previous end points. 6634 EndPoint[Instr] = IdxToInstr.size(); 6635 Ends.insert(Instr); 6636 } 6637 } 6638 } 6639 6640 // Saves the list of intervals that end with the index in 'key'. 
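// For example, if %a and %b are both last used by the same instruction, they
// land in the same TransposeEnds bucket and both of their intervals are
// closed (erased from the open set) when the linear scan reaches that point;
// the size of the open set at each step is the register-pressure estimate for
// that program point.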
6641 using InstrList = SmallVector<Instruction *, 2>; 6642 DenseMap<unsigned, InstrList> TransposeEnds; 6643 6644 // Transpose the EndPoints to a list of values that end at each index. 6645 for (auto &Interval : EndPoint) 6646 TransposeEnds[Interval.second].push_back(Interval.first); 6647 6648 SmallPtrSet<Instruction *, 8> OpenIntervals; 6649 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6650 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 6651 6652 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6653 6654 // A lambda that gets the register usage for the given type and VF. 6655 const auto &TTICapture = TTI; 6656 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned { 6657 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 6658 return 0; 6659 InstructionCost::CostType RegUsage = 6660 *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue(); 6661 assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() && 6662 "Nonsensical values for register usage."); 6663 return RegUsage; 6664 }; 6665 6666 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 6667 Instruction *I = IdxToInstr[i]; 6668 6669 // Remove all of the instructions that end at this location. 6670 InstrList &List = TransposeEnds[i]; 6671 for (Instruction *ToRemove : List) 6672 OpenIntervals.erase(ToRemove); 6673 6674 // Ignore instructions that are never used within the loop. 6675 if (!Ends.count(I)) 6676 continue; 6677 6678 // Skip ignored values. 6679 if (ValuesToIgnore.count(I)) 6680 continue; 6681 6682 // For each VF find the maximum usage of registers. 6683 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6684 // Count the number of live intervals. 6685 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6686 6687 if (VFs[j].isScalar()) { 6688 for (auto Inst : OpenIntervals) { 6689 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6690 if (RegUsage.find(ClassID) == RegUsage.end()) 6691 RegUsage[ClassID] = 1; 6692 else 6693 RegUsage[ClassID] += 1; 6694 } 6695 } else { 6696 collectUniformsAndScalars(VFs[j]); 6697 for (auto Inst : OpenIntervals) { 6698 // Skip ignored values for VF > 1. 6699 if (VecValuesToIgnore.count(Inst)) 6700 continue; 6701 if (isScalarAfterVectorization(Inst, VFs[j])) { 6702 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6703 if (RegUsage.find(ClassID) == RegUsage.end()) 6704 RegUsage[ClassID] = 1; 6705 else 6706 RegUsage[ClassID] += 1; 6707 } else { 6708 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6709 if (RegUsage.find(ClassID) == RegUsage.end()) 6710 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6711 else 6712 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6713 } 6714 } 6715 } 6716 6717 for (auto& pair : RegUsage) { 6718 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6719 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6720 else 6721 MaxUsages[j][pair.first] = pair.second; 6722 } 6723 } 6724 6725 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6726 << OpenIntervals.size() << '\n'); 6727 6728 // Add the current instruction to the list of open intervals. 6729 OpenIntervals.insert(I); 6730 } 6731 6732 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6733 SmallMapVector<unsigned, unsigned, 4> Invariant; 6734 6735 for (auto Inst : LoopInvariants) { 6736 unsigned Usage = 6737 VFs[i].isScalar() ? 
1 : GetRegUsage(Inst->getType(), VFs[i]); 6738 unsigned ClassID = 6739 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6740 if (Invariant.find(ClassID) == Invariant.end()) 6741 Invariant[ClassID] = Usage; 6742 else 6743 Invariant[ClassID] += Usage; 6744 } 6745 6746 LLVM_DEBUG({ 6747 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6748 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6749 << " item\n"; 6750 for (const auto &pair : MaxUsages[i]) { 6751 dbgs() << "LV(REG): RegisterClass: " 6752 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6753 << " registers\n"; 6754 } 6755 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6756 << " item\n"; 6757 for (const auto &pair : Invariant) { 6758 dbgs() << "LV(REG): RegisterClass: " 6759 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6760 << " registers\n"; 6761 } 6762 }); 6763 6764 RU.LoopInvariantRegs = Invariant; 6765 RU.MaxLocalUsers = MaxUsages[i]; 6766 RUs[i] = RU; 6767 } 6768 6769 return RUs; 6770 } 6771 6772 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 6773 // TODO: Cost model for emulated masked load/store is completely 6774 // broken. This hack guides the cost model to use an artificially 6775 // high enough value to practically disable vectorization with such 6776 // operations, except where previously deployed legality hack allowed 6777 // using very low cost values. This is to avoid regressions coming simply 6778 // from moving "masked load/store" check from legality to cost model. 6779 // Masked Load/Gather emulation was previously never allowed. 6780 // Limited number of Masked Store/Scatter emulation was allowed. 6781 assert(isPredicatedInst(I) && 6782 "Expecting a scalar emulated instruction"); 6783 return isa<LoadInst>(I) || 6784 (isa<StoreInst>(I) && 6785 NumPredStores > NumberOfStoresToPredicate); 6786 } 6787 6788 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6789 // If we aren't vectorizing the loop, or if we've already collected the 6790 // instructions to scalarize, there's nothing to do. Collection may already 6791 // have occurred if we have a user-selected VF and are now computing the 6792 // expected cost for interleaving. 6793 if (VF.isScalar() || VF.isZero() || 6794 InstsToScalarize.find(VF) != InstsToScalarize.end()) 6795 return; 6796 6797 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 6798 // not profitable to scalarize any instructions, the presence of VF in the 6799 // map will indicate that we've analyzed it already. 6800 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6801 6802 // Find all the instructions that are scalar with predication in the loop and 6803 // determine if it would be better to not if-convert the blocks they are in. 6804 // If so, we also record the instructions to scalarize. 6805 for (BasicBlock *BB : TheLoop->blocks()) { 6806 if (!blockNeedsPredication(BB)) 6807 continue; 6808 for (Instruction &I : *BB) 6809 if (isScalarWithPredication(&I)) { 6810 ScalarCostsTy ScalarCosts; 6811 // Do not apply discount if scalable, because that would lead to 6812 // invalid scalarization costs. 6813 // Do not apply discount logic if hacked cost is needed 6814 // for emulated masked memrefs. 6815 if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I) && 6816 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6817 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6818 // Remember that BB will remain after vectorization. 
6819 PredicatedBBsAfterVectorization.insert(BB); 6820 } 6821 } 6822 } 6823 6824 int LoopVectorizationCostModel::computePredInstDiscount( 6825 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6826 assert(!isUniformAfterVectorization(PredInst, VF) && 6827 "Instruction marked uniform-after-vectorization will be predicated"); 6828 6829 // Initialize the discount to zero, meaning that the scalar version and the 6830 // vector version cost the same. 6831 InstructionCost Discount = 0; 6832 6833 // Holds instructions to analyze. The instructions we visit are mapped in 6834 // ScalarCosts. Those instructions are the ones that would be scalarized if 6835 // we find that the scalar version costs less. 6836 SmallVector<Instruction *, 8> Worklist; 6837 6838 // Returns true if the given instruction can be scalarized. 6839 auto canBeScalarized = [&](Instruction *I) -> bool { 6840 // We only attempt to scalarize instructions forming a single-use chain 6841 // from the original predicated block that would otherwise be vectorized. 6842 // Although not strictly necessary, we give up on instructions we know will 6843 // already be scalar to avoid traversing chains that are unlikely to be 6844 // beneficial. 6845 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6846 isScalarAfterVectorization(I, VF)) 6847 return false; 6848 6849 // If the instruction is scalar with predication, it will be analyzed 6850 // separately. We ignore it within the context of PredInst. 6851 if (isScalarWithPredication(I)) 6852 return false; 6853 6854 // If any of the instruction's operands are uniform after vectorization, 6855 // the instruction cannot be scalarized. This prevents, for example, a 6856 // masked load from being scalarized. 6857 // 6858 // We assume we will only emit a value for lane zero of an instruction 6859 // marked uniform after vectorization, rather than VF identical values. 6860 // Thus, if we scalarize an instruction that uses a uniform, we would 6861 // create uses of values corresponding to the lanes we aren't emitting code 6862 // for. This behavior can be changed by allowing getScalarValue to clone 6863 // the lane zero values for uniforms rather than asserting. 6864 for (Use &U : I->operands()) 6865 if (auto *J = dyn_cast<Instruction>(U.get())) 6866 if (isUniformAfterVectorization(J, VF)) 6867 return false; 6868 6869 // Otherwise, we can scalarize the instruction. 6870 return true; 6871 }; 6872 6873 // Compute the expected cost discount from scalarizing the entire expression 6874 // feeding the predicated instruction. We currently only consider expressions 6875 // that are single-use instruction chains. 6876 Worklist.push_back(PredInst); 6877 while (!Worklist.empty()) { 6878 Instruction *I = Worklist.pop_back_val(); 6879 6880 // If we've already analyzed the instruction, there's nothing to do. 6881 if (ScalarCosts.find(I) != ScalarCosts.end()) 6882 continue; 6883 6884 // Compute the cost of the vector instruction. Note that this cost already 6885 // includes the scalarization overhead of the predicated instruction. 6886 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6887 6888 // Compute the cost of the scalarized instruction. This cost is the cost of 6889 // the instruction as if it wasn't if-converted and instead remained in the 6890 // predicated block. We will scale this cost by block probability after 6891 // computing the scalarization overhead. 
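// Rough worked example (purely illustrative numbers; this assumes the
// reciprocal block probability used below is 2, i.e. the predicated block is
// expected to execute on every other iteration): with VF = 4 and a per-lane
// scalar cost of 1, ScalarCost starts at 4, scalarization overhead is added,
// and the total is then halved. If that scaled scalar cost is still below
// VectorCost, the difference increases Discount, i.e. scalarizing the chain
// looks profitable.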
6892 InstructionCost ScalarCost = 6893 VF.getFixedValue() * 6894 getInstructionCost(I, ElementCount::getFixed(1)).first; 6895 6896 // Compute the scalarization overhead of needed insertelement instructions 6897 // and phi nodes. 6898 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 6899 ScalarCost += TTI.getScalarizationOverhead( 6900 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6901 APInt::getAllOnesValue(VF.getFixedValue()), true, false); 6902 ScalarCost += 6903 VF.getFixedValue() * 6904 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6905 } 6906 6907 // Compute the scalarization overhead of needed extractelement 6908 // instructions. For each of the instruction's operands, if the operand can 6909 // be scalarized, add it to the worklist; otherwise, account for the 6910 // overhead. 6911 for (Use &U : I->operands()) 6912 if (auto *J = dyn_cast<Instruction>(U.get())) { 6913 assert(VectorType::isValidElementType(J->getType()) && 6914 "Instruction has non-scalar type"); 6915 if (canBeScalarized(J)) 6916 Worklist.push_back(J); 6917 else if (needsExtract(J, VF)) { 6918 ScalarCost += TTI.getScalarizationOverhead( 6919 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6920 APInt::getAllOnesValue(VF.getFixedValue()), false, true); 6921 } 6922 } 6923 6924 // Scale the total scalar cost by block probability. 6925 ScalarCost /= getReciprocalPredBlockProb(); 6926 6927 // Compute the discount. A non-negative discount means the vector version 6928 // of the instruction costs more, and scalarizing would be beneficial. 6929 Discount += VectorCost - ScalarCost; 6930 ScalarCosts[I] = ScalarCost; 6931 } 6932 6933 return *Discount.getValue(); 6934 } 6935 6936 LoopVectorizationCostModel::VectorizationCostTy 6937 LoopVectorizationCostModel::expectedCost( 6938 ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { 6939 VectorizationCostTy Cost; 6940 6941 // For each block. 6942 for (BasicBlock *BB : TheLoop->blocks()) { 6943 VectorizationCostTy BlockCost; 6944 6945 // For each instruction in the old loop. 6946 for (Instruction &I : BB->instructionsWithoutDebug()) { 6947 // Skip ignored values. 6948 if (ValuesToIgnore.count(&I) || 6949 (VF.isVector() && VecValuesToIgnore.count(&I))) 6950 continue; 6951 6952 VectorizationCostTy C = getInstructionCost(&I, VF); 6953 6954 // Check if we should override the cost. 6955 if (C.first.isValid() && 6956 ForceTargetInstructionCost.getNumOccurrences() > 0) 6957 C.first = InstructionCost(ForceTargetInstructionCost); 6958 6959 // Keep a list of instructions with invalid costs. 6960 if (Invalid && !C.first.isValid()) 6961 Invalid->emplace_back(&I, VF); 6962 6963 BlockCost.first += C.first; 6964 BlockCost.second |= C.second; 6965 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6966 << " for VF " << VF << " For instruction: " << I 6967 << '\n'); 6968 } 6969 6970 // If we are vectorizing a predicated block, it will have been 6971 // if-converted. This means that the block's instructions (aside from 6972 // stores and instructions that may divide by zero) will now be 6973 // unconditionally executed. For the scalar case, we may not always execute 6974 // the predicated block, if it is an if-else block. Thus, scale the block's 6975 // cost by the probability of executing it. blockNeedsPredication from 6976 // Legal is used so as to not include all blocks in tail folded loops. 
6977 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6978 BlockCost.first /= getReciprocalPredBlockProb(); 6979 6980 Cost.first += BlockCost.first; 6981 Cost.second |= BlockCost.second; 6982 } 6983 6984 return Cost; 6985 } 6986 6987 /// Gets Address Access SCEV after verifying that the access pattern 6988 /// is loop invariant except the induction variable dependence. 6989 /// 6990 /// This SCEV can be sent to the Target in order to estimate the address 6991 /// calculation cost. 6992 static const SCEV *getAddressAccessSCEV( 6993 Value *Ptr, 6994 LoopVectorizationLegality *Legal, 6995 PredicatedScalarEvolution &PSE, 6996 const Loop *TheLoop) { 6997 6998 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6999 if (!Gep) 7000 return nullptr; 7001 7002 // We are looking for a gep with all loop invariant indices except for one 7003 // which should be an induction variable. 7004 auto SE = PSE.getSE(); 7005 unsigned NumOperands = Gep->getNumOperands(); 7006 for (unsigned i = 1; i < NumOperands; ++i) { 7007 Value *Opd = Gep->getOperand(i); 7008 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 7009 !Legal->isInductionVariable(Opd)) 7010 return nullptr; 7011 } 7012 7013 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 7014 return PSE.getSCEV(Ptr); 7015 } 7016 7017 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 7018 return Legal->hasStride(I->getOperand(0)) || 7019 Legal->hasStride(I->getOperand(1)); 7020 } 7021 7022 InstructionCost 7023 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 7024 ElementCount VF) { 7025 assert(VF.isVector() && 7026 "Scalarization cost of instruction implies vectorization."); 7027 if (VF.isScalable()) 7028 return InstructionCost::getInvalid(); 7029 7030 Type *ValTy = getLoadStoreType(I); 7031 auto SE = PSE.getSE(); 7032 7033 unsigned AS = getLoadStoreAddressSpace(I); 7034 Value *Ptr = getLoadStorePointerOperand(I); 7035 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 7036 7037 // Figure out whether the access is strided and get the stride value 7038 // if it's known in compile time 7039 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 7040 7041 // Get the cost of the scalar memory instruction and address computation. 7042 InstructionCost Cost = 7043 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 7044 7045 // Don't pass *I here, since it is scalar but will actually be part of a 7046 // vectorized loop where the user of it is a vectorized instruction. 7047 const Align Alignment = getLoadStoreAlignment(I); 7048 Cost += VF.getKnownMinValue() * 7049 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 7050 AS, TTI::TCK_RecipThroughput); 7051 7052 // Get the overhead of the extractelement and insertelement instructions 7053 // we might create due to scalarization. 7054 Cost += getScalarizationOverhead(I, VF); 7055 7056 // If we have a predicated load/store, it will need extra i1 extracts and 7057 // conditional branches, but may not be executed for each vector lane. Scale 7058 // the cost by the probability of executing the predicated block. 
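// For illustration only (hypothetical IR; block and value names are not
// meaningful), a single lane of a predicated, scalarized store ends up
// looking like:
//   %m = extractelement <4 x i1> %mask, i32 0
//   br i1 %m, label %pred.store.if, label %pred.store.continue
// pred.store.if:
//   store i32 %v0, i32* %p0
//   br label %pred.store.continue
// Hence, on top of the scalar store itself, each lane pays for an i1 extract
// and a branch, and the whole per-lane cost is scaled by the probability
// that the predicated block executes at all.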
7059 if (isPredicatedInst(I)) { 7060 Cost /= getReciprocalPredBlockProb(); 7061 7062 // Add the cost of an i1 extract and a branch 7063 auto *Vec_i1Ty = 7064 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 7065 Cost += TTI.getScalarizationOverhead( 7066 Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()), 7067 /*Insert=*/false, /*Extract=*/true); 7068 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 7069 7070 if (useEmulatedMaskMemRefHack(I)) 7071 // Artificially setting to a high enough value to practically disable 7072 // vectorization with such operations. 7073 Cost = 3000000; 7074 } 7075 7076 return Cost; 7077 } 7078 7079 InstructionCost 7080 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 7081 ElementCount VF) { 7082 Type *ValTy = getLoadStoreType(I); 7083 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7084 Value *Ptr = getLoadStorePointerOperand(I); 7085 unsigned AS = getLoadStoreAddressSpace(I); 7086 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 7087 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7088 7089 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7090 "Stride should be 1 or -1 for consecutive memory access"); 7091 const Align Alignment = getLoadStoreAlignment(I); 7092 InstructionCost Cost = 0; 7093 if (Legal->isMaskRequired(I)) 7094 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 7095 CostKind); 7096 else 7097 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 7098 CostKind, I); 7099 7100 bool Reverse = ConsecutiveStride < 0; 7101 if (Reverse) 7102 Cost += 7103 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 7104 return Cost; 7105 } 7106 7107 InstructionCost 7108 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 7109 ElementCount VF) { 7110 assert(Legal->isUniformMemOp(*I)); 7111 7112 Type *ValTy = getLoadStoreType(I); 7113 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7114 const Align Alignment = getLoadStoreAlignment(I); 7115 unsigned AS = getLoadStoreAddressSpace(I); 7116 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7117 if (isa<LoadInst>(I)) { 7118 return TTI.getAddressComputationCost(ValTy) + 7119 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 7120 CostKind) + 7121 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 7122 } 7123 StoreInst *SI = cast<StoreInst>(I); 7124 7125 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 7126 return TTI.getAddressComputationCost(ValTy) + 7127 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 7128 CostKind) + 7129 (isLoopInvariantStoreValue 7130 ? 
0
7131 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
7132 VF.getKnownMinValue() - 1));
7133 }
7134
7135 InstructionCost
7136 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
7137 ElementCount VF) {
7138 Type *ValTy = getLoadStoreType(I);
7139 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7140 const Align Alignment = getLoadStoreAlignment(I);
7141 const Value *Ptr = getLoadStorePointerOperand(I);
7142
7143 return TTI.getAddressComputationCost(VectorTy) +
7144 TTI.getGatherScatterOpCost(
7145 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
7146 TargetTransformInfo::TCK_RecipThroughput, I);
7147 }
7148
7149 InstructionCost
7150 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
7151 ElementCount VF) {
7152 // TODO: Once we have support for interleaving with scalable vectors
7153 // we can calculate the cost properly here.
7154 if (VF.isScalable())
7155 return InstructionCost::getInvalid();
7156
7157 Type *ValTy = getLoadStoreType(I);
7158 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7159 unsigned AS = getLoadStoreAddressSpace(I);
7160
7161 auto Group = getInterleavedAccessGroup(I);
7162 assert(Group && "Fail to get an interleaved access group.");
7163
7164 unsigned InterleaveFactor = Group->getFactor();
7165 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
7166
7167 // Holds the indices of existing members in the interleaved group.
7168 SmallVector<unsigned, 4> Indices;
7169 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
7170 if (Group->getMember(IF))
7171 Indices.push_back(IF);
7172
7173 // Calculate the cost of the whole interleaved group.
7174 bool UseMaskForGaps =
7175 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
7176 (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
7177 InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
7178 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
7179 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
7180
7181 if (Group->isReverse()) {
7182 // TODO: Add support for reversed masked interleaved access.
7183 assert(!Legal->isMaskRequired(I) &&
7184 "Reverse masked interleaved access not supported.");
7185 Cost +=
7186 Group->getNumMembers() *
7187 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
7188 }
7189 return Cost;
7190 }
7191
7192 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
7193 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
7194 using namespace llvm::PatternMatch;
7195 // Early exit for no in-loop reductions.
7196 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
7197 return None;
7198 auto *VectorTy = cast<VectorType>(Ty);
7199
7200 // We are looking for one of the following patterns and for its minimal
7201 // acceptable cost:
7202 // reduce(mul(ext(A), ext(B))) or
7203 // reduce(mul(A, B)) or
7204 // reduce(ext(A)) or
7205 // reduce(A).
7206 // The basic idea is that we walk down the tree, finding the root
7207 // reduction instruction in InLoopReductionImmediateChains. From there we find
7208 // the pattern of mul/ext and test the cost of the entire pattern vs the cost
7209 // of the components. If the reduction cost is lower, then we return it for the
7210 // reduction instruction and 0 for the other instructions in the pattern. If
7211 // it is not, we return an invalid cost specifying that the original cost method
// should be used.
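// For illustration (hypothetical IR), an in-loop add reduction whose loop
// body contains
//   %ea  = sext i8 %a to i32
//   %eb  = sext i8 %b to i32
//   %mul = mul i32 %ea, %eb
//   %red = add i32 %mul, %phi
// may be costed here as one extended multiply-accumulate reduction (via
// getExtendedAddReductionCost) instead of the sum of the separate ext, mul
// and add costs, whenever the target reports the combined form as cheaper.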
7212 Instruction *RetI = I; 7213 if (match(RetI, m_ZExtOrSExt(m_Value()))) { 7214 if (!RetI->hasOneUser()) 7215 return None; 7216 RetI = RetI->user_back(); 7217 } 7218 if (match(RetI, m_Mul(m_Value(), m_Value())) && 7219 RetI->user_back()->getOpcode() == Instruction::Add) { 7220 if (!RetI->hasOneUser()) 7221 return None; 7222 RetI = RetI->user_back(); 7223 } 7224 7225 // Test if the found instruction is a reduction, and if not return an invalid 7226 // cost specifying the parent to use the original cost modelling. 7227 if (!InLoopReductionImmediateChains.count(RetI)) 7228 return None; 7229 7230 // Find the reduction this chain is a part of and calculate the basic cost of 7231 // the reduction on its own. 7232 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 7233 Instruction *ReductionPhi = LastChain; 7234 while (!isa<PHINode>(ReductionPhi)) 7235 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 7236 7237 const RecurrenceDescriptor &RdxDesc = 7238 Legal->getReductionVars()[cast<PHINode>(ReductionPhi)]; 7239 7240 InstructionCost BaseCost = TTI.getArithmeticReductionCost( 7241 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind); 7242 7243 // If we're using ordered reductions then we can just return the base cost 7244 // here, since getArithmeticReductionCost calculates the full ordered 7245 // reduction cost when FP reassociation is not allowed. 7246 if (useOrderedReductions(RdxDesc)) 7247 return BaseCost; 7248 7249 // Get the operand that was not the reduction chain and match it to one of the 7250 // patterns, returning the better cost if it is found. 7251 Instruction *RedOp = RetI->getOperand(1) == LastChain 7252 ? dyn_cast<Instruction>(RetI->getOperand(0)) 7253 : dyn_cast<Instruction>(RetI->getOperand(1)); 7254 7255 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 7256 7257 Instruction *Op0, *Op1; 7258 if (RedOp && 7259 match(RedOp, 7260 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) && 7261 match(Op0, m_ZExtOrSExt(m_Value())) && 7262 Op0->getOpcode() == Op1->getOpcode() && 7263 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 7264 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) && 7265 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) { 7266 7267 // Matched reduce(ext(mul(ext(A), ext(B))) 7268 // Note that the extend opcodes need to all match, or if A==B they will have 7269 // been converted to zext(mul(sext(A), sext(A))) as it is known positive, 7270 // which is equally fine. 7271 bool IsUnsigned = isa<ZExtInst>(Op0); 7272 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 7273 auto *MulType = VectorType::get(Op0->getType(), VectorTy); 7274 7275 InstructionCost ExtCost = 7276 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType, 7277 TTI::CastContextHint::None, CostKind, Op0); 7278 InstructionCost MulCost = 7279 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind); 7280 InstructionCost Ext2Cost = 7281 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType, 7282 TTI::CastContextHint::None, CostKind, RedOp); 7283 7284 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7285 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7286 CostKind); 7287 7288 if (RedCost.isValid() && 7289 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost) 7290 return I == RetI ? 
RedCost : 0; 7291 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && 7292 !TheLoop->isLoopInvariant(RedOp)) { 7293 // Matched reduce(ext(A)) 7294 bool IsUnsigned = isa<ZExtInst>(RedOp); 7295 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 7296 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7297 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7298 CostKind); 7299 7300 InstructionCost ExtCost = 7301 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 7302 TTI::CastContextHint::None, CostKind, RedOp); 7303 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 7304 return I == RetI ? RedCost : 0; 7305 } else if (RedOp && 7306 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { 7307 if (match(Op0, m_ZExtOrSExt(m_Value())) && 7308 Op0->getOpcode() == Op1->getOpcode() && 7309 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 7310 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 7311 bool IsUnsigned = isa<ZExtInst>(Op0); 7312 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 7313 // Matched reduce(mul(ext, ext)) 7314 InstructionCost ExtCost = 7315 TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType, 7316 TTI::CastContextHint::None, CostKind, Op0); 7317 InstructionCost MulCost = 7318 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7319 7320 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7321 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7322 CostKind); 7323 7324 if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost) 7325 return I == RetI ? RedCost : 0; 7326 } else if (!match(I, m_ZExtOrSExt(m_Value()))) { 7327 // Matched reduce(mul()) 7328 InstructionCost MulCost = 7329 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7330 7331 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7332 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 7333 CostKind); 7334 7335 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 7336 return I == RetI ? RedCost : 0; 7337 } 7338 } 7339 7340 return I == RetI ? Optional<InstructionCost>(BaseCost) : None; 7341 } 7342 7343 InstructionCost 7344 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 7345 ElementCount VF) { 7346 // Calculate scalar cost only. Vectorization cost should be ready at this 7347 // moment. 7348 if (VF.isScalar()) { 7349 Type *ValTy = getLoadStoreType(I); 7350 const Align Alignment = getLoadStoreAlignment(I); 7351 unsigned AS = getLoadStoreAddressSpace(I); 7352 7353 return TTI.getAddressComputationCost(ValTy) + 7354 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 7355 TTI::TCK_RecipThroughput, I); 7356 } 7357 return getWideningCost(I, VF); 7358 } 7359 7360 LoopVectorizationCostModel::VectorizationCostTy 7361 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 7362 ElementCount VF) { 7363 // If we know that this instruction will remain uniform, check the cost of 7364 // the scalar version. 7365 if (isUniformAfterVectorization(I, VF)) 7366 VF = ElementCount::getFixed(1); 7367 7368 if (VF.isVector() && isProfitableToScalarize(I, VF)) 7369 return VectorizationCostTy(InstsToScalarize[VF][I], false); 7370 7371 // Forced scalars do not have any scalarization overhead. 
7372 auto ForcedScalar = ForcedScalars.find(VF); 7373 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 7374 auto InstSet = ForcedScalar->second; 7375 if (InstSet.count(I)) 7376 return VectorizationCostTy( 7377 (getInstructionCost(I, ElementCount::getFixed(1)).first * 7378 VF.getKnownMinValue()), 7379 false); 7380 } 7381 7382 Type *VectorTy; 7383 InstructionCost C = getInstructionCost(I, VF, VectorTy); 7384 7385 bool TypeNotScalarized = 7386 VF.isVector() && VectorTy->isVectorTy() && 7387 TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue(); 7388 return VectorizationCostTy(C, TypeNotScalarized); 7389 } 7390 7391 InstructionCost 7392 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 7393 ElementCount VF) const { 7394 7395 // There is no mechanism yet to create a scalable scalarization loop, 7396 // so this is currently Invalid. 7397 if (VF.isScalable()) 7398 return InstructionCost::getInvalid(); 7399 7400 if (VF.isScalar()) 7401 return 0; 7402 7403 InstructionCost Cost = 0; 7404 Type *RetTy = ToVectorTy(I->getType(), VF); 7405 if (!RetTy->isVoidTy() && 7406 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 7407 Cost += TTI.getScalarizationOverhead( 7408 cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()), 7409 true, false); 7410 7411 // Some targets keep addresses scalar. 7412 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 7413 return Cost; 7414 7415 // Some targets support efficient element stores. 7416 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 7417 return Cost; 7418 7419 // Collect operands to consider. 7420 CallInst *CI = dyn_cast<CallInst>(I); 7421 Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); 7422 7423 // Skip operands that do not require extraction/scalarization and do not incur 7424 // any overhead. 7425 SmallVector<Type *> Tys; 7426 for (auto *V : filterExtractingOperands(Ops, VF)) 7427 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 7428 return Cost + TTI.getOperandsScalarizationOverhead( 7429 filterExtractingOperands(Ops, VF), Tys); 7430 } 7431 7432 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 7433 if (VF.isScalar()) 7434 return; 7435 NumPredStores = 0; 7436 for (BasicBlock *BB : TheLoop->blocks()) { 7437 // For each instruction in the old loop. 7438 for (Instruction &I : *BB) { 7439 Value *Ptr = getLoadStorePointerOperand(&I); 7440 if (!Ptr) 7441 continue; 7442 7443 // TODO: We should generate better code and update the cost model for 7444 // predicated uniform stores. Today they are treated as any other 7445 // predicated store (see added test cases in 7446 // invariant-store-vectorization.ll). 7447 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 7448 NumPredStores++; 7449 7450 if (Legal->isUniformMemOp(I)) { 7451 // TODO: Avoid replicating loads and stores instead of 7452 // relying on instcombine to remove them. 7453 // Load: Scalar load + broadcast 7454 // Store: Scalar store + isLoopInvariantStoreValue ? 
0 : extract 7455 InstructionCost Cost; 7456 if (isa<StoreInst>(&I) && VF.isScalable() && 7457 isLegalGatherOrScatter(&I)) { 7458 Cost = getGatherScatterCost(&I, VF); 7459 setWideningDecision(&I, VF, CM_GatherScatter, Cost); 7460 } else { 7461 assert((isa<LoadInst>(&I) || !VF.isScalable()) && 7462 "Cannot yet scalarize uniform stores"); 7463 Cost = getUniformMemOpCost(&I, VF); 7464 setWideningDecision(&I, VF, CM_Scalarize, Cost); 7465 } 7466 continue; 7467 } 7468 7469 // We assume that widening is the best solution when possible. 7470 if (memoryInstructionCanBeWidened(&I, VF)) { 7471 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 7472 int ConsecutiveStride = 7473 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 7474 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7475 "Expected consecutive stride."); 7476 InstWidening Decision = 7477 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 7478 setWideningDecision(&I, VF, Decision, Cost); 7479 continue; 7480 } 7481 7482 // Choose between Interleaving, Gather/Scatter or Scalarization. 7483 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 7484 unsigned NumAccesses = 1; 7485 if (isAccessInterleaved(&I)) { 7486 auto Group = getInterleavedAccessGroup(&I); 7487 assert(Group && "Fail to get an interleaved access group."); 7488 7489 // Make one decision for the whole group. 7490 if (getWideningDecision(&I, VF) != CM_Unknown) 7491 continue; 7492 7493 NumAccesses = Group->getNumMembers(); 7494 if (interleavedAccessCanBeWidened(&I, VF)) 7495 InterleaveCost = getInterleaveGroupCost(&I, VF); 7496 } 7497 7498 InstructionCost GatherScatterCost = 7499 isLegalGatherOrScatter(&I) 7500 ? getGatherScatterCost(&I, VF) * NumAccesses 7501 : InstructionCost::getInvalid(); 7502 7503 InstructionCost ScalarizationCost = 7504 getMemInstScalarizationCost(&I, VF) * NumAccesses; 7505 7506 // Choose better solution for the current VF, 7507 // write down this decision and use it during vectorization. 7508 InstructionCost Cost; 7509 InstWidening Decision; 7510 if (InterleaveCost <= GatherScatterCost && 7511 InterleaveCost < ScalarizationCost) { 7512 Decision = CM_Interleave; 7513 Cost = InterleaveCost; 7514 } else if (GatherScatterCost < ScalarizationCost) { 7515 Decision = CM_GatherScatter; 7516 Cost = GatherScatterCost; 7517 } else { 7518 Decision = CM_Scalarize; 7519 Cost = ScalarizationCost; 7520 } 7521 // If the instructions belongs to an interleave group, the whole group 7522 // receives the same decision. The whole group receives the cost, but 7523 // the cost will actually be assigned to one instruction. 7524 if (auto Group = getInterleavedAccessGroup(&I)) 7525 setWideningDecision(Group, VF, Decision, Cost); 7526 else 7527 setWideningDecision(&I, VF, Decision, Cost); 7528 } 7529 } 7530 7531 // Make sure that any load of address and any other address computation 7532 // remains scalar unless there is gather/scatter support. This avoids 7533 // inevitable extracts into address registers, and also has the benefit of 7534 // activating LSR more, since that pass can't optimize vectorized 7535 // addresses. 7536 if (TTI.prefersVectorizedAddressing()) 7537 return; 7538 7539 // Start with all scalar pointer uses. 
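// As an illustration (hypothetical source loop, not from any test):
//   for (i = 0; i < n; ++i)
//     sum += *ptrs[i];
// the load of ptrs[i] defines the address of the dependent load, so it is
// collected into AddrDefs below and, unless it is already handled as a
// gather/scatter, later forced to remain scalar to avoid extracting each
// pointer lane out of a vector register.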
7540 SmallPtrSet<Instruction *, 8> AddrDefs;
7541 for (BasicBlock *BB : TheLoop->blocks())
7542 for (Instruction &I : *BB) {
7543 Instruction *PtrDef =
7544 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7545 if (PtrDef && TheLoop->contains(PtrDef) &&
7546 getWideningDecision(&I, VF) != CM_GatherScatter)
7547 AddrDefs.insert(PtrDef);
7548 }
7549
7550 // Add all instructions used to generate the addresses.
7551 SmallVector<Instruction *, 4> Worklist;
7552 append_range(Worklist, AddrDefs);
7553 while (!Worklist.empty()) {
7554 Instruction *I = Worklist.pop_back_val();
7555 for (auto &Op : I->operands())
7556 if (auto *InstOp = dyn_cast<Instruction>(Op))
7557 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7558 AddrDefs.insert(InstOp).second)
7559 Worklist.push_back(InstOp);
7560 }
7561
7562 for (auto *I : AddrDefs) {
7563 if (isa<LoadInst>(I)) {
7564 // Setting the desired widening decision should ideally be handled
7565 // by cost functions, but since this involves the task of finding out
7566 // if the loaded register is involved in an address computation, it is
7567 // instead changed here when we know this is the case.
7568 InstWidening Decision = getWideningDecision(I, VF);
7569 if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7570 // Scalarize a widened load of address.
7571 setWideningDecision(
7572 I, VF, CM_Scalarize,
7573 (VF.getKnownMinValue() *
7574 getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7575 else if (auto Group = getInterleavedAccessGroup(I)) {
7576 // Scalarize an interleave group of address loads.
7577 for (unsigned I = 0; I < Group->getFactor(); ++I) {
7578 if (Instruction *Member = Group->getMember(I))
7579 setWideningDecision(
7580 Member, VF, CM_Scalarize,
7581 (VF.getKnownMinValue() *
7582 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7583 }
7584 }
7585 } else
7586 // Make sure I gets scalarized and receives a cost estimate without
7587 // scalarization overhead.
7588 ForcedScalars[VF].insert(I);
7589 }
7590 }
7591
7592 InstructionCost
7593 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7594 Type *&VectorTy) {
7595 Type *RetTy = I->getType();
7596 if (canTruncateToMinimalBitwidth(I, VF))
7597 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7598 auto SE = PSE.getSE();
7599 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7600
7601 auto hasSingleCopyAfterVectorization = [this](Instruction *I,
7602 ElementCount VF) -> bool {
7603 if (VF.isScalar())
7604 return true;
7605
7606 auto Scalarized = InstsToScalarize.find(VF);
7607 assert(Scalarized != InstsToScalarize.end() &&
7608 "VF not yet analyzed for scalarization profitability");
7609 return !Scalarized->second.count(I) &&
7610 llvm::all_of(I->users(), [&](User *U) {
7611 auto *UI = cast<Instruction>(U);
7612 return !Scalarized->second.count(UI);
7613 });
7614 };
7615 (void) hasSingleCopyAfterVectorization;
7616
7617 if (isScalarAfterVectorization(I, VF)) {
7618 // With the exception of GEPs and PHIs, after scalarization there should
7619 // only be one copy of the instruction generated in the loop. This is
7620 // because the VF is either 1, or any instructions that need scalarizing
7621 // have already been dealt with by the time we get here. As a result,
7622 // we don't have to multiply the instruction cost by VF.
7623 assert(I->getOpcode() == Instruction::GetElementPtr || 7624 I->getOpcode() == Instruction::PHI || 7625 (I->getOpcode() == Instruction::BitCast && 7626 I->getType()->isPointerTy()) || 7627 hasSingleCopyAfterVectorization(I, VF)); 7628 VectorTy = RetTy; 7629 } else 7630 VectorTy = ToVectorTy(RetTy, VF); 7631 7632 // TODO: We need to estimate the cost of intrinsic calls. 7633 switch (I->getOpcode()) { 7634 case Instruction::GetElementPtr: 7635 // We mark this instruction as zero-cost because the cost of GEPs in 7636 // vectorized code depends on whether the corresponding memory instruction 7637 // is scalarized or not. Therefore, we handle GEPs with the memory 7638 // instruction cost. 7639 return 0; 7640 case Instruction::Br: { 7641 // In cases of scalarized and predicated instructions, there will be VF 7642 // predicated blocks in the vectorized loop. Each branch around these 7643 // blocks requires also an extract of its vector compare i1 element. 7644 bool ScalarPredicatedBB = false; 7645 BranchInst *BI = cast<BranchInst>(I); 7646 if (VF.isVector() && BI->isConditional() && 7647 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7648 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7649 ScalarPredicatedBB = true; 7650 7651 if (ScalarPredicatedBB) { 7652 // Not possible to scalarize scalable vector with predicated instructions. 7653 if (VF.isScalable()) 7654 return InstructionCost::getInvalid(); 7655 // Return cost for branches around scalarized and predicated blocks. 7656 auto *Vec_i1Ty = 7657 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7658 return ( 7659 TTI.getScalarizationOverhead( 7660 Vec_i1Ty, APInt::getAllOnesValue(VF.getFixedValue()), false, 7661 true) + 7662 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue())); 7663 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 7664 // The back-edge branch will remain, as will all scalar branches. 7665 return TTI.getCFInstrCost(Instruction::Br, CostKind); 7666 else 7667 // This branch will be eliminated by if-conversion. 7668 return 0; 7669 // Note: We currently assume zero cost for an unconditional branch inside 7670 // a predicated block since it will become a fall-through, although we 7671 // may decide in the future to call TTI for all branches. 7672 } 7673 case Instruction::PHI: { 7674 auto *Phi = cast<PHINode>(I); 7675 7676 // First-order recurrences are replaced by vector shuffles inside the loop. 7677 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7678 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7679 return TTI.getShuffleCost( 7680 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7681 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7682 7683 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7684 // converted into select instructions. We require N - 1 selects per phi 7685 // node, where N is the number of incoming values. 
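// For example (hypothetical IR), an if-converted merge point such as
//   %r = phi i32 [ %x, %then ], [ %y, %else ], [ %z, %other ]
// has N = 3 incoming values and becomes N - 1 = 2 selects,
//   %s = select i1 %c1, i32 %x, i32 %y
//   %r = select i1 %c2, i32 %s, i32 %z
// so its cost is modeled below as two vector selects.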
7686 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7687 return (Phi->getNumIncomingValues() - 1) * 7688 TTI.getCmpSelInstrCost( 7689 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7690 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7691 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7692 7693 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7694 } 7695 case Instruction::UDiv: 7696 case Instruction::SDiv: 7697 case Instruction::URem: 7698 case Instruction::SRem: 7699 // If we have a predicated instruction, it may not be executed for each 7700 // vector lane. Get the scalarization cost and scale this amount by the 7701 // probability of executing the predicated block. If the instruction is not 7702 // predicated, we fall through to the next case. 7703 if (VF.isVector() && isScalarWithPredication(I)) { 7704 InstructionCost Cost = 0; 7705 7706 // These instructions have a non-void type, so account for the phi nodes 7707 // that we will create. This cost is likely to be zero. The phi node 7708 // cost, if any, should be scaled by the block probability because it 7709 // models a copy at the end of each predicated block. 7710 Cost += VF.getKnownMinValue() * 7711 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7712 7713 // The cost of the non-predicated instruction. 7714 Cost += VF.getKnownMinValue() * 7715 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7716 7717 // The cost of insertelement and extractelement instructions needed for 7718 // scalarization. 7719 Cost += getScalarizationOverhead(I, VF); 7720 7721 // Scale the cost by the probability of executing the predicated blocks. 7722 // This assumes the predicated block for each vector lane is equally 7723 // likely. 7724 return Cost / getReciprocalPredBlockProb(); 7725 } 7726 LLVM_FALLTHROUGH; 7727 case Instruction::Add: 7728 case Instruction::FAdd: 7729 case Instruction::Sub: 7730 case Instruction::FSub: 7731 case Instruction::Mul: 7732 case Instruction::FMul: 7733 case Instruction::FDiv: 7734 case Instruction::FRem: 7735 case Instruction::Shl: 7736 case Instruction::LShr: 7737 case Instruction::AShr: 7738 case Instruction::And: 7739 case Instruction::Or: 7740 case Instruction::Xor: { 7741 // Since we will replace the stride by 1 the multiplication should go away. 7742 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7743 return 0; 7744 7745 // Detect reduction patterns 7746 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7747 return *RedCost; 7748 7749 // Certain instructions can be cheaper to vectorize if they have a constant 7750 // second vector operand. One example of this are shifts on x86. 
7751 Value *Op2 = I->getOperand(1); 7752 TargetTransformInfo::OperandValueProperties Op2VP; 7753 TargetTransformInfo::OperandValueKind Op2VK = 7754 TTI.getOperandInfo(Op2, Op2VP); 7755 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7756 Op2VK = TargetTransformInfo::OK_UniformValue; 7757 7758 SmallVector<const Value *, 4> Operands(I->operand_values()); 7759 return TTI.getArithmeticInstrCost( 7760 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7761 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7762 } 7763 case Instruction::FNeg: { 7764 return TTI.getArithmeticInstrCost( 7765 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7766 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, 7767 TargetTransformInfo::OP_None, I->getOperand(0), I); 7768 } 7769 case Instruction::Select: { 7770 SelectInst *SI = cast<SelectInst>(I); 7771 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7772 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7773 7774 const Value *Op0, *Op1; 7775 using namespace llvm::PatternMatch; 7776 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7777 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7778 // select x, y, false --> x & y 7779 // select x, true, y --> x | y 7780 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7781 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7782 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7783 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7784 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7785 Op1->getType()->getScalarSizeInBits() == 1); 7786 7787 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7788 return TTI.getArithmeticInstrCost( 7789 match(I, m_LogicalOr()) ? 
Instruction::Or : Instruction::And, VectorTy, 7790 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7791 } 7792 7793 Type *CondTy = SI->getCondition()->getType(); 7794 if (!ScalarCond) 7795 CondTy = VectorType::get(CondTy, VF); 7796 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, 7797 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7798 } 7799 case Instruction::ICmp: 7800 case Instruction::FCmp: { 7801 Type *ValTy = I->getOperand(0)->getType(); 7802 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7803 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7804 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7805 VectorTy = ToVectorTy(ValTy, VF); 7806 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7807 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7808 } 7809 case Instruction::Store: 7810 case Instruction::Load: { 7811 ElementCount Width = VF; 7812 if (Width.isVector()) { 7813 InstWidening Decision = getWideningDecision(I, Width); 7814 assert(Decision != CM_Unknown && 7815 "CM decision should be taken at this point"); 7816 if (Decision == CM_Scalarize) 7817 Width = ElementCount::getFixed(1); 7818 } 7819 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7820 return getMemoryInstructionCost(I, VF); 7821 } 7822 case Instruction::BitCast: 7823 if (I->getType()->isPointerTy()) 7824 return 0; 7825 LLVM_FALLTHROUGH; 7826 case Instruction::ZExt: 7827 case Instruction::SExt: 7828 case Instruction::FPToUI: 7829 case Instruction::FPToSI: 7830 case Instruction::FPExt: 7831 case Instruction::PtrToInt: 7832 case Instruction::IntToPtr: 7833 case Instruction::SIToFP: 7834 case Instruction::UIToFP: 7835 case Instruction::Trunc: 7836 case Instruction::FPTrunc: { 7837 // Computes the CastContextHint from a Load/Store instruction. 7838 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7839 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7840 "Expected a load or a store!"); 7841 7842 if (VF.isScalar() || !TheLoop->contains(I)) 7843 return TTI::CastContextHint::Normal; 7844 7845 switch (getWideningDecision(I, VF)) { 7846 case LoopVectorizationCostModel::CM_GatherScatter: 7847 return TTI::CastContextHint::GatherScatter; 7848 case LoopVectorizationCostModel::CM_Interleave: 7849 return TTI::CastContextHint::Interleave; 7850 case LoopVectorizationCostModel::CM_Scalarize: 7851 case LoopVectorizationCostModel::CM_Widen: 7852 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7853 : TTI::CastContextHint::Normal; 7854 case LoopVectorizationCostModel::CM_Widen_Reverse: 7855 return TTI::CastContextHint::Reversed; 7856 case LoopVectorizationCostModel::CM_Unknown: 7857 llvm_unreachable("Instr did not go through cost modelling?"); 7858 } 7859 7860 llvm_unreachable("Unhandled case!"); 7861 }; 7862 7863 unsigned Opcode = I->getOpcode(); 7864 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7865 // For Trunc, the context is the only user, which must be a StoreInst. 7866 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7867 if (I->hasOneUse()) 7868 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7869 CCH = ComputeCCH(Store); 7870 } 7871 // For Z/Sext, the context is the operand, which must be a LoadInst. 
7872 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7873 Opcode == Instruction::FPExt) { 7874 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7875 CCH = ComputeCCH(Load); 7876 } 7877 7878 // We optimize the truncation of induction variables having constant 7879 // integer steps. The cost of these truncations is the same as the scalar 7880 // operation. 7881 if (isOptimizableIVTruncate(I, VF)) { 7882 auto *Trunc = cast<TruncInst>(I); 7883 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7884 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7885 } 7886 7887 // Detect reduction patterns 7888 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7889 return *RedCost; 7890 7891 Type *SrcScalarTy = I->getOperand(0)->getType(); 7892 Type *SrcVecTy = 7893 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7894 if (canTruncateToMinimalBitwidth(I, VF)) { 7895 // This cast is going to be shrunk. This may remove the cast or it might 7896 // turn it into slightly different cast. For example, if MinBW == 16, 7897 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7898 // 7899 // Calculate the modified src and dest types. 7900 Type *MinVecTy = VectorTy; 7901 if (Opcode == Instruction::Trunc) { 7902 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7903 VectorTy = 7904 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7905 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7906 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7907 VectorTy = 7908 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7909 } 7910 } 7911 7912 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7913 } 7914 case Instruction::Call: { 7915 bool NeedToScalarize; 7916 CallInst *CI = cast<CallInst>(I); 7917 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7918 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7919 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7920 return std::min(CallCost, IntrinsicCost); 7921 } 7922 return CallCost; 7923 } 7924 case Instruction::ExtractValue: 7925 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7926 case Instruction::Alloca: 7927 // We cannot easily widen alloca to a scalable alloca, as 7928 // the result would need to be a vector of pointers. 7929 if (VF.isScalable()) 7930 return InstructionCost::getInvalid(); 7931 LLVM_FALLTHROUGH; 7932 default: 7933 // This opcode is unknown. Assume that it is the same as 'mul'. 7934 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7935 } // end of switch. 
7936 } 7937 7938 char LoopVectorize::ID = 0; 7939 7940 static const char lv_name[] = "Loop Vectorization"; 7941 7942 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7943 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7944 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7945 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7946 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7947 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7948 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7949 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7950 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7951 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7952 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7953 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7954 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7955 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7956 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7957 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7958 7959 namespace llvm { 7960 7961 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7962 7963 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7964 bool VectorizeOnlyWhenForced) { 7965 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7966 } 7967 7968 } // end namespace llvm 7969 7970 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7971 // Check if the pointer operand of a load or store instruction is 7972 // consecutive. 7973 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7974 return Legal->isConsecutivePtr(Ptr); 7975 return false; 7976 } 7977 7978 void LoopVectorizationCostModel::collectValuesToIgnore() { 7979 // Ignore ephemeral values. 7980 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7981 7982 // Ignore type-promoting instructions we identified during reduction 7983 // detection. 7984 for (auto &Reduction : Legal->getReductionVars()) { 7985 RecurrenceDescriptor &RedDes = Reduction.second; 7986 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7987 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7988 } 7989 // Ignore type-casting instructions we identified during induction 7990 // detection. 7991 for (auto &Induction : Legal->getInductionVars()) { 7992 InductionDescriptor &IndDes = Induction.second; 7993 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7994 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7995 } 7996 } 7997 7998 void LoopVectorizationCostModel::collectInLoopReductions() { 7999 for (auto &Reduction : Legal->getReductionVars()) { 8000 PHINode *Phi = Reduction.first; 8001 RecurrenceDescriptor &RdxDesc = Reduction.second; 8002 8003 // We don't collect reductions that are type promoted (yet). 8004 if (RdxDesc.getRecurrenceType() != Phi->getType()) 8005 continue; 8006 8007 // If the target would prefer this reduction to happen "in-loop", then we 8008 // want to record it as such. 8009 unsigned Opcode = RdxDesc.getOpcode(); 8010 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 8011 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 8012 TargetTransformInfo::ReductionFlags())) 8013 continue; 8014 8015 // Check that we can correctly put the reductions into the loop, by 8016 // finding the chain of operations that leads from the phi to the loop 8017 // exit value. 
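// For instance (hypothetical IR), for a simple integer add reduction
//   %sum      = phi i32 [ 0, %preheader ], [ %sum.next, %latch ]
//   %v        = load i32, i32* %p
//   %sum.next = add i32 %sum, %v
// the chain below is { %sum.next }, and InLoopReductionImmediateChains then
// records %sum.next -> %sum, letting the cost model later walk from any link
// in the chain back to the reduction phi.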
8018 SmallVector<Instruction *, 4> ReductionOperations =
8019 RdxDesc.getReductionOpChain(Phi, TheLoop);
8020 bool InLoop = !ReductionOperations.empty();
8021 if (InLoop) {
8022 InLoopReductionChains[Phi] = ReductionOperations;
8023 // Add the elements to InLoopReductionImmediateChains for cost modelling.
8024 Instruction *LastChain = Phi;
8025 for (auto *I : ReductionOperations) {
8026 InLoopReductionImmediateChains[I] = LastChain;
8027 LastChain = I;
8028 }
8029 }
8030 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
8031 << " reduction for phi: " << *Phi << "\n");
8032 }
8033 }
8034
8035 // TODO: we could return a pair of values that specify the max VF and
8036 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
8037 // `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
8038 // doesn't have a cost model that can choose which plan to execute if
8039 // more than one is generated.
8040 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
8041 LoopVectorizationCostModel &CM) {
8042 unsigned WidestType;
8043 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
8044 return WidestVectorRegBits / WidestType;
8045 }
8046
8047 VectorizationFactor
8048 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
8049 assert(!UserVF.isScalable() && "scalable vectors not yet supported");
8050 ElementCount VF = UserVF;
8051 // Outer loop handling: Outer loops may require CFG and instruction level
8052 // transformations before even evaluating whether vectorization is profitable.
8053 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
8054 // the vectorization pipeline.
8055 if (!OrigLoop->isInnermost()) {
8056 // If the user doesn't provide a vectorization factor, determine a
8057 // reasonable one.
8058 if (UserVF.isZero()) {
8059 VF = ElementCount::getFixed(determineVPlanVF(
8060 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
8061 .getFixedSize(),
8062 CM));
8063 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
8064
8065 // Make sure we have a VF > 1 for stress testing.
8066 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
8067 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
8068 << "overriding computed VF.\n");
8069 VF = ElementCount::getFixed(4);
8070 }
8071 }
8072 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
8073 assert(isPowerOf2_32(VF.getKnownMinValue()) &&
8074 "VF needs to be a power of two");
8075 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
8076 << "VF " << VF << " to build VPlans.\n");
8077 buildVPlans(VF, VF);
8078
8079 // For VPlan build stress testing, we bail out after VPlan construction.
8080 if (VPlanBuildStressTest)
8081 return VectorizationFactor::Disabled();
8082
8083 return {VF, 0 /*Cost*/};
8084 }
8085
8086 LLVM_DEBUG(
8087 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
8088 "VPlan-native path.\n");
8089 return VectorizationFactor::Disabled();
8090 }
8091
8092 Optional<VectorizationFactor>
8093 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
8094 assert(OrigLoop->isInnermost() && "Inner loop expected.");
8095 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
8096 if (!MaxFactors) // Cases that should not be vectorized or interleaved.
8097 return None;
8098
8099 // Invalidate interleave groups if all blocks of the loop will be predicated.
8100 if (CM.blockNeedsPredication(OrigLoop->getHeader()) && 8101 !useMaskedInterleavedAccesses(*TTI)) { 8102 LLVM_DEBUG( 8103 dbgs() 8104 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 8105 "which requires masked-interleaved support.\n"); 8106 if (CM.InterleaveInfo.invalidateGroups()) 8107 // Invalidating interleave groups also requires invalidating all decisions 8108 // based on them, which includes widening decisions and uniform and scalar 8109 // values. 8110 CM.invalidateCostModelingDecisions(); 8111 } 8112 8113 ElementCount MaxUserVF = 8114 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; 8115 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); 8116 if (!UserVF.isZero() && UserVFIsLegal) { 8117 assert(isPowerOf2_32(UserVF.getKnownMinValue()) && 8118 "VF needs to be a power of two"); 8119 // Collect the instructions (and their associated costs) that will be more 8120 // profitable to scalarize. 8121 if (CM.selectUserVectorizationFactor(UserVF)) { 8122 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 8123 CM.collectInLoopReductions(); 8124 buildVPlansWithVPRecipes(UserVF, UserVF); 8125 LLVM_DEBUG(printPlans(dbgs())); 8126 return {{UserVF, 0}}; 8127 } else 8128 reportVectorizationInfo("UserVF ignored because of invalid costs.", 8129 "InvalidCost", ORE, OrigLoop); 8130 } 8131 8132 // Populate the set of Vectorization Factor Candidates. 8133 ElementCountSet VFCandidates; 8134 for (auto VF = ElementCount::getFixed(1); 8135 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) 8136 VFCandidates.insert(VF); 8137 for (auto VF = ElementCount::getScalable(1); 8138 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) 8139 VFCandidates.insert(VF); 8140 8141 for (const auto &VF : VFCandidates) { 8142 // Collect Uniform and Scalar instructions after vectorization with VF. 8143 CM.collectUniformsAndScalars(VF); 8144 8145 // Collect the instructions (and their associated costs) that will be more 8146 // profitable to scalarize. 8147 if (VF.isVector()) 8148 CM.collectInstsToScalarize(VF); 8149 } 8150 8151 CM.collectInLoopReductions(); 8152 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); 8153 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); 8154 8155 LLVM_DEBUG(printPlans(dbgs())); 8156 if (!MaxFactors.hasVector()) 8157 return VectorizationFactor::Disabled(); 8158 8159 // Select the optimal vectorization factor. 8160 auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); 8161 8162 // Check if it is profitable to vectorize with runtime checks. 
  unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
  if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
    bool PragmaThresholdReached =
        NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
    bool ThresholdReached =
        NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
    if ((ThresholdReached && !Hints.allowReordering()) ||
        PragmaThresholdReached) {
      ORE->emit([&]() {
        return OptimizationRemarkAnalysisAliasing(
                   DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
                   OrigLoop->getHeader())
               << "loop not vectorized: cannot prove it is safe to reorder "
                  "memory operations";
      });
      LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
      Hints.emitRemarkWithHints();
      return VectorizationFactor::Disabled();
    }
  }
  return SelectedVF;
}

void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
  LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
                    << '\n');
  BestVF = VF;
  BestUF = UF;

  erase_if(VPlans, [VF](const VPlanPtr &Plan) { return !Plan->hasVF(VF); });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
}

void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
                                           DominatorTree *DT) {
  // Perform the actual loop transformation.

  // 1. Create a new empty loop. Unlink the old loop and connect the new one.
  assert(BestVF.hasValue() && "Vectorization Factor is missing");
  assert(VPlans.size() == 1 && "Not a single VPlan to execute.");

  VPTransformState State{
      *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()};
  State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
  State.TripCount = ILV.getOrCreateTripCount(nullptr);
  State.CanonicalIV = ILV.Induction;

  ILV.printDebugTracesAtStart();

  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
  //
  //===------------------------------------------------===//

  // 2. Copy and widen instructions from the old loop into the new loop.
  VPlans.front()->execute(&State);

  // 3. Fix the vectorized code: take care of header phi's, live-outs,
  //    predication, updating analyses.
  ILV.fixVectorizedLoop(State);

  ILV.printDebugTracesAtEnd();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
  for (const auto &Plan : VPlans)
    if (PrintVPlansInDotFormat)
      Plan->printDOT(O);
    else
      Plan->print(O);
}
#endif

void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
    SmallPtrSetImpl<Instruction *> &DeadInstructions) {

  // We create new control-flow for the vectorized loop, so the original exit
  // condition will be dead after vectorization if it's only used by the
  // terminator.
  SmallVector<BasicBlock *> ExitingBlocks;
  OrigLoop->getExitingBlocks(ExitingBlocks);
  for (auto *BB : ExitingBlocks) {
    auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
    if (!Cmp || !Cmp->hasOneUse())
      continue;

    // TODO: we should introduce a getUniqueExitingBlocks on Loop
    if (!DeadInstructions.insert(Cmp).second)
      continue;

    // The operands of the icmp are often a dead trunc, used by IndUpdate.
    // TODO: can recurse through operands in general
    for (Value *Op : Cmp->operands()) {
      if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
    }
  }

  // We create new "steps" for induction variable updates to which the original
  // induction variables map. An original update instruction will be dead if
  // all its users except the induction variable are dead.
  auto *Latch = OrigLoop->getLoopLatch();
  for (auto &Induction : Legal->getInductionVars()) {
    PHINode *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
    if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
      continue;

    if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          return U == Ind || DeadInstructions.count(cast<Instruction>(U));
        }))
      DeadInstructions.insert(IndUpdate);

    // We record as "Dead" also the type-casting instructions we had identified
    // during induction analysis. We don't need any handling for them in the
    // vectorized loop because we have proven that, under a proper runtime
    // test guarding the vectorized loop, the value of the phi, and the casted
    // value of the phi, are the same. The last instruction in this casting
    // chain will get its scalar/vector/widened def from the
    // scalar/vector/widened def of the respective phi node. Any other casts in
    // the induction def-use chain have no other uses outside the phi update
    // chain, and will be ignored.
    InductionDescriptor &IndDes = Induction.second;
    const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
    DeadInstructions.insert(Casts.begin(), Casts.end());
  }
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
                                        Instruction::BinaryOps BinOp) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
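  // For example, with an integer induction, Val = %iv, Step = %stride and
  // StartIdx = 2, this emits "%iv + 2 * %stride", i.e. the scalar value at
  // which the third unrolled part starts.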
  Type *Ty = Val->getType();
  assert(!Ty->isVectorTy() && "Val must be a scalar");

  if (Ty->isFloatingPointTy()) {
    Constant *C = ConstantFP::get(Ty, (double)StartIdx);

    // Floating-point operations inherit FMF via the builder's flags.
    Value *MulOp = Builder.CreateFMul(C, Step);
    return Builder.CreateBinOp(BinOp, Val, MulOp);
  }
  Constant *C = ConstantInt::get(Ty, StartIdx);
  return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
}

static void AddRuntimeUnrollDisableMetaData(Loop *L) {
  SmallVector<Metadata *, 4> MDs;
  // Reserve first location for self reference to the LoopID metadata node.
  MDs.push_back(nullptr);
  bool IsUnrollMetadata = false;
  MDNode *LoopID = L->getLoopID();
  if (LoopID) {
    // First find existing loop unrolling disable metadata.
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
      if (MD) {
        const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata =
            S && S->getString().startswith("llvm.loop.unroll.disable");
      }
      MDs.push_back(LoopID->getOperand(i));
    }
  }

  if (!IsUnrollMetadata) {
    // Add runtime unroll disable metadata.
    LLVMContext &Context = L->getHeader()->getContext();
    SmallVector<Metadata *, 1> DisableOperands;
    DisableOperands.push_back(
        MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
    MDNode *DisableNode = MDNode::get(Context, DisableOperands);
    MDs.push_back(DisableNode);
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);
    L->setLoopID(NewLoopID);
  }
}

//===--------------------------------------------------------------------===//
// EpilogueVectorizerMainLoop
//===--------------------------------------------------------------------===//

/// This function is partially responsible for generating the control flow
/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
  MDNode *OrigLoopID = OrigLoop->getLoopID();
  Loop *Lp = createVectorLoopSkeleton("");

  // Generate the code to check the minimum iteration count of the vector
  // epilogue (see below).
  EPI.EpilogueIterationCountCheck =
      emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
  EPI.EpilogueIterationCountCheck->setName("iter.check");

  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
  EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);

  // Generate the iteration count check for the main loop, *after* the check
  // for the epilogue loop, so that the path-length is shorter for the case
  // that goes directly through the vector epilogue. The longer path-length for
  // the main loop is compensated for by the gain from vectorizing the larger
  // trip count. Note: the branch will get updated later on when we vectorize
  // the epilogue.
  EPI.MainLoopIterationCountCheck =
      emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);

  // Generate the induction variable.
  OldInduction = Legal->getPrimaryInduction();
  Type *IdxTy = Legal->getWidestInductionType();
  Value *StartIdx = ConstantInt::get(IdxTy, 0);
  Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  EPI.VectorTripCount = CountRoundDown;
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // Skip induction resume value creation here because they will be created in
  // the second pass. If we created them here, they wouldn't be used anyway,
  // because the VPlan in the second pass still contains the inductions from
  // the original loop.

  return completeLoopSkeleton(Lp, OrigLoopID);
}

void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
  LLVM_DEBUG({
    dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
           << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
           << ", Main Loop UF:" << EPI.MainLoopUF
           << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
           << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  });
}

void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
  DEBUG_WITH_TYPE(VerboseDebug, {
    dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n";
  });
}

BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
    Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
  assert(L && "Expected valid Loop.");
  assert(Bypass && "Expected valid bypass basic block.");
  unsigned VFactor =
      ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue();
  unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
  Value *Count = getOrCreateTripCount(L);
  // Reuse existing vector loop preheader for TC checks.
  // Note that a new preheader block is generated for the vector loop.
  BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
  IRBuilder<> Builder(TCCheckBlock->getTerminator());

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the main vector loop.
  auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF)
               ? ICmpInst::ICMP_ULE
               : ICmpInst::ICMP_ULT;

  Value *CheckMinIters = Builder.CreateICmp(
      P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor),
      "min.iters.check");

  if (!ForEpilogue)
    TCCheckBlock->setName("vector.main.loop.iter.check");

  // Create new preheader for vector loop.
  LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
                                   DT, LI, nullptr, "vector.ph");

  if (ForEpilogue) {
    assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
                                 DT->getNode(Bypass)->getIDom()) &&
           "TC check is expected to dominate Bypass");

    // Update dominator for Bypass & LoopExit.
    DT->changeImmediateDominator(Bypass, TCCheckBlock);
    if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
      // For loops with multiple exits, there's no edge from the middle block
      // to exit blocks (as the epilogue must run) and thus no need to update
      // the immediate dominator of the exit blocks.
      DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);

    LoopBypassBlocks.push_back(TCCheckBlock);

    // Save the trip count so we don't have to regenerate it in the
    // vec.epilog.iter.check. This is safe to do because the trip count
    // generated here dominates the vector epilog iter check.
    EPI.TripCount = Count;
  }

  ReplaceInstWithInst(
      TCCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));

  return TCCheckBlock;
}

//===--------------------------------------------------------------------===//
// EpilogueVectorizerEpilogueLoop
//===--------------------------------------------------------------------===//

/// This function is partially responsible for generating the control flow
/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
BasicBlock *
EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
  MDNode *OrigLoopID = OrigLoop->getLoopID();
  Loop *Lp = createVectorLoopSkeleton("vec.epilog.");

  // Now, compare the remaining count and if there aren't enough iterations to
  // execute the vectorized epilogue skip to the scalar part.
  BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
  VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
  LoopVectorPreHeader =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, "vec.epilog.ph");
  emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
                                          VecEpilogueIterationCountCheck);

  // Adjust the control flow taking the state info from the main loop
  // vectorization into account.
  assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
         "expected this to be saved from the previous pass.");
  EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
      VecEpilogueIterationCountCheck, LoopVectorPreHeader);

  DT->changeImmediateDominator(LoopVectorPreHeader,
                               EPI.MainLoopIterationCountCheck);

  EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
      VecEpilogueIterationCountCheck, LoopScalarPreHeader);

  if (EPI.SCEVSafetyCheck)
    EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
        VecEpilogueIterationCountCheck, LoopScalarPreHeader);
  if (EPI.MemSafetyCheck)
    EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
        VecEpilogueIterationCountCheck, LoopScalarPreHeader);

  DT->changeImmediateDominator(
      VecEpilogueIterationCountCheck,
      VecEpilogueIterationCountCheck->getSinglePredecessor());

  DT->changeImmediateDominator(LoopScalarPreHeader,
                               EPI.EpilogueIterationCountCheck);
  if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
    // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
    // dominator of the exit blocks.
    DT->changeImmediateDominator(LoopExitBlock,
                                 EPI.EpilogueIterationCountCheck);

  // Keep track of bypass blocks, as they feed start values to the induction
  // phis in the scalar loop preheader.
  if (EPI.SCEVSafetyCheck)
    LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
  if (EPI.MemSafetyCheck)
    LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
  LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);

  // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
  Type *IdxTy = Legal->getWidestInductionType();
  PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
                                         LoopVectorPreHeader->getFirstNonPHI());
  EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
  EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
                           EPI.MainLoopIterationCountCheck);

  // Generate the induction variable.
  OldInduction = Legal->getPrimaryInduction();
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
  Value *StartIdx = EPResumeVal;
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // Generate induction resume values. These variables save the new starting
  // indexes for the scalar loop. They are used to test if there are any tail
  // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to the iteration
  // count check, the resume value for the induction variable comes from
  // the trip count of the main vector loop, hence passing the AdditionalBypass
  // argument.
  createInductionResumeValues(Lp, CountRoundDown,
                              {VecEpilogueIterationCountCheck,
                               EPI.VectorTripCount} /* AdditionalBypass */);

  AddRuntimeUnrollDisableMetaData(Lp);
  return completeLoopSkeleton(Lp, OrigLoopID);
}

BasicBlock *
EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
    Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {

  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  assert(
      (!isa<Instruction>(EPI.TripCount) ||
       DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
      "saved trip count does not dominate insertion point.");
  Value *TC = EPI.TripCount;
  IRBuilder<> Builder(Insert->getTerminator());
  Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the vector epilogue loop.
  auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
      ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;

  Value *CheckMinIters = Builder.CreateICmp(
      P, Count,
      ConstantInt::get(Count->getType(),
                       EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
      "min.epilog.iters.check");

  ReplaceInstWithInst(
      Insert->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));

  LoopBypassBlocks.push_back(Insert);
  return Insert;
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
  LLVM_DEBUG({
    dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
           << "Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
           << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  });
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
  DEBUG_WITH_TYPE(VerboseDebug, {
    dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
  });
}

bool LoopVectorizationPlanner::getDecisionAndClampRange(
    const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
  assert(!Range.isEmpty() && "Trying to test an empty VF range.");
  bool PredicateAtRangeStart = Predicate(Range.Start);

  for (ElementCount TmpVF = Range.Start * 2;
       ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
    if (Predicate(TmpVF) != PredicateAtRangeStart) {
      Range.End = TmpVF;
      break;
    }

  return PredicateAtRangeStart;
}

/// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
/// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
/// of VF's starting at a given VF and extending it as much as possible. Each
/// vectorization decision can potentially shorten this sub-range during
/// buildVPlan().
void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
                                           ElementCount MaxVF) {
  auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
    VFRange SubRange = {VF, MaxVFPlusOne};
    VPlans.push_back(buildVPlan(SubRange));
    VF = SubRange.End;
  }
}

VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
                                         VPlanPtr &Plan) {
  assert(is_contained(predecessors(Dst), Src) && "Invalid edge");

  // Look for cached value.
  std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
  EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
  if (ECEntryIt != EdgeMaskCache.end())
    return ECEntryIt->second;

  VPValue *SrcMask = createBlockInMask(Src, Plan);

  // The terminator has to be a branch inst!
  BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
  assert(BI && "Unexpected terminator found");

  if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
    return EdgeMaskCache[Edge] = SrcMask;

  // If source is an exiting block, we know the exit edge is dynamically dead
  // in the vector loop, and thus we don't need to restrict the mask. Avoid
  // adding uses of an otherwise potentially dead instruction.
  if (OrigLoop->isLoopExiting(Src))
    return EdgeMaskCache[Edge] = SrcMask;

  VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
  assert(EdgeMask && "No Edge Mask found for condition");

  if (BI->getSuccessor(0) != Dst)
    EdgeMask = Builder.createNot(EdgeMask);

  if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
    // The condition is 'SrcMask && EdgeMask', which is equivalent to
    // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
    // The select version does not introduce new UB if SrcMask is false and
    // EdgeMask is poison. Using 'and' here would introduce undefined behavior.
    VPValue *False = Plan->getOrAddVPValue(
        ConstantInt::getFalse(BI->getCondition()->getType()));
    EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False);
  }

  return EdgeMaskCache[Edge] = EdgeMask;
}

VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
  assert(OrigLoop->contains(BB) && "Block is not a part of a loop");

  // Look for cached value.
  BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
  if (BCEntryIt != BlockMaskCache.end())
    return BCEntryIt->second;

  // All-one mask is modelled as no-mask following the convention for masked
  // load/store/gather/scatter. Initialize BlockMask to no-mask.
  VPValue *BlockMask = nullptr;

  if (OrigLoop->getHeader() == BB) {
    if (!CM.blockNeedsPredication(BB))
      return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.

    // Create the block in mask as the first non-phi instruction in the block.
    VPBuilder::InsertPointGuard Guard(Builder);
    auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
    Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);

    // Introduce the early-exit compare IV <= BTC to form header block mask.
    // This is used instead of IV < TC because TC may wrap, unlike BTC.
    // Start by constructing the desired canonical IV.
    VPValue *IV = nullptr;
    if (Legal->getPrimaryInduction())
      IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
    else {
      auto IVRecipe = new VPWidenCanonicalIVRecipe();
      Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
      IV = IVRecipe->getVPSingleValue();
    }
    VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
    bool TailFolded = !CM.isScalarEpilogueAllowed();

    if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
      // While ActiveLaneMask is a binary op that consumes the loop tripcount
      // as a second argument, we only pass the IV here and extract the
      // tripcount from the transform state where codegen of the VP
      // instructions happens.
      BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
    } else {
      BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
    }
    return BlockMaskCache[BB] = BlockMask;
  }

  // This is the block mask. We OR all incoming edges.
  for (auto *Predecessor : predecessors(BB)) {
    VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
    if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
      return BlockMaskCache[BB] = EdgeMask;

    if (!BlockMask) { // BlockMask has its initialized nullptr value.
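      // The first incoming edge mask seen simply becomes the block mask; any
      // further edge masks are OR'd into it below.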
      BlockMask = EdgeMask;
      continue;
    }

    BlockMask = Builder.createOr(BlockMask, EdgeMask);
  }

  return BlockMaskCache[BB] = BlockMask;
}

VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
                                                ArrayRef<VPValue *> Operands,
                                                VFRange &Range,
                                                VPlanPtr &Plan) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Must be called with either a load or store");

  auto willWiden = [&](ElementCount VF) -> bool {
    if (VF.isScalar())
      return false;
    LoopVectorizationCostModel::InstWidening Decision =
        CM.getWideningDecision(I, VF);
    assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
           "CM decision should be taken at this point.");
    if (Decision == LoopVectorizationCostModel::CM_Interleave)
      return true;
    if (CM.isScalarAfterVectorization(I, VF) ||
        CM.isProfitableToScalarize(I, VF))
      return false;
    return Decision != LoopVectorizationCostModel::CM_Scalarize;
  };

  if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
    return nullptr;

  VPValue *Mask = nullptr;
  if (Legal->isMaskRequired(I))
    Mask = createBlockInMask(I->getParent(), Plan);

  if (LoadInst *Load = dyn_cast<LoadInst>(I))
    return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask);

  StoreInst *Store = cast<StoreInst>(I);
  return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
                                            Mask);
}

VPWidenIntOrFpInductionRecipe *
VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi,
                                           ArrayRef<VPValue *> Operands) const {
  // Check if this is an integer or fp induction. If so, build the recipe that
  // produces its scalar and vector values.
  InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
  if (II.getKind() == InductionDescriptor::IK_IntInduction ||
      II.getKind() == InductionDescriptor::IK_FpInduction) {
    assert(II.getStartValue() ==
           Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
    const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts();
    return new VPWidenIntOrFpInductionRecipe(
        Phi, Operands[0], Casts.empty() ? nullptr : Casts.front());
  }

  return nullptr;
}

VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
    TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
    VPlan &Plan) const {
  // Optimize the special case where the source is a constant integer
  // induction variable. Notice that we can only optimize the 'trunc' case
  // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
  // (c) other casts depend on pointer size.

  // Determine whether \p K is a truncation based on an induction variable that
  // can be optimized.
  auto isOptimizableIVTruncate =
      [&](Instruction *K) -> std::function<bool(ElementCount)> {
    return [=](ElementCount VF) -> bool {
      return CM.isOptimizableIVTruncate(K, VF);
    };
  };

  if (LoopVectorizationPlanner::getDecisionAndClampRange(
          isOptimizableIVTruncate(I), Range)) {

    InductionDescriptor II =
        Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
    VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
    return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
                                             Start, nullptr, I);
  }
  return nullptr;
}

VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
                                                ArrayRef<VPValue *> Operands,
                                                VPlanPtr &Plan) {
  // If all incoming values are equal, the incoming VPValue can be used
  // directly instead of creating a new VPBlendRecipe.
  VPValue *FirstIncoming = Operands[0];
  if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
        return FirstIncoming == Inc;
      })) {
    return Operands[0];
  }

  // We know that all PHIs in non-header blocks are converted into selects, so
  // we don't have to worry about the insertion order and we can just use the
  // builder. At this point we generate the predication tree. There may be
  // duplications since this is a simple recursive scan, but future
  // optimizations will clean it up.
  SmallVector<VPValue *, 2> OperandsWithMask;
  unsigned NumIncoming = Phi->getNumIncomingValues();

  for (unsigned In = 0; In < NumIncoming; In++) {
    VPValue *EdgeMask =
        createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
    assert((EdgeMask || NumIncoming == 1) &&
           "Multiple predecessors with one having a full mask");
    OperandsWithMask.push_back(Operands[In]);
    if (EdgeMask)
      OperandsWithMask.push_back(EdgeMask);
  }
  return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
}

VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
                                                   ArrayRef<VPValue *> Operands,
                                                   VFRange &Range) const {

  bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
      [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); },
      Range);

  if (IsPredicated)
    return nullptr;

  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
             ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
             ID == Intrinsic::pseudoprobe ||
             ID == Intrinsic::experimental_noalias_scope_decl))
    return nullptr;

  auto willWiden = [&](ElementCount VF) -> bool {
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The following case may be scalarized depending on the VF.
    // The flag shows whether we use an Intrinsic or a regular Call for the
    // vectorized version of the instruction.
    // Is it beneficial to perform an intrinsic call compared to a lib call?
    bool NeedToScalarize = false;
    InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
    InstructionCost IntrinsicCost = ID ?
        CM.getVectorIntrinsicCost(CI, VF) : 0;
    bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
    return UseVectorIntrinsic || !NeedToScalarize;
  };

  if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
    return nullptr;

  ArrayRef<VPValue *> Ops = Operands.take_front(CI->getNumArgOperands());
  return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
}

bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
  assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
         !isa<StoreInst>(I) && "Instruction should have been handled earlier");
  // Instruction should be widened, unless it is scalar after vectorization,
  // scalarization is profitable or it is predicated.
  auto WillScalarize = [this, I](ElementCount VF) -> bool {
    return CM.isScalarAfterVectorization(I, VF) ||
           CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I);
  };
  return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
                                                             Range);
}

VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
                                           ArrayRef<VPValue *> Operands) const {
  auto IsVectorizableOpcode = [](unsigned Opcode) {
    switch (Opcode) {
    case Instruction::Add:
    case Instruction::And:
    case Instruction::AShr:
    case Instruction::BitCast:
    case Instruction::FAdd:
    case Instruction::FCmp:
    case Instruction::FDiv:
    case Instruction::FMul:
    case Instruction::FNeg:
    case Instruction::FPExt:
    case Instruction::FPToSI:
    case Instruction::FPToUI:
    case Instruction::FPTrunc:
    case Instruction::FRem:
    case Instruction::FSub:
    case Instruction::ICmp:
    case Instruction::IntToPtr:
    case Instruction::LShr:
    case Instruction::Mul:
    case Instruction::Or:
    case Instruction::PtrToInt:
    case Instruction::SDiv:
    case Instruction::Select:
    case Instruction::SExt:
    case Instruction::Shl:
    case Instruction::SIToFP:
    case Instruction::SRem:
    case Instruction::Sub:
    case Instruction::Trunc:
    case Instruction::UDiv:
    case Instruction::UIToFP:
    case Instruction::URem:
    case Instruction::Xor:
    case Instruction::ZExt:
      return true;
    }
    return false;
  };

  if (!IsVectorizableOpcode(I->getOpcode()))
    return nullptr;

  // Success: widen this instruction.
  return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
}

void VPRecipeBuilder::fixHeaderPhis() {
  BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
  for (VPWidenPHIRecipe *R : PhisToFix) {
    auto *PN = cast<PHINode>(R->getUnderlyingValue());
    VPRecipeBase *IncR =
        getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
    R->addOperand(IncR->getVPSingleValue());
  }
}

VPBasicBlock *VPRecipeBuilder::handleReplication(
    Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
    VPlanPtr &Plan) {
  bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
      Range);

  bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](ElementCount VF) { return CM.isPredicatedInst(I); }, Range);

  // Even if the instruction is not marked as uniform, there are certain
  // intrinsic calls that can be effectively treated as such, so we check for
  // them here. Conservatively, we only do this for scalable vectors, since
  // for fixed-width VFs we can always fall back on full scalarization.
  if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
    switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
    case Intrinsic::assume:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // For scalable vectors, if one of the operands is variant then we still
      // want to mark the call as uniform, which will generate one instruction
      // for just the first lane of the vector. We can't scalarize the call in
      // the same way as for fixed-width vectors because we don't know how many
      // lanes there are.
      //
      // The reasons for doing it this way for scalable vectors are:
      //   1. For the assume intrinsic generating the instruction for the first
      //      lane is still better than not generating any at all. For example,
      //      the input may be a splat across all lanes.
      //   2. For the lifetime start/end intrinsics the pointer operand only
      //      does anything useful when the input comes from a stack object,
      //      which suggests it should always be uniform. For non-stack objects
      //      the effect is to poison the object, which still allows us to
      //      remove the call.
      IsUniform = true;
      break;
    default:
      break;
    }
  }

  auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
                                       IsUniform, IsPredicated);
  setRecipe(I, Recipe);
  Plan->addVPValue(I, Recipe);

  // Find if I uses a predicated instruction. If so, it will use its scalar
  // value. Avoid hoisting the insert-element which packs the scalar value into
  // a vector value, as that happens iff all users use the vector value.
  for (VPValue *Op : Recipe->operands()) {
    auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
    if (!PredR)
      continue;
    auto *RepR =
        cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
    assert(RepR->isPredicated() &&
           "expected Replicate recipe to be predicated");
    RepR->setAlsoPack(false);
  }

  // Finalize the recipe for Instr, first if it is not predicated.
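  // An unpredicated replicate recipe is simply appended to the current
  // VPBasicBlock; a predicated one is wrapped in an if-then replicate region
  // below, and a fresh successor block is returned to continue building into.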
  if (!IsPredicated) {
    LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
    VPBB->appendRecipe(Recipe);
    return VPBB;
  }
  LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
  assert(VPBB->getSuccessors().empty() &&
         "VPBB has successors when handling predicated replication.");
  // Record predicated instructions for above packing optimizations.
  VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
  VPBlockUtils::insertBlockAfter(Region, VPBB);
  auto *RegSucc = new VPBasicBlock();
  VPBlockUtils::insertBlockAfter(RegSucc, Region);
  return RegSucc;
}

VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
                                                      VPRecipeBase *PredRecipe,
                                                      VPlanPtr &Plan) {
  // Instructions marked for predication are replicated and placed under an
  // if-then construct to prevent side-effects.

  // Generate recipes to compute the block mask for this region.
  VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);

  // Build the triangular if-then region.
  std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
  assert(Instr->getParent() && "Predicated instruction not in any basic block");
  auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
  auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
  auto *PHIRecipe = Instr->getType()->isVoidTy()
                        ? nullptr
                        : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
  if (PHIRecipe) {
    Plan->removeVPValueFor(Instr);
    Plan->addVPValue(Instr, PHIRecipe);
  }
  auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
  auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
  VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);

  // Note: first set Entry as region entry and then connect successors starting
  // from it in order, to propagate the "parent" of each VPBasicBlock.
  VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
  VPBlockUtils::connectBlocks(Pred, Exit);

  return Region;
}

VPRecipeOrVPValueTy
VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
                                        ArrayRef<VPValue *> Operands,
                                        VFRange &Range, VPlanPtr &Plan) {
  // First, check for specific widening recipes that deal with calls, memory
  // operations, inductions and Phi nodes.
  if (auto *CI = dyn_cast<CallInst>(Instr))
    return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));

  if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
    return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));

  VPRecipeBase *Recipe;
  if (auto Phi = dyn_cast<PHINode>(Instr)) {
    if (Phi->getParent() != OrigLoop->getHeader())
      return tryToBlend(Phi, Operands, Plan);
    if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands)))
      return toVPRecipeResult(Recipe);

    VPWidenPHIRecipe *PhiRecipe = nullptr;
    if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
      VPValue *StartV = Operands[0];
      if (Legal->isReductionVariable(Phi)) {
        RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
        assert(RdxDesc.getRecurrenceStartValue() ==
               Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
        PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
                                             CM.isInLoopReduction(Phi),
                                             CM.useOrderedReductions(RdxDesc));
      } else {
        PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
      }

      // Record the incoming value from the backedge, so we can add it to the
      // recipe after all recipes have been created.
      recordRecipeOf(cast<Instruction>(
          Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
      PhisToFix.push_back(PhiRecipe);
    } else {
      // TODO: record start and backedge value for remaining pointer induction
      // phis.
      assert(Phi->getType()->isPointerTy() &&
             "only pointer phis should be handled here");
      PhiRecipe = new VPWidenPHIRecipe(Phi);
    }

    return toVPRecipeResult(PhiRecipe);
  }

  if (isa<TruncInst>(Instr) &&
      (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
                                               Range, *Plan)))
    return toVPRecipeResult(Recipe);

  if (!shouldWiden(Instr, Range))
    return nullptr;

  if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
    return toVPRecipeResult(new VPWidenGEPRecipe(
        GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));

  if (auto *SI = dyn_cast<SelectInst>(Instr)) {
    bool InvariantCond =
        PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
    return toVPRecipeResult(new VPWidenSelectRecipe(
        *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
  }

  return toVPRecipeResult(tryToWiden(Instr, Operands));
}

void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
                                                        ElementCount MaxVF) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");

  // Collect instructions from the original loop that will become trivially
  // dead in the vectorized loop. We don't need to vectorize these
  // instructions. For example, original induction update instructions can
  // become dead because we separately emit induction "steps" when generating
  // code for the new loop. Similarly, we create a new latch condition when
  // setting up the structure of the new loop, so the old one can become dead.
  SmallPtrSet<Instruction *, 4> DeadInstructions;
  collectTriviallyDeadInstructions(DeadInstructions);

  // Add assume instructions we need to drop to DeadInstructions, to prevent
  // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  // control flow is preserved, we should keep them.
  auto &ConditionalAssumes = Legal->getConditionalAssumes();
  DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());

  MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
  // Dead instructions do not need sinking. Remove them from SinkAfter.
  for (Instruction *I : DeadInstructions)
    SinkAfter.erase(I);

  // Cannot sink instructions after dead instructions (there won't be any
  // recipes for them). Instead, find the first non-dead previous instruction.
  for (auto &P : Legal->getSinkAfter()) {
    Instruction *SinkTarget = P.second;
    Instruction *FirstInst = &*SinkTarget->getParent()->begin();
    (void)FirstInst;
    while (DeadInstructions.contains(SinkTarget)) {
      assert(
          SinkTarget != FirstInst &&
          "Must find a live instruction (at least the one feeding the "
          "first-order recurrence PHI) before reaching beginning of the block");
      SinkTarget = SinkTarget->getPrevNode();
      assert(SinkTarget != P.first &&
             "sink source equals target, no sinking required");
    }
    P.second = SinkTarget;
  }

  auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
    VFRange SubRange = {VF, MaxVFPlusOne};
    VPlans.push_back(
        buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
    VF = SubRange.End;
  }
}

VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
    VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
    const MapVector<Instruction *, Instruction *> &SinkAfter) {

  SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;

  VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);

  // ---------------------------------------------------------------------------
  // Pre-construction: record ingredients whose recipes we'll need to further
  // process after constructing the initial VPlan.
  // ---------------------------------------------------------------------------

  // Mark instructions we'll need to sink later and their targets as
  // ingredients whose recipe we'll need to record.
  for (auto &Entry : SinkAfter) {
    RecipeBuilder.recordRecipeOf(Entry.first);
    RecipeBuilder.recordRecipeOf(Entry.second);
  }
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    RecipeBuilder.recordRecipeOf(Phi);
    for (auto &R : ReductionOperations) {
      RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
      // need to record the ICmp recipe, so it can be removed later.
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
        RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
    }
  }

  // For each interleave group which is relevant for this (possibly trimmed)
  // Range, add it to the set of groups to be later applied to the VPlan and
  // add placeholders for its members' Recipes which we'll be replacing with a
  // single VPInterleaveRecipe.
  for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
    auto applyIG = [IG, this](ElementCount VF) -> bool {
      return (VF.isVector() && // Query is illegal for VF == 1
              CM.getWideningDecision(IG->getInsertPos(), VF) ==
                  LoopVectorizationCostModel::CM_Interleave);
    };
    if (!getDecisionAndClampRange(applyIG, Range))
      continue;
    InterleaveGroups.insert(IG);
    for (unsigned i = 0; i < IG->getFactor(); i++)
      if (Instruction *Member = IG->getMember(i))
        RecipeBuilder.recordRecipeOf(Member);
  }

  // ---------------------------------------------------------------------------
  // Build initial VPlan: Scan the body of the loop in a topological order to
  // visit each basic block after having visited its predecessor basic blocks.
  // ---------------------------------------------------------------------------

  // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
  auto Plan = std::make_unique<VPlan>();
  VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
  Plan->setEntry(VPBB);

  // Scan the body of the loop in a topological order to visit each basic block
  // after having visited its predecessor basic blocks.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients and fill a new VPBasicBlock.
    unsigned VPBBsForBB = 0;
    auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
    VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
    VPBB = FirstVPBBForBB;
    Builder.setInsertPoint(VPBB);

    // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Instruction *Instr = &I;

      // First filter out irrelevant instructions, to ensure no recipes are
      // built for them.
      if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
        continue;

      SmallVector<VPValue *, 4> Operands;
      auto *Phi = dyn_cast<PHINode>(Instr);
      if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
        Operands.push_back(Plan->getOrAddVPValue(
            Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
      } else {
        auto OpRange = Plan->mapToVPValues(Instr->operands());
        Operands = {OpRange.begin(), OpRange.end()};
      }
      if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
              Instr, Operands, Range, Plan)) {
        // If Instr can be simplified to an existing VPValue, use it.
        if (RecipeOrValue.is<VPValue *>()) {
          auto *VPV = RecipeOrValue.get<VPValue *>();
          Plan->addVPValue(Instr, VPV);
          // If the re-used value is a recipe, register the recipe for the
          // instruction, in case the recipe for Instr needs to be recorded.
          if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
            RecipeBuilder.setRecipe(Instr, R);
          continue;
        }
        // Otherwise, add the new recipe.
        VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
        for (auto *Def : Recipe->definedValues()) {
          auto *UV = Def->getUnderlyingValue();
          Plan->addVPValue(UV, Def);
        }

        RecipeBuilder.setRecipe(Instr, Recipe);
        VPBB->appendRecipe(Recipe);
        continue;
      }

      // Otherwise, if all widening options failed, the Instruction is to be
      // replicated. This may create a successor for VPBB.
      VPBasicBlock *NextVPBB =
          RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
      if (NextVPBB != VPBB) {
        VPBB = NextVPBB;
        VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
                                    : "");
      }
    }
  }

  RecipeBuilder.fixHeaderPhis();

  // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
  // may also be empty, such as the last one (VPBB), reflecting original
  // basic-blocks with no recipes.
  VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
  assert(PreEntry->empty() && "Expecting empty pre-entry block.");
  VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
  VPBlockUtils::disconnectBlocks(PreEntry, Entry);
  delete PreEntry;

  // ---------------------------------------------------------------------------
  // Transform initial VPlan: Apply previously taken decisions, in order, to
  // bring the VPlan to its final state.
  // ---------------------------------------------------------------------------

  // Apply Sink-After legal constraints.
  auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
    auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
    if (Region && Region->isReplicator()) {
      assert(Region->getNumSuccessors() == 1 &&
             Region->getNumPredecessors() == 1 && "Expected SESE region!");
      assert(R->getParent()->size() == 1 &&
             "A recipe in an original replicator region must be the only "
             "recipe in its block");
      return Region;
    }
    return nullptr;
  };
  for (auto &Entry : SinkAfter) {
    VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
    VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);

    auto *TargetRegion = GetReplicateRegion(Target);
    auto *SinkRegion = GetReplicateRegion(Sink);
    if (!SinkRegion) {
      // If the sink source is not a replicate region, sink the recipe
      // directly.
      if (TargetRegion) {
        // The target is in a replication region, make sure to move Sink to
        // the block after it, not into the replication region itself.
        VPBasicBlock *NextBlock =
            cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
        Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
      } else
        Sink->moveAfter(Target);
      continue;
    }

    // The sink source is in a replicate region. Unhook the region from the
    // CFG.
    auto *SinkPred = SinkRegion->getSinglePredecessor();
    auto *SinkSucc = SinkRegion->getSingleSuccessor();
    VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
    VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
    VPBlockUtils::connectBlocks(SinkPred, SinkSucc);

    if (TargetRegion) {
      // The target recipe is also in a replicate region, move the sink region
      // after the target region.
      auto *TargetSucc = TargetRegion->getSingleSuccessor();
      VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
      VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
      VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
    } else {
      // The sink source is in a replicate region, we need to move the whole
      // replicate region, which should only contain a single recipe in the
      // main block.
      auto *SplitBlock =
          Target->getParent()->splitAt(std::next(Target->getIterator()));

      auto *SplitPred = SplitBlock->getSinglePredecessor();

      VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
      VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
      VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
      if (VPBB == SplitPred)
        VPBB = SplitBlock;
    }
  }

  // Adjust the recipes for any inloop reductions.
  adjustRecipesForReductions(VPBB, Plan, RecipeBuilder, Range.Start);

  // Introduce a recipe to combine the incoming and previous values of a
  // first-order recurrence.
  for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
    auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R);
    if (!RecurPhi)
      continue;

    auto *RecurSplice = cast<VPInstruction>(
        Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
                             {RecurPhi, RecurPhi->getBackedgeValue()}));

    VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe();
    if (auto *Region = GetReplicateRegion(PrevRecipe)) {
      VPBasicBlock *Succ = cast<VPBasicBlock>(Region->getSingleSuccessor());
      RecurSplice->moveBefore(*Succ, Succ->getFirstNonPhi());
    } else
      RecurSplice->moveAfter(PrevRecipe);
    RecurPhi->replaceAllUsesWith(RecurSplice);
    // Set the first operand of RecurSplice to RecurPhi again, after replacing
    // all users.
    RecurSplice->setOperand(0, RecurPhi);
  }

  // Interleave memory: for each Interleave Group we marked earlier as relevant
  // for this VPlan, replace the Recipes widening its memory instructions with
  // a single VPInterleaveRecipe at its insertion point.
  for (auto IG : InterleaveGroups) {
    auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
        RecipeBuilder.getRecipe(IG->getInsertPos()));
    SmallVector<VPValue *, 4> StoredValues;
    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
        auto *StoreR =
            cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
        StoredValues.push_back(StoreR->getStoredValue());
      }

    auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
                                        Recipe->getMask());
    VPIG->insertBefore(Recipe);
    unsigned J = 0;
    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (Instruction *Member = IG->getMember(i)) {
        if (!Member->getType()->isVoidTy()) {
          VPValue *OriginalV = Plan->getVPValue(Member);
          Plan->removeVPValueFor(Member);
          Plan->addVPValue(Member, VPIG->getVPValue(J));
          OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
          J++;
        }
        RecipeBuilder.getRecipe(Member)->eraseFromParent();
      }
  }

  // From this point onwards, VPlan-to-VPlan transformations may change the
  // plan in ways that make accessing values using original IR values
  // incorrect.
  Plan->disableValue2VPValue();

  VPlanTransforms::sinkScalarOperands(*Plan);
  VPlanTransforms::mergeReplicateRegions(*Plan);

  std::string PlanName;
  raw_string_ostream RSO(PlanName);
  ElementCount VF = Range.Start;
  Plan->addVF(VF);
  RSO << "Initial VPlan for VF={" << VF;
  for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
    Plan->addVF(VF);
    RSO << "," << VF;
  }
  RSO << "},UF>=1";
  RSO.flush();
  Plan->setName(PlanName);

  return Plan;
}

VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build VPlan
  // upfront in the vectorization pipeline.
  assert(!OrigLoop->isInnermost());
  assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");

  // Create new empty VPlan.
  auto Plan = std::make_unique<VPlan>();

  // Build hierarchical CFG.
  VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
  HCFGBuilder.buildHierarchicalCFG();

  for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
       VF *= 2)
    Plan->addVF(VF);

  if (EnableVPlanPredication) {
    VPlanPredicator VPP(*Plan);
    VPP.predicate();

    // Avoid running transformation to recipes until masked code generation in
    // VPlan-native path is in place.
    return Plan;
  }

  SmallPtrSet<Instruction *, 1> DeadInstructions;
  VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan,
                                             Legal->getInductionVars(),
                                             DeadInstructions, *PSE.getSE());
  return Plan;
}

// Adjust the recipes for reductions. For in-loop reductions the chain of
// instructions leading from the loop exit instr to the phi needs to be
// converted to reductions, with one operand being vector and the other being
// the scalar reduction chain. For other reductions, a select is introduced
// between the phi and live-out recipes when folding the tail.
void LoopVectorizationPlanner::adjustRecipesForReductions(
    VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
    ElementCount MinVF) {
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
      continue;

    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For minmax the chain will be the select instructions.
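    // For example, for an in-loop integer add reduction with
    //   %sum.next = add i32 %sum.phi, %val
    // the chain is %sum.phi -> %sum.next: %sum.phi supplies the scalar chain
    // operand and %val becomes the vector operand of the VPReductionRecipe.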
9565 Instruction *Chain = Phi; 9566 for (Instruction *R : ReductionOperations) { 9567 VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R); 9568 RecurKind Kind = RdxDesc.getRecurrenceKind(); 9569 9570 VPValue *ChainOp = Plan->getVPValue(Chain); 9571 unsigned FirstOpId; 9572 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9573 assert(isa<VPWidenSelectRecipe>(WidenRecipe) && 9574 "Expected to replace a VPWidenSelectSC"); 9575 FirstOpId = 1; 9576 } else { 9577 assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe)) && 9578 "Expected to replace a VPWidenSC"); 9579 FirstOpId = 0; 9580 } 9581 unsigned VecOpId = 9582 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId; 9583 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); 9584 9585 auto *CondOp = CM.foldTailByMasking() 9586 ? RecipeBuilder.createBlockInMask(R->getParent(), Plan) 9587 : nullptr; 9588 VPReductionRecipe *RedRecipe = new VPReductionRecipe( 9589 &RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9590 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9591 Plan->removeVPValueFor(R); 9592 Plan->addVPValue(R, RedRecipe); 9593 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 9594 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9595 WidenRecipe->eraseFromParent(); 9596 9597 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9598 VPRecipeBase *CompareRecipe = 9599 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9600 assert(isa<VPWidenRecipe>(CompareRecipe) && 9601 "Expected to replace a VPWidenSC"); 9602 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9603 "Expected no remaining users"); 9604 CompareRecipe->eraseFromParent(); 9605 } 9606 Chain = R; 9607 } 9608 } 9609 9610 // If tail is folded by masking, introduce selects between the phi 9611 // and the live-out instruction of each reduction, at the end of the latch. 
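// An illustrative sketch of the select created below (assuming VF=4 and i32
// values; names made up):
//     %rdx.select = select <4 x i1> %header.mask, <4 x i32> %rdx.backedge,
//                          <4 x i32> %rdx.phi
// so lanes masked off in the current iteration keep the value carried by the
// reduction phi.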
9612 if (CM.foldTailByMasking()) { 9613 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9614 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R); 9615 if (!PhiR || PhiR->isInLoop()) 9616 continue; 9617 Builder.setInsertPoint(LatchVPBB); 9618 VPValue *Cond = 9619 RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9620 VPValue *Red = PhiR->getBackedgeValue(); 9621 Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR}); 9622 } 9623 } 9624 } 9625 9626 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9627 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9628 VPSlotTracker &SlotTracker) const { 9629 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9630 IG->getInsertPos()->printAsOperand(O, false); 9631 O << ", "; 9632 getAddr()->printAsOperand(O, SlotTracker); 9633 VPValue *Mask = getMask(); 9634 if (Mask) { 9635 O << ", "; 9636 Mask->printAsOperand(O, SlotTracker); 9637 } 9638 9639 unsigned OpIdx = 0; 9640 for (unsigned i = 0; i < IG->getFactor(); ++i) { 9641 if (!IG->getMember(i)) 9642 continue; 9643 if (getNumStoreOperands() > 0) { 9644 O << "\n" << Indent << " store "; 9645 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker); 9646 O << " to index " << i; 9647 } else { 9648 O << "\n" << Indent << " "; 9649 getVPValue(OpIdx)->printAsOperand(O, SlotTracker); 9650 O << " = load from index " << i; 9651 } 9652 ++OpIdx; 9653 } 9654 } 9655 #endif 9656 9657 void VPWidenCallRecipe::execute(VPTransformState &State) { 9658 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9659 *this, State); 9660 } 9661 9662 void VPWidenSelectRecipe::execute(VPTransformState &State) { 9663 State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()), 9664 this, *this, InvariantCond, State); 9665 } 9666 9667 void VPWidenRecipe::execute(VPTransformState &State) { 9668 State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State); 9669 } 9670 9671 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9672 State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this, 9673 *this, State.UF, State.VF, IsPtrLoopInvariant, 9674 IsIndexLoopInvariant, State); 9675 } 9676 9677 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 9678 assert(!State.Instance && "Int or FP induction being replicated."); 9679 State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(), 9680 getTruncInst(), getVPValue(0), 9681 getCastValue(), State); 9682 } 9683 9684 void VPWidenPHIRecipe::execute(VPTransformState &State) { 9685 State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this, 9686 State); 9687 } 9688 9689 void VPBlendRecipe::execute(VPTransformState &State) { 9690 State.ILV->setDebugLocFromInst(Phi, &State.Builder); 9691 // We know that all PHIs in non-header blocks are converted into 9692 // selects, so we don't have to worry about the insertion order and we 9693 // can just use the builder. 9694 // At this point we generate the predication tree. There may be 9695 // duplications since this is a simple recursive scan, but future 9696 // optimizations will clean it up. 9697 9698 unsigned NumIncoming = getNumIncomingValues(); 9699 9700 // Generate a sequence of selects of the form: 9701 // SELECT(Mask3, In3, 9702 // SELECT(Mask2, In2, 9703 // SELECT(Mask1, In1, 9704 // In0))) 9705 // Note that Mask0 is never used: lanes for which no path reaches this phi and 9706 // are essentially undef are taken from In0. 
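// For instance, with three incoming values the loop below emits, per unrolled
// part (an illustrative sketch assuming VF=4 and i32 values):
//     %predphi  = select <4 x i1> %mask1, <4 x i32> %in1, <4 x i32> %in0
//     %predphi1 = select <4 x i1> %mask2, <4 x i32> %in2, <4 x i32> %predphi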
9707 InnerLoopVectorizer::VectorParts Entry(State.UF); 9708 for (unsigned In = 0; In < NumIncoming; ++In) { 9709 for (unsigned Part = 0; Part < State.UF; ++Part) { 9710 // We might have single edge PHIs (blocks) - use an identity 9711 // 'select' for the first PHI operand. 9712 Value *In0 = State.get(getIncomingValue(In), Part); 9713 if (In == 0) 9714 Entry[Part] = In0; // Initialize with the first incoming value. 9715 else { 9716 // Select between the current value and the previous incoming edge 9717 // based on the incoming mask. 9718 Value *Cond = State.get(getMask(In), Part); 9719 Entry[Part] = 9720 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 9721 } 9722 } 9723 } 9724 for (unsigned Part = 0; Part < State.UF; ++Part) 9725 State.set(this, Entry[Part], Part); 9726 } 9727 9728 void VPInterleaveRecipe::execute(VPTransformState &State) { 9729 assert(!State.Instance && "Interleave group being replicated."); 9730 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9731 getStoredValues(), getMask()); 9732 } 9733 9734 void VPReductionRecipe::execute(VPTransformState &State) { 9735 assert(!State.Instance && "Reduction being replicated."); 9736 Value *PrevInChain = State.get(getChainOp(), 0); 9737 for (unsigned Part = 0; Part < State.UF; ++Part) { 9738 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9739 bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc); 9740 Value *NewVecOp = State.get(getVecOp(), Part); 9741 if (VPValue *Cond = getCondOp()) { 9742 Value *NewCond = State.get(Cond, Part); 9743 VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); 9744 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity( 9745 Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags()); 9746 Constant *IdenVec = 9747 ConstantVector::getSplat(VecTy->getElementCount(), Iden); 9748 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); 9749 NewVecOp = Select; 9750 } 9751 Value *NewRed; 9752 Value *NextInChain; 9753 if (IsOrdered) { 9754 if (State.VF.isVector()) 9755 NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp, 9756 PrevInChain); 9757 else 9758 NewRed = State.Builder.CreateBinOp( 9759 (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), 9760 PrevInChain, NewVecOp); 9761 PrevInChain = NewRed; 9762 } else { 9763 PrevInChain = State.get(getChainOp(), Part); 9764 NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); 9765 } 9766 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9767 NextInChain = 9768 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), 9769 NewRed, PrevInChain); 9770 } else if (IsOrdered) 9771 NextInChain = NewRed; 9772 else { 9773 NextInChain = State.Builder.CreateBinOp( 9774 (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed, 9775 PrevInChain); 9776 } 9777 State.set(this, NextInChain, Part); 9778 } 9779 } 9780 9781 void VPReplicateRecipe::execute(VPTransformState &State) { 9782 if (State.Instance) { // Generate a single instance. 9783 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 9784 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this, 9785 *State.Instance, IsPredicated, State); 9786 // Insert scalar instance packing it into a vector. 9787 if (AlsoPack && State.VF.isVector()) { 9788 // If we're constructing lane 0, initialize to start from poison. 
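// Conceptually, packing builds up the vector value lane by lane, e.g.
// (illustrative sketch, VF=4, made-up names):
//     %pack0 = insertelement <4 x i32> poison, i32 %s0, i32 0   ; lane 0
//     %pack1 = insertelement <4 x i32> %pack0, i32 %s1, i32 1   ; lane 1, etc.
// The actual inserts are emitted one predicated instance at a time and are
// stitched together across blocks by VPPredInstPHIRecipe.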
9789 if (State.Instance->Lane.isFirstLane()) {
9790 assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9791 Value *Poison = PoisonValue::get(
9792 VectorType::get(getUnderlyingValue()->getType(), State.VF));
9793 State.set(this, Poison, State.Instance->Part);
9794 }
9795 State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9796 }
9797 return;
9798 }
9799
9800 // Generate scalar instances for all VF lanes of all UF parts, unless the
9801 // instruction is uniform, in which case generate only the first lane for each
9802 // of the UF parts.
9803 unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9804 assert((!State.VF.isScalable() || IsUniform) &&
9805 "Can't scalarize a scalable vector");
9806 for (unsigned Part = 0; Part < State.UF; ++Part)
9807 for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9808 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9809 VPIteration(Part, Lane), IsPredicated,
9810 State);
9811 }
9812
9813 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9814 assert(State.Instance && "Branch on Mask works only on single instance.");
9815
9816 unsigned Part = State.Instance->Part;
9817 unsigned Lane = State.Instance->Lane.getKnownLane();
9818
9819 Value *ConditionBit = nullptr;
9820 VPValue *BlockInMask = getMask();
9821 if (BlockInMask) {
9822 ConditionBit = State.get(BlockInMask, Part);
9823 if (ConditionBit->getType()->isVectorTy())
9824 ConditionBit = State.Builder.CreateExtractElement(
9825 ConditionBit, State.Builder.getInt32(Lane));
9826 } else // Block in mask is all-one.
9827 ConditionBit = State.Builder.getTrue();
9828
9829 // Replace the temporary unreachable terminator with a new conditional branch,
9830 // whose two destinations will be set later when they are created.
9831 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9832 assert(isa<UnreachableInst>(CurrentTerminator) &&
9833 "Expected to replace unreachable terminator with conditional branch.");
9834 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9835 CondBr->setSuccessor(0, nullptr);
9836 ReplaceInstWithInst(CurrentTerminator, CondBr);
9837 }
9838
9839 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9840 assert(State.Instance && "Predicated instruction PHI works per instance.");
9841 Instruction *ScalarPredInst =
9842 cast<Instruction>(State.get(getOperand(0), *State.Instance));
9843 BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9844 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9845 assert(PredicatingBB && "Predicated block has no single predecessor.");
9846 assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9847 "operand must be VPReplicateRecipe");
9848
9849 // By current pack/unpack logic we need to generate only a single phi node: if
9850 // a vector value for the predicated instruction exists at this point it means
9851 // the instruction has vector users only, and a phi for the vector value is
9852 // needed. In this case the recipe of the predicated instruction is marked to
9853 // also do that packing, thereby "hoisting" the insert-element sequence.
9854 // Otherwise, a phi node for the scalar value is needed.
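// An illustrative sketch of the vector-value case handled below (assuming
// VF=4 and i32 values; block and value names made up):
//     pred.block:                                   ; predicated block
//       %ins  = insertelement <4 x i32> %vec.prev, i32 %scalar, i32 1
//     continue.block:
//       %vphi = phi <4 x i32> [ %vec.prev, %predicating.block ],
//                             [ %ins, %pred.block ]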
9855 unsigned Part = State.Instance->Part; 9856 if (State.hasVectorValue(getOperand(0), Part)) { 9857 Value *VectorValue = State.get(getOperand(0), Part); 9858 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9859 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9860 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9861 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9862 if (State.hasVectorValue(this, Part)) 9863 State.reset(this, VPhi, Part); 9864 else 9865 State.set(this, VPhi, Part); 9866 // NOTE: Currently we need to update the value of the operand, so the next 9867 // predicated iteration inserts its generated value in the correct vector. 9868 State.reset(getOperand(0), VPhi, Part); 9869 } else { 9870 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); 9871 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9872 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), 9873 PredicatingBB); 9874 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9875 if (State.hasScalarValue(this, *State.Instance)) 9876 State.reset(this, Phi, *State.Instance); 9877 else 9878 State.set(this, Phi, *State.Instance); 9879 // NOTE: Currently we need to update the value of the operand, so the next 9880 // predicated iteration inserts its generated value in the correct vector. 9881 State.reset(getOperand(0), Phi, *State.Instance); 9882 } 9883 } 9884 9885 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9886 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9887 State.ILV->vectorizeMemoryInstruction( 9888 &Ingredient, State, StoredValue ? nullptr : getVPSingleValue(), getAddr(), 9889 StoredValue, getMask()); 9890 } 9891 9892 // Determine how to lower the scalar epilogue, which depends on 1) optimising 9893 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 9894 // predication, and 4) a TTI hook that analyses whether the loop is suitable 9895 // for predication. 9896 static ScalarEpilogueLowering getScalarEpilogueLowering( 9897 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 9898 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 9899 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 9900 LoopVectorizationLegality &LVL) { 9901 // 1) OptSize takes precedence over all other options, i.e. if this is set, 9902 // don't look at hints or options, and don't request a scalar epilogue. 9903 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 9904 // LoopAccessInfo (due to code dependency and not being able to reliably get 9905 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 9906 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 9907 // versioning when the vectorization is forced, unlike hasOptSize. So revert 9908 // back to the old way and vectorize with versioning when forced. See D81345.) 
9909 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 9910 PGSOQueryType::IRPass) && 9911 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 9912 return CM_ScalarEpilogueNotAllowedOptSize; 9913 9914 // 2) If set, obey the directives 9915 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 9916 switch (PreferPredicateOverEpilogue) { 9917 case PreferPredicateTy::ScalarEpilogue: 9918 return CM_ScalarEpilogueAllowed; 9919 case PreferPredicateTy::PredicateElseScalarEpilogue: 9920 return CM_ScalarEpilogueNotNeededUsePredicate; 9921 case PreferPredicateTy::PredicateOrDontVectorize: 9922 return CM_ScalarEpilogueNotAllowedUsePredicate; 9923 }; 9924 } 9925 9926 // 3) If set, obey the hints 9927 switch (Hints.getPredicate()) { 9928 case LoopVectorizeHints::FK_Enabled: 9929 return CM_ScalarEpilogueNotNeededUsePredicate; 9930 case LoopVectorizeHints::FK_Disabled: 9931 return CM_ScalarEpilogueAllowed; 9932 }; 9933 9934 // 4) if the TTI hook indicates this is profitable, request predication. 9935 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 9936 LVL.getLAI())) 9937 return CM_ScalarEpilogueNotNeededUsePredicate; 9938 9939 return CM_ScalarEpilogueAllowed; 9940 } 9941 9942 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 9943 // If Values have been set for this Def return the one relevant for \p Part. 9944 if (hasVectorValue(Def, Part)) 9945 return Data.PerPartOutput[Def][Part]; 9946 9947 if (!hasScalarValue(Def, {Part, 0})) { 9948 Value *IRV = Def->getLiveInIRValue(); 9949 Value *B = ILV->getBroadcastInstrs(IRV); 9950 set(Def, B, Part); 9951 return B; 9952 } 9953 9954 Value *ScalarValue = get(Def, {Part, 0}); 9955 // If we aren't vectorizing, we can just copy the scalar map values over 9956 // to the vector map. 9957 if (VF.isScalar()) { 9958 set(Def, ScalarValue, Part); 9959 return ScalarValue; 9960 } 9961 9962 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 9963 bool IsUniform = RepR && RepR->isUniform(); 9964 9965 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; 9966 // Check if there is a scalar value for the selected lane. 9967 if (!hasScalarValue(Def, {Part, LastLane})) { 9968 // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. 9969 assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) && 9970 "unexpected recipe found to be invariant"); 9971 IsUniform = true; 9972 LastLane = 0; 9973 } 9974 9975 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane})); 9976 // Set the insert point after the last scalarized instruction or after the 9977 // last PHI, if LastInst is a PHI. This ensures the insertelement sequence 9978 // will directly follow the scalar definitions. 9979 auto OldIP = Builder.saveIP(); 9980 auto NewIP = 9981 isa<PHINode>(LastInst) 9982 ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI()) 9983 : std::next(BasicBlock::iterator(LastInst)); 9984 Builder.SetInsertPoint(&*NewIP); 9985 9986 // However, if we are vectorizing, we need to construct the vector values. 9987 // If the value is known to be uniform after vectorization, we can just 9988 // broadcast the scalar value corresponding to lane zero for each unroll 9989 // iteration. Otherwise, we construct the vector values using 9990 // insertelement instructions. Since the resulting vectors are stored in 9991 // State, we will only generate the insertelements once. 
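// As an illustrative sketch (assuming VF=4 and i32 values; names made up),
// the two cases below produce either a broadcast of lane zero:
//     %ins0  = insertelement <4 x i32> poison, i32 %scalar0, i32 0
//     %splat = shufflevector <4 x i32> %ins0, <4 x i32> poison,
//                            <4 x i32> zeroinitializer
// or a per-lane packing sequence:
//     %p0 = insertelement <4 x i32> poison, i32 %s0, i32 0
//     %p1 = insertelement <4 x i32> %p0,    i32 %s1, i32 1   ; ...and so on.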
9992 Value *VectorValue = nullptr; 9993 if (IsUniform) { 9994 VectorValue = ILV->getBroadcastInstrs(ScalarValue); 9995 set(Def, VectorValue, Part); 9996 } else { 9997 // Initialize packing with insertelements to start from undef. 9998 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 9999 Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF)); 10000 set(Def, Undef, Part); 10001 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) 10002 ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this); 10003 VectorValue = get(Def, Part); 10004 } 10005 Builder.restoreIP(OldIP); 10006 return VectorValue; 10007 } 10008 10009 // Process the loop in the VPlan-native vectorization path. This path builds 10010 // VPlan upfront in the vectorization pipeline, which allows to apply 10011 // VPlan-to-VPlan transformations from the very beginning without modifying the 10012 // input LLVM IR. 10013 static bool processLoopInVPlanNativePath( 10014 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 10015 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 10016 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 10017 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 10018 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, 10019 LoopVectorizationRequirements &Requirements) { 10020 10021 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { 10022 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); 10023 return false; 10024 } 10025 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 10026 Function *F = L->getHeader()->getParent(); 10027 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 10028 10029 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10030 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 10031 10032 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 10033 &Hints, IAI); 10034 // Use the planner for outer loop vectorization. 10035 // TODO: CM is not used at this point inside the planner. Turn CM into an 10036 // optional argument if we don't need it in the future. 10037 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints, 10038 Requirements, ORE); 10039 10040 // Get user vectorization factor. 10041 ElementCount UserVF = Hints.getWidth(); 10042 10043 CM.collectElementTypesForWidening(); 10044 10045 // Plan how to best vectorize, return the best VF and its cost. 10046 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 10047 10048 // If we are stress testing VPlan builds, do not attempt to generate vector 10049 // code. Masked vector code generation support will follow soon. 10050 // Also, do not attempt to vectorize if no vector code will be produced. 10051 if (VPlanBuildStressTest || EnableVPlanPredication || 10052 VectorizationFactor::Disabled() == VF) 10053 return false; 10054 10055 LVP.setBestPlan(VF.Width, 1); 10056 10057 { 10058 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10059 F->getParent()->getDataLayout()); 10060 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 10061 &CM, BFI, PSI, Checks); 10062 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 10063 << L->getHeader()->getParent()->getName() << "\"\n"); 10064 LVP.executePlan(LB, DT); 10065 } 10066 10067 // Mark the loop as already vectorized to avoid vectorizing again. 
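// Concretely, setAlreadyVectorized() below attaches loop metadata along the
// lines of !{!"llvm.loop.isvectorized", i32 1} to the loop ID, which later
// queries treat as "already vectorized".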
10068 Hints.setAlreadyVectorized(); 10069 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10070 return true; 10071 } 10072 10073 // Emit a remark if there are stores to floats that required a floating point 10074 // extension. If the vectorized loop was generated with floating point there 10075 // will be a performance penalty from the conversion overhead and the change in 10076 // the vector width. 10077 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) { 10078 SmallVector<Instruction *, 4> Worklist; 10079 for (BasicBlock *BB : L->getBlocks()) { 10080 for (Instruction &Inst : *BB) { 10081 if (auto *S = dyn_cast<StoreInst>(&Inst)) { 10082 if (S->getValueOperand()->getType()->isFloatTy()) 10083 Worklist.push_back(S); 10084 } 10085 } 10086 } 10087 10088 // Traverse the floating point stores upwards searching, for floating point 10089 // conversions. 10090 SmallPtrSet<const Instruction *, 4> Visited; 10091 SmallPtrSet<const Instruction *, 4> EmittedRemark; 10092 while (!Worklist.empty()) { 10093 auto *I = Worklist.pop_back_val(); 10094 if (!L->contains(I)) 10095 continue; 10096 if (!Visited.insert(I).second) 10097 continue; 10098 10099 // Emit a remark if the floating point store required a floating 10100 // point conversion. 10101 // TODO: More work could be done to identify the root cause such as a 10102 // constant or a function return type and point the user to it. 10103 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second) 10104 ORE->emit([&]() { 10105 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision", 10106 I->getDebugLoc(), L->getHeader()) 10107 << "floating point conversion changes vector width. " 10108 << "Mixed floating point precision requires an up/down " 10109 << "cast that will negatively impact performance."; 10110 }); 10111 10112 for (Use &Op : I->operands()) 10113 if (auto *OpI = dyn_cast<Instruction>(Op)) 10114 Worklist.push_back(OpI); 10115 } 10116 } 10117 10118 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 10119 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 10120 !EnableLoopInterleaving), 10121 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 10122 !EnableLoopVectorization) {} 10123 10124 bool LoopVectorizePass::processLoop(Loop *L) { 10125 assert((EnableVPlanNativePath || L->isInnermost()) && 10126 "VPlan-native path is not enabled. Only process inner loops."); 10127 10128 #ifndef NDEBUG 10129 const std::string DebugLocStr = getDebugLocString(L); 10130 #endif /* NDEBUG */ 10131 10132 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 10133 << L->getHeader()->getParent()->getName() << "\" from " 10134 << DebugLocStr << "\n"); 10135 10136 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); 10137 10138 LLVM_DEBUG( 10139 dbgs() << "LV: Loop hints:" 10140 << " force=" 10141 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 10142 ? "disabled" 10143 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 10144 ? "enabled" 10145 : "?")) 10146 << " width=" << Hints.getWidth() 10147 << " interleave=" << Hints.getInterleave() << "\n"); 10148 10149 // Function containing loop 10150 Function *F = L->getHeader()->getParent(); 10151 10152 // Looking at the diagnostic output is the only way to determine if a loop 10153 // was vectorized (other than looking at the IR or machine code), so it 10154 // is important to generate an optimization remark for each loop. Most of 10155 // these messages are generated as OptimizationRemarkAnalysis. 
Remarks 10156 // generated as OptimizationRemark and OptimizationRemarkMissed are 10157 // less verbose reporting vectorized loops and unvectorized loops that may 10158 // benefit from vectorization, respectively. 10159 10160 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 10161 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 10162 return false; 10163 } 10164 10165 PredicatedScalarEvolution PSE(*SE, *L); 10166 10167 // Check if it is legal to vectorize the loop. 10168 LoopVectorizationRequirements Requirements; 10169 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 10170 &Requirements, &Hints, DB, AC, BFI, PSI); 10171 if (!LVL.canVectorize(EnableVPlanNativePath)) { 10172 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 10173 Hints.emitRemarkWithHints(); 10174 return false; 10175 } 10176 10177 // Check the function attributes and profiles to find out if this function 10178 // should be optimized for size. 10179 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10180 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 10181 10182 // Entrance to the VPlan-native vectorization path. Outer loops are processed 10183 // here. They may require CFG and instruction level transformations before 10184 // even evaluating whether vectorization is profitable. Since we cannot modify 10185 // the incoming IR, we need to build VPlan upfront in the vectorization 10186 // pipeline. 10187 if (!L->isInnermost()) 10188 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 10189 ORE, BFI, PSI, Hints, Requirements); 10190 10191 assert(L->isInnermost() && "Inner loop expected."); 10192 10193 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 10194 // count by optimizing for size, to minimize overheads. 10195 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 10196 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 10197 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 10198 << "This loop is worth vectorizing only if no scalar " 10199 << "iteration overheads are incurred."); 10200 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 10201 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 10202 else { 10203 LLVM_DEBUG(dbgs() << "\n"); 10204 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 10205 } 10206 } 10207 10208 // Check the function attributes to see if implicit floats are allowed. 10209 // FIXME: This check doesn't seem possibly correct -- what if the loop is 10210 // an integer loop and the vector instructions selected are purely integer 10211 // vector instructions? 10212 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 10213 reportVectorizationFailure( 10214 "Can't vectorize when the NoImplicitFloat attribute is used", 10215 "loop not vectorized due to NoImplicitFloat attribute", 10216 "NoImplicitFloat", ORE, L); 10217 Hints.emitRemarkWithHints(); 10218 return false; 10219 } 10220 10221 // Check if the target supports potentially unsafe FP vectorization. 10222 // FIXME: Add a check for the type of safety issue (denormal, signaling) 10223 // for the target we're vectorizing for, to make sure none of the 10224 // additional fp-math flags can help. 
10225 if (Hints.isPotentiallyUnsafe() && 10226 TTI->isFPVectorizationPotentiallyUnsafe()) { 10227 reportVectorizationFailure( 10228 "Potentially unsafe FP op prevents vectorization", 10229 "loop not vectorized due to unsafe FP support.", 10230 "UnsafeFP", ORE, L); 10231 Hints.emitRemarkWithHints(); 10232 return false; 10233 } 10234 10235 bool AllowOrderedReductions; 10236 // If the flag is set, use that instead and override the TTI behaviour. 10237 if (ForceOrderedReductions.getNumOccurrences() > 0) 10238 AllowOrderedReductions = ForceOrderedReductions; 10239 else 10240 AllowOrderedReductions = TTI->enableOrderedReductions(); 10241 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) { 10242 ORE->emit([&]() { 10243 auto *ExactFPMathInst = Requirements.getExactFPInst(); 10244 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps", 10245 ExactFPMathInst->getDebugLoc(), 10246 ExactFPMathInst->getParent()) 10247 << "loop not vectorized: cannot prove it is safe to reorder " 10248 "floating-point operations"; 10249 }); 10250 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to " 10251 "reorder floating-point operations\n"); 10252 Hints.emitRemarkWithHints(); 10253 return false; 10254 } 10255 10256 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 10257 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 10258 10259 // If an override option has been passed in for interleaved accesses, use it. 10260 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 10261 UseInterleaved = EnableInterleavedMemAccesses; 10262 10263 // Analyze interleaved memory accesses. 10264 if (UseInterleaved) { 10265 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 10266 } 10267 10268 // Use the cost model. 10269 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 10270 F, &Hints, IAI); 10271 CM.collectValuesToIgnore(); 10272 CM.collectElementTypesForWidening(); 10273 10274 // Use the planner for vectorization. 10275 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints, 10276 Requirements, ORE); 10277 10278 // Get user vectorization factor and interleave count. 10279 ElementCount UserVF = Hints.getWidth(); 10280 unsigned UserIC = Hints.getInterleave(); 10281 10282 // Plan how to best vectorize, return the best VF and its cost. 10283 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC); 10284 10285 VectorizationFactor VF = VectorizationFactor::Disabled(); 10286 unsigned IC = 1; 10287 10288 if (MaybeVF) { 10289 VF = *MaybeVF; 10290 // Select the interleave count. 10291 IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue()); 10292 } 10293 10294 // Identify the diagnostic messages that should be produced. 10295 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg; 10296 bool VectorizeLoop = true, InterleaveLoop = true; 10297 if (VF.Width.isScalar()) { 10298 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); 10299 VecDiagMsg = std::make_pair( 10300 "VectorizationNotBeneficial", 10301 "the cost-model indicates that vectorization is not beneficial"); 10302 VectorizeLoop = false; 10303 } 10304 10305 if (!MaybeVF && UserIC > 1) { 10306 // Tell the user interleaving was avoided up-front, despite being explicitly 10307 // requested. 
10308 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10309 "interleaving should be avoided up front\n");
10310 IntDiagMsg = std::make_pair(
10311 "InterleavingAvoided",
10312 "Ignoring UserIC, because interleaving was avoided up front");
10313 InterleaveLoop = false;
10314 } else if (IC == 1 && UserIC <= 1) {
10315 // Tell the user interleaving is not beneficial.
10316 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10317 IntDiagMsg = std::make_pair(
10318 "InterleavingNotBeneficial",
10319 "the cost-model indicates that interleaving is not beneficial");
10320 InterleaveLoop = false;
10321 if (UserIC == 1) {
10322 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10323 IntDiagMsg.second +=
10324 " and is explicitly disabled or interleave count is set to 1";
10325 }
10326 } else if (IC > 1 && UserIC == 1) {
10327 // Tell the user interleaving is beneficial, but it is explicitly disabled.
10328 LLVM_DEBUG(
10329 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10330 IntDiagMsg = std::make_pair(
10331 "InterleavingBeneficialButDisabled",
10332 "the cost-model indicates that interleaving is beneficial "
10333 "but is explicitly disabled or interleave count is set to 1");
10334 InterleaveLoop = false;
10335 }
10336
10337 // Override IC if user provided an interleave count.
10338 IC = UserIC > 0 ? UserIC : IC;
10339
10340 // Emit diagnostic messages, if any.
10341 const char *VAPassName = Hints.vectorizeAnalysisPassName();
10342 if (!VectorizeLoop && !InterleaveLoop) {
10343 // Do not vectorize or interleave the loop.
10344 ORE->emit([&]() {
10345 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10346 L->getStartLoc(), L->getHeader())
10347 << VecDiagMsg.second;
10348 });
10349 ORE->emit([&]() {
10350 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10351 L->getStartLoc(), L->getHeader())
10352 << IntDiagMsg.second;
10353 });
10354 return false;
10355 } else if (!VectorizeLoop && InterleaveLoop) {
10356 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10357 ORE->emit([&]() {
10358 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10359 L->getStartLoc(), L->getHeader())
10360 << VecDiagMsg.second;
10361 });
10362 } else if (VectorizeLoop && !InterleaveLoop) {
10363 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10364 << ") in " << DebugLocStr << '\n');
10365 ORE->emit([&]() {
10366 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10367 L->getStartLoc(), L->getHeader())
10368 << IntDiagMsg.second;
10369 });
10370 } else if (VectorizeLoop && InterleaveLoop) {
10371 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10372 << ") in " << DebugLocStr << '\n');
10373 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10374 }
10375
10376 bool DisableRuntimeUnroll = false;
10377 MDNode *OrigLoopID = L->getLoopID();
10378 {
10379 // Optimistically generate runtime checks. Drop them if they turn out not to
10380 // be profitable. Limit the scope of Checks, so the cleanup happens
10381 // immediately after vector code generation is done.
10382 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10383 F->getParent()->getDataLayout()); 10384 if (!VF.Width.isScalar() || IC > 1) 10385 Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate()); 10386 LVP.setBestPlan(VF.Width, IC); 10387 10388 using namespace ore; 10389 if (!VectorizeLoop) { 10390 assert(IC > 1 && "interleave count should not be 1 or 0"); 10391 // If we decided that it is not legal to vectorize the loop, then 10392 // interleave it. 10393 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, 10394 &CM, BFI, PSI, Checks); 10395 LVP.executePlan(Unroller, DT); 10396 10397 ORE->emit([&]() { 10398 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 10399 L->getHeader()) 10400 << "interleaved loop (interleaved count: " 10401 << NV("InterleaveCount", IC) << ")"; 10402 }); 10403 } else { 10404 // If we decided that it is *legal* to vectorize the loop, then do it. 10405 10406 // Consider vectorizing the epilogue too if it's profitable. 10407 VectorizationFactor EpilogueVF = 10408 CM.selectEpilogueVectorizationFactor(VF.Width, LVP); 10409 if (EpilogueVF.Width.isVector()) { 10410 10411 // The first pass vectorizes the main loop and creates a scalar epilogue 10412 // to be vectorized by executing the plan (potentially with a different 10413 // factor) again shortly afterwards. 10414 EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC, 10415 EpilogueVF.Width.getKnownMinValue(), 10416 1); 10417 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, 10418 EPI, &LVL, &CM, BFI, PSI, Checks); 10419 10420 LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF); 10421 LVP.executePlan(MainILV, DT); 10422 ++LoopsVectorized; 10423 10424 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10425 formLCSSARecursively(*L, *DT, LI, SE); 10426 10427 // Second pass vectorizes the epilogue and adjusts the control flow 10428 // edges from the first pass. 10429 LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF); 10430 EPI.MainLoopVF = EPI.EpilogueVF; 10431 EPI.MainLoopUF = EPI.EpilogueUF; 10432 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC, 10433 ORE, EPI, &LVL, &CM, BFI, PSI, 10434 Checks); 10435 LVP.executePlan(EpilogILV, DT); 10436 ++LoopsEpilogueVectorized; 10437 10438 if (!MainILV.areSafetyChecksAdded()) 10439 DisableRuntimeUnroll = true; 10440 } else { 10441 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 10442 &LVL, &CM, BFI, PSI, Checks); 10443 LVP.executePlan(LB, DT); 10444 ++LoopsVectorized; 10445 10446 // Add metadata to disable runtime unrolling a scalar loop when there 10447 // are no runtime checks about strides and memory. A scalar loop that is 10448 // rarely used is not worth unrolling. 10449 if (!LB.areSafetyChecksAdded()) 10450 DisableRuntimeUnroll = true; 10451 } 10452 // Report the vectorization decision. 
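// When remarks are enabled (e.g. with -Rpass=loop-vectorize), the emit below
// surfaces as something like:
//     remark: foo.c:12:3: vectorized loop (vectorization width: 4,
//             interleaved count: 2)
// with the location and numbers depending on the loop (example values only).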
10453 ORE->emit([&]() { 10454 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 10455 L->getHeader()) 10456 << "vectorized loop (vectorization width: " 10457 << NV("VectorizationFactor", VF.Width) 10458 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 10459 }); 10460 } 10461 10462 if (ORE->allowExtraAnalysis(LV_NAME)) 10463 checkMixedPrecision(L, ORE); 10464 } 10465 10466 Optional<MDNode *> RemainderLoopID = 10467 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 10468 LLVMLoopVectorizeFollowupEpilogue}); 10469 if (RemainderLoopID.hasValue()) { 10470 L->setLoopID(RemainderLoopID.getValue()); 10471 } else { 10472 if (DisableRuntimeUnroll) 10473 AddRuntimeUnrollDisableMetaData(L); 10474 10475 // Mark the loop as already vectorized to avoid vectorizing again. 10476 Hints.setAlreadyVectorized(); 10477 } 10478 10479 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10480 return true; 10481 } 10482 10483 LoopVectorizeResult LoopVectorizePass::runImpl( 10484 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 10485 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 10486 DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_, 10487 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 10488 OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) { 10489 SE = &SE_; 10490 LI = &LI_; 10491 TTI = &TTI_; 10492 DT = &DT_; 10493 BFI = &BFI_; 10494 TLI = TLI_; 10495 AA = &AA_; 10496 AC = &AC_; 10497 GetLAA = &GetLAA_; 10498 DB = &DB_; 10499 ORE = &ORE_; 10500 PSI = PSI_; 10501 10502 // Don't attempt if 10503 // 1. the target claims to have no vector registers, and 10504 // 2. interleaving won't help ILP. 10505 // 10506 // The second condition is necessary because, even if the target has no 10507 // vector registers, loop vectorization may still enable scalar 10508 // interleaving. 10509 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) && 10510 TTI->getMaxInterleaveFactor(1) < 2) 10511 return LoopVectorizeResult(false, false); 10512 10513 bool Changed = false, CFGChanged = false; 10514 10515 // The vectorizer requires loops to be in simplified form. 10516 // Since simplification may add new inner loops, it has to run before the 10517 // legality and profitability checks. This means running the loop vectorizer 10518 // will simplify all loops, regardless of whether anything end up being 10519 // vectorized. 10520 for (auto &L : *LI) 10521 Changed |= CFGChanged |= 10522 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10523 10524 // Build up a worklist of inner-loops to vectorize. This is necessary as 10525 // the act of vectorizing or partially unrolling a loop creates new loops 10526 // and can invalidate iterators across the loops. 10527 SmallVector<Loop *, 8> Worklist; 10528 10529 for (Loop *L : *LI) 10530 collectSupportedLoops(*L, LI, ORE, Worklist); 10531 10532 LoopsAnalyzed += Worklist.size(); 10533 10534 // Now walk the identified inner loops. 10535 while (!Worklist.empty()) { 10536 Loop *L = Worklist.pop_back_val(); 10537 10538 // For the inner loops we actually process, form LCSSA to simplify the 10539 // transform. 10540 Changed |= formLCSSARecursively(*L, *DT, LI, SE); 10541 10542 Changed |= CFGChanged |= processLoop(L); 10543 } 10544 10545 // Process each loop nest in the function. 
10546 return LoopVectorizeResult(Changed, CFGChanged); 10547 } 10548 10549 PreservedAnalyses LoopVectorizePass::run(Function &F, 10550 FunctionAnalysisManager &AM) { 10551 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 10552 auto &LI = AM.getResult<LoopAnalysis>(F); 10553 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 10554 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 10555 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 10556 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 10557 auto &AA = AM.getResult<AAManager>(F); 10558 auto &AC = AM.getResult<AssumptionAnalysis>(F); 10559 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 10560 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 10561 10562 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 10563 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 10564 [&](Loop &L) -> const LoopAccessInfo & { 10565 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, 10566 TLI, TTI, nullptr, nullptr}; 10567 return LAM.getResult<LoopAccessAnalysis>(L, AR); 10568 }; 10569 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F); 10570 ProfileSummaryInfo *PSI = 10571 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent()); 10572 LoopVectorizeResult Result = 10573 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI); 10574 if (!Result.MadeAnyChange) 10575 return PreservedAnalyses::all(); 10576 PreservedAnalyses PA; 10577 10578 // We currently do not preserve loopinfo/dominator analyses with outer loop 10579 // vectorization. Until this is addressed, mark these analyses as preserved 10580 // only for non-VPlan-native path. 10581 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 10582 if (!EnableVPlanNativePath) { 10583 PA.preserve<LoopAnalysis>(); 10584 PA.preserve<DominatorTreeAnalysis>(); 10585 } 10586 if (!Result.MadeCFGChange) 10587 PA.preserveSet<CFGAnalyses>(); 10588 return PA; 10589 } 10590